text (string, lengths 0–1.05M) | meta (dict)
---|---|
from __future__ import absolute_import, division, print_function
import tensorflow as tf
from tensorflow import convert_to_tensor as to_T
from util.cnn import fc_layer as fc, conv_relu_layer as conv_relu
def _get_lstm_cell(num_layers, lstm_dim, apply_dropout):
if isinstance(lstm_dim, list): # Different layers have different dimensions
if not len(lstm_dim) == num_layers:
raise ValueError('the length of lstm_dim must be equal to num_layers')
cell_list = []
for l in range(num_layers):
lstm_cell = tf.contrib.rnn.BasicLSTMCell(lstm_dim[l], state_is_tuple=True)
# Dropout is only applied on output of the 1st to second-last layer.
# The output of the last layer has no dropout
if apply_dropout and l < num_layers-1:
dropout_cell = tf.contrib.rnn.DropoutWrapper(lstm_cell,
output_keep_prob=0.5)
else:
dropout_cell = lstm_cell
cell_list.append(dropout_cell)
else: # All layers have the same dimension.
lstm_cell = tf.contrib.rnn.BasicLSTMCell(lstm_dim, state_is_tuple=True)
# Dropout is only applied on output of the 1st to second-last layer.
# The output of the last layer has no dropout
if apply_dropout:
dropout_cell = tf.contrib.rnn.DropoutWrapper(lstm_cell,
output_keep_prob=0.5)
else:
dropout_cell = lstm_cell
cell_list = [dropout_cell] * (num_layers-1) + [lstm_cell]
cell = tf.contrib.rnn.MultiRNNCell(cell_list, state_is_tuple=True)
return cell
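# A minimal usage sketch for _get_lstm_cell (illustrative only; assumes TF 1.x
# with tf.contrib available, and the tensor names and sizes below are made up).
# It builds a 2-layer LSTM with dropout on every layer except the last and runs
# it over a time-major batch of embedded sequences.
def _example_encoder_cell_usage(embedded_seq, seq_length_batch):
    # embedded_seq: [T, N, D] float tensor, seq_length_batch: [N] int tensor
    cell = _get_lstm_cell(num_layers=2, lstm_dim=512, apply_dropout=True)
    outputs, states = tf.nn.dynamic_rnn(
        cell, embedded_seq, seq_length_batch,
        dtype=tf.float32, time_major=True, scope='example_lstm')
    # outputs: [T, N, 512]; states: a 2-tuple of LSTMStateTuple(c, h)
    return outputs, states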
class AttentionSeq2Seq:
def __init__(self, input_seq_batch, seq_length_batch, T_decoder,
num_vocab_txt, embed_dim_txt, num_vocab_nmn, embed_dim_nmn,
lstm_dim, num_layers, EOS_token, encoder_dropout, decoder_dropout,
decoder_sampling, use_gt_layout=None, gt_layout_batch=None,
scope='encoder_decoder', reuse=None):
self.T_decoder = T_decoder
self.encoder_num_vocab = num_vocab_txt
self.encoder_embed_dim = embed_dim_txt
self.decoder_num_vocab = num_vocab_nmn
self.decoder_embed_dim = embed_dim_nmn
self.lstm_dim = lstm_dim
self.num_layers = num_layers
self.EOS_token = EOS_token
self.encoder_dropout = encoder_dropout
self.decoder_dropout = decoder_dropout
self.decoder_sampling = decoder_sampling
with tf.variable_scope(scope, reuse=reuse):
self._build_encoder(input_seq_batch, seq_length_batch)
self._build_decoder(use_gt_layout, gt_layout_batch)
def _build_encoder(self, input_seq_batch, seq_length_batch, scope='encoder',
reuse=None):
lstm_dim = self.lstm_dim
num_layers = self.num_layers
apply_dropout = self.encoder_dropout
with tf.variable_scope(scope, reuse=reuse):
T = tf.shape(input_seq_batch)[0]
N = tf.shape(input_seq_batch)[1]
self.T_encoder = T
self.N = N
embedding_mat = tf.get_variable('embedding_mat',
[self.encoder_num_vocab, self.encoder_embed_dim])
# text_seq has shape [T, N] and embedded_seq has shape [T, N, D].
embedded_seq = tf.nn.embedding_lookup(embedding_mat, input_seq_batch)
self.embedded_input_seq = embedded_seq
# The RNN
cell = _get_lstm_cell(num_layers, lstm_dim, apply_dropout)
# encoder_outputs has shape [T, N, lstm_dim]
encoder_outputs, encoder_states = tf.nn.dynamic_rnn(cell,
embedded_seq, seq_length_batch, dtype=tf.float32,
time_major=True, scope='lstm')
self.encoder_outputs = encoder_outputs
self.encoder_states = encoder_states
# transform the encoder outputs for further attention alignments
# encoder_h_transformed has shape [T, N, lstm_dim]
encoder_h_transformed = fc('encoder_h_transform',
tf.reshape(encoder_outputs, [-1, lstm_dim]), output_dim=lstm_dim)
encoder_h_transformed = tf.reshape(encoder_h_transformed,
to_T([T, N, lstm_dim]))
self.encoder_h_transformed = encoder_h_transformed
# seq_not_finished is a shape [T, N, 1] tensor, where seq_not_finished[t, n]
# is 1 iff sequence n is not finished at time t, and 0 otherwise
seq_not_finished = tf.less(tf.range(T)[:, tf.newaxis, tf.newaxis],
seq_length_batch[:, tf.newaxis])
seq_not_finished = tf.cast(seq_not_finished, tf.float32)
self.seq_not_finished = seq_not_finished
def _build_decoder(self, use_gt_layout, gt_layout_batch, scope='decoder',
reuse=None):
# The main difference from before is that the decoder now takes another
# input (the attention) when computing the next step.
# T_max is the maximum length of the decoded sequence (including <eos>).
#
# This function is for decoding only. It performs greedy search or sampling.
# The first input is <go> (its embedding vector) and the subsequent inputs
# are the outputs from the previous time step.
# num_vocab does not include <go>.
#
# use_gt_layout is None or a bool tensor, and gt_layout_batch is a tensor
# with shape [T_max, N].
# If use_gt_layout is not None, then when use_gt_layout is true, predict
# exactly the tokens in gt_layout_batch, regardless of the actual probability.
# Otherwise, if sampling is True, sample from the token probabilities;
# if sampling is False, do greedy decoding (beam size 1).
N = self.N
encoder_states = self.encoder_states
T_max = self.T_decoder
lstm_dim = self.lstm_dim
num_layers = self.num_layers
apply_dropout = self.decoder_dropout
EOS_token = self.EOS_token
sampling = self.decoder_sampling
with tf.variable_scope(scope, reuse=reuse):
embedding_mat = tf.get_variable('embedding_mat',
[self.decoder_num_vocab, self.decoder_embed_dim])
# we use a separate embedding for <go>, as it is only used in the
# beginning of the sequence
go_embedding = tf.get_variable('go_embedding', [1, self.decoder_embed_dim])
with tf.variable_scope('att_prediction'):
v = tf.get_variable('v', [lstm_dim])
W_a = tf.get_variable('weights', [lstm_dim, lstm_dim],
initializer=tf.contrib.layers.xavier_initializer())
b_a = tf.get_variable('biases', lstm_dim,
initializer=tf.constant_initializer(0.))
# The parameters to predict the next token
with tf.variable_scope('token_prediction'):
W_y = tf.get_variable('weights', [lstm_dim*2, self.decoder_num_vocab],
initializer=tf.contrib.layers.xavier_initializer())
b_y = tf.get_variable('biases', self.decoder_num_vocab,
initializer=tf.constant_initializer(0.))
# Attentional decoding
# Loop function is called at time t BEFORE the cell execution at time t,
# and its next_input is used as the input at time t (not t+1)
# c.f. https://www.tensorflow.org/api_docs/python/tf/nn/raw_rnn
mask_range = tf.reshape(
tf.range(self.decoder_num_vocab, dtype=tf.int32),
[1, -1])
all_eos_pred = EOS_token * tf.ones(to_T([N]), tf.int32)
all_one_prob = tf.ones(to_T([N]), tf.float32)
all_zero_entropy = tf.zeros(to_T([N]), tf.float32)
if use_gt_layout is not None:
gt_layout_mult = tf.cast(use_gt_layout, tf.int32)
pred_layout_mult = 1 - gt_layout_mult
def loop_fn(time, cell_output, cell_state, loop_state):
if cell_output is None: # time == 0
next_cell_state = encoder_states
next_input = tf.tile(go_embedding, to_T([N, 1]))
else: # time > 0
next_cell_state = cell_state
# compute the attention map over the input sequence
# att_raw has shape [T, N, 1]
att_raw = tf.reduce_sum(
tf.tanh(tf.nn.xw_plus_b(cell_output, W_a, b_a) +
self.encoder_h_transformed) * v,
axis=2, keep_dims=True)
# softmax along the first dimension (T) over not finished examples
# att has shape [T, N, 1]
att = tf.nn.softmax(att_raw, dim=0)*self.seq_not_finished
att = att / tf.reduce_sum(att, axis=0, keep_dims=True)
# d2 has shape [N, lstm_dim]
d2 = tf.reduce_sum(att*self.encoder_outputs, axis=0)
# token_scores has shape [N, num_vocab]
token_scores = tf.nn.xw_plus_b(
tf.concat([cell_output, d2], axis=1),
W_y, b_y)
# predict the next token (behavior depending on parameters)
if sampling:
# predicted_token has shape [N]
logits = token_scores
predicted_token = tf.cast(tf.reshape(
tf.multinomial(token_scores, 1), [-1]), tf.int32)
else:
# predicted_token has shape [N]
predicted_token = tf.cast(tf.argmax(token_scores, 1), tf.int32)
if use_gt_layout is not None:
predicted_token = (gt_layout_batch[time-1] * gt_layout_mult
+ predicted_token * pred_layout_mult)
# token_prob has shape [N], the probability of the predicted token
# although token_prob is not needed for predicting the next token
# it is needed in output (for policy gradient training)
# [N, num_vocab]
# mask has shape [N, num_vocab]
mask = tf.equal(mask_range, tf.reshape(predicted_token, [-1, 1]))
all_token_probs = tf.nn.softmax(token_scores)
token_prob = tf.reduce_sum(all_token_probs *
tf.cast(mask, tf.float32), axis=1)
neg_entropy = tf.reduce_sum(all_token_probs *
tf.log(tf.maximum(1e-5, all_token_probs)), axis=1)
# is_eos_predicted is a [N] bool tensor, indicating whether
# <eos> has already been predicted previously in each sequence
is_eos_predicted = loop_state[2]
predicted_token_old = predicted_token
# if <eos> has already been predicted, now predict <eos> with
# prob 1
predicted_token = tf.where(is_eos_predicted, all_eos_pred,
predicted_token)
token_prob = tf.where(is_eos_predicted, all_one_prob,
token_prob)
neg_entropy = tf.where(is_eos_predicted, all_zero_entropy, neg_entropy)
is_eos_predicted = tf.logical_or(is_eos_predicted,
tf.equal(predicted_token_old, EOS_token))
# the prediction comes from the cell output at the previous
# timestep (t-1); feed it as input into timestep t
next_input = tf.nn.embedding_lookup(embedding_mat, predicted_token)
elements_finished = tf.greater_equal(time, T_max)
# loop_state is a 5-tuple, representing
# 1) the predicted_tokens
# 2) the prob of predicted_tokens
# 3) whether <eos> has already been predicted
# 4) the negative entropy of policy (accumulated across timesteps)
# 5) the attention
if loop_state is None: # time == 0
# Write the predicted token into the output
predicted_token_array = tf.TensorArray(dtype=tf.int32, size=T_max,
infer_shape=False)
token_prob_array = tf.TensorArray(dtype=tf.float32, size=T_max,
infer_shape=False)
att_array = tf.TensorArray(dtype=tf.float32, size=T_max,
infer_shape=False)
next_loop_state = (predicted_token_array,
token_prob_array,
tf.zeros(to_T([N]), dtype=tf.bool),
tf.zeros(to_T([N]), dtype=tf.float32),
att_array)
else: # time > 0
t_write = time-1
next_loop_state = (loop_state[0].write(t_write, predicted_token),
loop_state[1].write(t_write, token_prob),
is_eos_predicted,
loop_state[3] + neg_entropy,
loop_state[4].write(t_write, att))
return (elements_finished, next_input, next_cell_state, cell_output,
next_loop_state)
# The RNN
cell = _get_lstm_cell(num_layers, lstm_dim, apply_dropout)
_, _, decodes_ta = tf.nn.raw_rnn(cell, loop_fn, scope='lstm')
predicted_tokens = decodes_ta[0].stack()
token_probs = decodes_ta[1].stack()
neg_entropy = decodes_ta[3]
# atts has shape [T_decoder, T_encoder, N, 1]
atts = decodes_ta[4].stack()
self.atts = atts
# word_vecs has shape [T_decoder, N, encoder_embed_dim]
word_vecs = tf.reduce_sum(atts*self.embedded_input_seq, axis=1)
predicted_tokens.set_shape([None, None])
token_probs.set_shape([None, None])
neg_entropy.set_shape([None])
word_vecs.set_shape([None, None, self.encoder_embed_dim])
self.predicted_tokens = predicted_tokens
self.token_probs = token_probs
self.neg_entropy = neg_entropy
self.word_vecs = word_vecs
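# A minimal construction sketch for AttentionSeq2Seq (illustrative only; the
# vocabulary sizes, dimensions and T_decoder below are made-up values, not the
# ones used in the original experiments). It wires time-major int32 placeholders
# into the encoder-decoder graph defined above.
def _example_build_attention_seq2seq():
    input_seq_batch = tf.placeholder(tf.int32, [None, None])   # [T_encoder, N]
    seq_length_batch = tf.placeholder(tf.int32, [None])        # [N]
    model = AttentionSeq2Seq(
        input_seq_batch, seq_length_batch, T_decoder=10,
        num_vocab_txt=1000, embed_dim_txt=300,
        num_vocab_nmn=20, embed_dim_nmn=300,
        lstm_dim=512, num_layers=2, EOS_token=0,
        encoder_dropout=False, decoder_dropout=False,
        decoder_sampling=False)
    # model.predicted_tokens: [T_decoder, N]; model.token_probs: [T_decoder, N]
    return model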
| {
"repo_name": "ronghanghu/n2nmn",
"path": "models_shapes/nmn3_netgen_att.py",
"copies": "1",
"size": "14699",
"license": "bsd-2-clause",
"hash": -2274397952943021600,
"line_mean": 51.6845878136,
"line_max": 98,
"alpha_frac": 0.5407850874,
"autogenerated": false,
"ratio": 4.0051771117166215,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0037696130783384883,
"num_lines": 279
} |
from __future__ import absolute_import, division, print_function
import tensorflow as tf
from tensorflow_probability.python import distributions as tfd
from tensorflow_probability.python import layers as tfl
from tensorflow_probability.python.internal import \
distribution_util as dist_util
from tensorflow_probability.python.layers import DistributionLambda
from tensorflow_probability.python.layers.distribution_layer import _event_size
from odin.bay.distributions import NegativeBinomialDisp, ZeroInflated
__all__ = [
'PoissonLayer', 'NegativeBinomialDispLayer', 'NegativeBinomialLayer',
'ZINegativeBinomialDispLayer', 'ZINegativeBinomialLayer', 'ZIPoissonLayer'
]
PoissonLayer = tfl.IndependentPoisson
class NegativeBinomialLayer(DistributionLambda):
"""An independent NegativeBinomial Keras layer.
Parameters
----------
event_shape: integer vector `Tensor` representing the shape of single
draw from this distribution.
given_log_count : boolean
is the input representing log count values or the count itself
dispersion : {'full', 'share', 'single'}
'full' creates a dispersion value for each individual data point,
'share' creates a single vector of dispersion for all examples, and
'single' uses a single value as dispersion for all data points.
Note: the dispersion in this case is the probability of success.
convert_to_tensor_fn: Python `callable` that takes a `tfd.Distribution`
instance and returns a `tf.Tensor`-like object.
Default value: `tfd.Distribution.sample`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
Default value: `False`.
**kwargs: Additional keyword arguments passed to `tf.keras.Layer`.
"""
def __init__(self,
event_shape=(),
given_log_count=True,
dispersion='full',
convert_to_tensor_fn=tfd.Distribution.sample,
validate_args=False,
activity_regularizer=None,
**kwargs):
dispersion = str(dispersion).lower()
assert dispersion in ('full', 'single', 'share'), \
"Only support three different dispersion value: 'full', 'single' and " + \
"'share', but given: %s" % dispersion
super(NegativeBinomialLayer,
self).__init__(lambda t: type(self).new(
t, event_shape, given_log_count, dispersion, validate_args),
convert_to_tensor_fn,
activity_regularizer=activity_regularizer,
**kwargs)
@staticmethod
def new(params,
event_shape=(),
given_log_count=True,
dispersion='full',
validate_args=False,
name=None):
"""Create the distribution instance from a `params` vector."""
with tf.compat.v1.name_scope(name, 'NegativeBinomialLayer',
[params, event_shape]):
params = tf.convert_to_tensor(value=params, name='params')
event_shape = dist_util.expand_to_vector(tf.convert_to_tensor(
value=event_shape, name='event_shape', dtype=tf.int32),
tensor_name='event_shape')
output_shape = tf.concat([
tf.shape(input=params)[:-1],
event_shape,
],
axis=0)
ndims = output_shape.shape[0]
total_count_params, logits_params = tf.split(params, 2, axis=-1)
if dispersion == 'single':
logits_params = tf.reduce_mean(logits_params)
elif dispersion == 'share':
logits_params = tf.reduce_mean(logits_params,
axis=tf.range(0,
ndims - 1,
dtype='int32'),
keepdims=True)
if given_log_count:
total_count_params = tf.exp(total_count_params, name='total_count')
return tfd.Independent(
tfd.NegativeBinomial(total_count=tf.reshape(total_count_params,
output_shape),
logits=tf.reshape(logits_params, output_shape)
if dispersion == 'full' else logits_params,
validate_args=validate_args),
reinterpreted_batch_ndims=tf.size(input=event_shape),
validate_args=validate_args)
@staticmethod
def params_size(event_shape=(), name=None):
"""The number of `params` needed to create a single distribution."""
with tf.compat.v1.name_scope(name, 'NegativeBinomial_params_size',
[event_shape]):
event_shape = tf.convert_to_tensor(value=event_shape,
name='event_shape',
dtype=tf.int32)
return 2 * _event_size(event_shape,
name=name or 'NegativeBinomial_params_size')
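# A small usage sketch for NegativeBinomialLayer (illustrative only; the event
# shape, layer sizes and input dimension below are arbitrary). The Dense layer
# in front of it must output NegativeBinomialLayer.params_size(event_shape)
# units, i.e. 2 * event_size (one half parameterizes total_count, the other
# half the logits). Assumes eager execution so the size converts to an int.
def _example_negative_binomial_head(event_shape=(10,)):
    n_params = int(NegativeBinomialLayer.params_size(event_shape))  # 2 * 10 = 20
    return tf.keras.Sequential([
        tf.keras.layers.Dense(64, activation='relu', input_shape=(32,)),
        tf.keras.layers.Dense(n_params),
        NegativeBinomialLayer(event_shape=event_shape,
                              given_log_count=True, dispersion='full'),
    ])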
class NegativeBinomialDispLayer(DistributionLambda):
"""An alternative parameterization of the NegativeBinomial Keras layer.
Parameters
----------
event_shape: integer vector `Tensor` representing the shape of single
draw from this distribution.
given_log_mean : `bool`
is the input representing log mean values or the count mean itself
given_log_disp : `bool`
is the input representing log dispersion values
dispersion : {'full', 'share', 'single'}
'full' creates a dispersion value for each individual data point,
'share' creates a single vector of dispersion for all examples, and
'single' uses a single value as dispersion for all data points.
convert_to_tensor_fn: Python `callable` that takes a `tfd.Distribution`
instance and returns a `tf.Tensor`-like object.
Default value: `tfd.Distribution.sample`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
Default value: `False`.
**kwargs: Additional keyword arguments passed to `tf.keras.Layer`.
"""
def __init__(self,
event_shape=(),
given_log_mean=True,
given_log_disp=True,
dispersion='full',
convert_to_tensor_fn=tfd.Distribution.sample,
validate_args=False,
activity_regularizer=None,
**kwargs):
dispersion = str(dispersion).lower()
assert dispersion in ('full', 'single', 'share'), \
"Only support three different dispersion value: 'full', 'single' and " + \
"'share', but given: %s" % dispersion
super(NegativeBinomialDispLayer, self).__init__(
lambda t: type(self).new(t, event_shape, given_log_mean, given_log_disp,
dispersion, validate_args),
convert_to_tensor_fn,
activity_regularizer=activity_regularizer,
**kwargs)
@staticmethod
def new(params,
event_shape=(),
given_log_mean=True,
given_log_disp=True,
dispersion='full',
validate_args=False,
name=None):
""" Create the distribution instance from a `params` vector. """
with tf.compat.v1.name_scope(name, 'NegativeBinomialDispLayer',
[params, event_shape]):
params = tf.convert_to_tensor(value=params, name='params')
event_shape = dist_util.expand_to_vector(tf.convert_to_tensor(
value=event_shape, name='event_shape', dtype=tf.int32),
tensor_name='event_shape')
output_shape = tf.concat([
tf.shape(input=params)[:-1],
event_shape,
],
axis=0)
loc_params, disp_params = tf.split(params, 2, axis=-1)
if dispersion == 'single':
disp_params = tf.reduce_mean(disp_params)
elif dispersion == 'share':
disp_params = tf.reduce_mean(disp_params,
axis=tf.range(0,
output_shape.shape[0] - 1,
dtype='int32'),
keepdims=True)
if given_log_mean:
loc_params = tf.exp(loc_params, name='loc')
if given_log_disp:
disp_params = tf.exp(disp_params, name='disp')
return tfd.Independent(
NegativeBinomialDisp(loc=tf.reshape(loc_params, output_shape),
disp=tf.reshape(disp_params, output_shape)
if dispersion == 'full' else disp_params,
validate_args=validate_args),
reinterpreted_batch_ndims=tf.size(input=event_shape),
validate_args=validate_args)
@staticmethod
def params_size(event_shape=(), name=None):
"""The number of `params` needed to create a single distribution."""
with tf.compat.v1.name_scope(name, 'NegativeBinomialDisp_params_size',
[event_shape]):
event_shape = tf.convert_to_tensor(value=event_shape,
name='event_shape',
dtype=tf.int32)
return 2 * _event_size(event_shape,
name=name or 'NegativeBinomialDisp_params_size')
# ===========================================================================
# Zero inflated
# ===========================================================================
class ZIPoissonLayer(DistributionLambda):
"""A Independent zero-inflated Poisson keras layer
"""
def __init__(self,
event_shape=(),
convert_to_tensor_fn=tfd.Distribution.sample,
validate_args=False,
activity_regularizer=None,
**kwargs):
super(ZIPoissonLayer, self).__init__(
lambda t: type(self).new(t, event_shape, validate_args),
convert_to_tensor_fn,
activity_regularizer=activity_regularizer,
**kwargs)
@staticmethod
def new(params, event_shape=(), validate_args=False, name=None):
"""Create the distribution instance from a `params` vector."""
with tf.compat.v1.name_scope(name, 'ZIPoissonLayer', [params, event_shape]):
params = tf.convert_to_tensor(value=params, name='params')
event_shape = dist_util.expand_to_vector(tf.convert_to_tensor(
value=event_shape, name='event_shape', dtype=tf.int32),
tensor_name='event_shape')
output_shape = tf.concat([
tf.shape(input=params)[:-1],
event_shape,
],
axis=0)
(log_rate_params, logits_params) = tf.split(params, 2, axis=-1)
zip = ZeroInflated(count_distribution=tfd.Poisson(
log_rate=tf.reshape(log_rate_params, output_shape),
validate_args=validate_args),
logits=tf.reshape(logits_params, output_shape),
validate_args=validate_args)
return tfd.Independent(
zip,
reinterpreted_batch_ndims=tf.size(input=event_shape),
validate_args=validate_args)
@staticmethod
def params_size(event_shape=(), name=None):
"""The number of `params` needed to create a single distribution."""
with tf.compat.v1.name_scope(name,
'ZIPoisson_params_size',
[event_shape]):
event_shape = tf.convert_to_tensor(value=event_shape,
name='event_shape',
dtype=tf.int32)
return 2 * _event_size(
event_shape, name=name or 'ZIPoisson_params_size')
class ZINegativeBinomialLayer(DistributionLambda):
"""A Independent zero-inflated negative binomial keras layer
Parameters
----------
event_shape: integer vector `Tensor` representing the shape of single
draw from this distribution.
given_log_count : boolean
is the input representing log count values or the count itself
dispersion : {'full', 'share', 'single'}
'full' creates a dispersion value for each individual data point,
'share' creates a single vector of dispersion for all examples, and
'single' uses a single value as dispersion for all data points.
convert_to_tensor_fn: Python `callable` that takes a `tfd.Distribution`
instance and returns a `tf.Tensor`-like object.
Default value: `tfd.Distribution.sample`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
Default value: `False`.
**kwargs: Additional keyword arguments passed to `tf.keras.Layer`.
"""
def __init__(self,
event_shape=(),
given_log_count=True,
dispersion='full',
convert_to_tensor_fn=tfd.Distribution.sample,
validate_args=False,
activity_regularizer=None,
**kwargs):
super(ZINegativeBinomialLayer,
self).__init__(lambda t: type(self).new(
t, event_shape, given_log_count, dispersion, validate_args),
convert_to_tensor_fn,
activity_regularizer=activity_regularizer,
**kwargs)
@staticmethod
def new(params,
event_shape=(),
given_log_count=True,
dispersion='full',
validate_args=False,
name=None):
"""Create the distribution instance from a `params` vector."""
with tf.compat.v1.name_scope(name, 'ZINegativeBinomialLayer',
[params, event_shape]):
params = tf.convert_to_tensor(value=params, name='params')
event_shape = dist_util.expand_to_vector(tf.convert_to_tensor(
value=event_shape, name='event_shape', dtype=tf.int32),
tensor_name='event_shape')
output_shape = tf.concat([
tf.shape(input=params)[:-1],
event_shape,
],
axis=0)
ndims = output_shape.shape[0]
(total_count_params, logits_params, rate_params) = tf.split(params,
3,
axis=-1)
if dispersion == 'single':
logits_params = tf.reduce_mean(logits_params)
elif dispersion == 'share':
logits_params = tf.reduce_mean(logits_params,
axis=tf.range(0,
ndims - 1,
dtype='int32'),
keepdims=True)
if given_log_count:
total_count_params = tf.exp(total_count_params, name='total_count')
nb = tfd.NegativeBinomial(total_count=tf.reshape(total_count_params,
output_shape),
logits=tf.reshape(logits_params, output_shape)
if dispersion == 'full' else logits_params,
validate_args=validate_args)
zinb = ZeroInflated(count_distribution=nb,
logits=tf.reshape(rate_params, output_shape),
validate_args=validate_args)
return tfd.Independent(
zinb,
reinterpreted_batch_ndims=tf.size(input=event_shape),
validate_args=validate_args)
@staticmethod
def params_size(event_shape=(), name=None):
"""The number of `params` needed to create a single distribution."""
with tf.compat.v1.name_scope(name,
'ZeroInflatedNegativeBinomial_params_size',
[event_shape]):
event_shape = tf.convert_to_tensor(value=event_shape,
name='event_shape',
dtype=tf.int32)
return 3 * _event_size(
event_shape, name=name or 'ZeroInflatedNegativeBinomial_params_size')
class ZINegativeBinomialDispLayer(DistributionLambda):
"""A Independent zero-inflated negative binomial (alternative
parameterization) keras layer
Parameters
----------
event_shape: integer vector `Tensor` representing the shape of single
draw from this distribution.
given_log_mean : boolean
is the input representing log count values or the count itself
given_log_disp : boolean
is the input representing log dispersion values
dispersion : {'full', 'share', 'single'}
'full' creates a dispersion value for each individual data point,
'share' creates a single vector of dispersion for all examples, and
'single' uses a single value as dispersion for all data points.
convert_to_tensor_fn: Python `callable` that takes a `tfd.Distribution`
instance and returns a `tf.Tensor`-like object.
Default value: `tfd.Distribution.sample`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
Default value: `False`.
**kwargs: Additional keyword arguments passed to `tf.keras.Layer`.
"""
def __init__(self,
event_shape=(),
given_log_mean=True,
given_log_disp=True,
dispersion='full',
convert_to_tensor_fn=tfd.Distribution.sample,
validate_args=False,
activity_regularizer=None,
**kwargs):
super(ZINegativeBinomialDispLayer, self).__init__(
lambda t: type(self).new(t, event_shape, given_log_mean, given_log_disp,
dispersion, validate_args),
convert_to_tensor_fn,
activity_regularizer=activity_regularizer,
**kwargs)
@staticmethod
def new(params,
event_shape=(),
given_log_mean=True,
given_log_disp=True,
dispersion='full',
validate_args=False,
name=None):
"""Create the distribution instance from a `params` vector."""
with tf.compat.v1.name_scope(name, 'ZINegativeBinomialDispLayer',
[params, event_shape]):
params = tf.convert_to_tensor(value=params, name='params')
event_shape = dist_util.expand_to_vector(tf.convert_to_tensor(
value=event_shape, name='event_shape', dtype=tf.int32),
tensor_name='event_shape')
output_shape = tf.concat([
tf.shape(input=params)[:-1],
event_shape,
],
axis=0)
# splitting the parameters
(loc_params, disp_params, rate_params) = tf.split(params, 3, axis=-1)
if dispersion == 'single':
disp_params = tf.reduce_mean(disp_params)
elif dispersion == 'share':
disp_params = tf.reduce_mean(disp_params,
axis=tf.range(0,
output_shape.shape[0] - 1,
dtype='int32'),
keepdims=True)
# as count value, do exp if necessary
if given_log_mean:
loc_params = tf.exp(loc_params, name='loc')
if given_log_disp:
disp_params = tf.exp(disp_params, name='disp')
# create the distribution
nb = NegativeBinomialDisp(loc=tf.reshape(loc_params, output_shape),
disp=tf.reshape(disp_params, output_shape)
if dispersion == 'full' else disp_params,
validate_args=validate_args)
zinb = ZeroInflated(count_distribution=nb,
logits=tf.reshape(rate_params, output_shape),
validate_args=validate_args)
return tfd.Independent(
zinb,
reinterpreted_batch_ndims=tf.size(input=event_shape),
validate_args=validate_args)
@staticmethod
def params_size(event_shape=(), name=None):
"""The number of `params` needed to create a single distribution."""
with tf.compat.v1.name_scope(name, 'ZINegativeBinomialDisp_params_size',
[event_shape]):
event_shape = tf.convert_to_tensor(value=event_shape,
name='event_shape',
dtype=tf.int32)
return 3 * _event_size(event_shape,
name=name or 'ZINegativeBinomialDisp_params_size')
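# A sketch of how the zero-inflated heads differ from the plain ones
# (illustrative only; shapes and sizes are arbitrary, and eager execution is
# assumed so params_size converts to an int). The ZI layers split their input
# into three blocks instead of two, so params_size() is 3 * event_size, e.g.
# ZINegativeBinomialLayer consumes (total_count, logits, zero-inflation logits).
def _example_zero_inflated_head(event_shape=(10,)):
    n_params = int(ZINegativeBinomialLayer.params_size(event_shape))  # 3 * 10
    return tf.keras.Sequential([
        tf.keras.layers.Dense(n_params, input_shape=(32,)),
        ZINegativeBinomialLayer(event_shape=event_shape, dispersion='full'),
    ])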
| {
"repo_name": "imito/odin",
"path": "odin/bay/distribution_layers/count_layers.py",
"copies": "1",
"size": "21219",
"license": "mit",
"hash": -4027891369750667000,
"line_mean": 43.2985386221,
"line_max": 80,
"alpha_frac": 0.5719873698,
"autogenerated": false,
"ratio": 4.318949725218808,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5390937095018807,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import tensorflow as tf
from tensorflow.python.framework import dtypes
from tensorflow.contrib import learn as tflearn
from tensorflow.contrib import layers as tflayers
def lstm_model(num_units, rnn_layers, dense_layers=None, learning_rate=0.1, optimizer='Adagrad'):
"""
Creates a deep model based on:
* stacked lstm cells
* an optional dense layers
:param num_units: the size of the cells.
:param rnn_layers: list of int or dict
* list of int: the steps used to instantiate the `BasicLSTMCell` cell
* list of dict: [{steps: int, keep_prob: int}, ...]
:param dense_layers: list of nodes for each layer
:return: the model definition
"""
def lstm_cells(layers):
if isinstance(layers[0], dict):
return [tf.contrib.rnn.DropoutWrapper(
tf.contrib.rnn.BasicLSTMCell(
layer['num_units'], state_is_tuple=True
),
layer['keep_prob']
) if layer.get('keep_prob') else tf.contrib.rnn.BasicLSTMCell(
layer['num_units'],
state_is_tuple=True
) for layer in layers
]
return [tf.contrib.rnn.BasicLSTMCell(steps, state_is_tuple=True) for steps in layers]
def dnn_layers(input_layers, layers):
if layers and isinstance(layers, dict):
return tflayers.stack(input_layers, tflayers.fully_connected,
layers['layers'],
activation=layers.get('activation'),
dropout=layers.get('dropout'))
elif layers:
return tflayers.stack(input_layers, tflayers.fully_connected, layers)
else:
return input_layers
def _lstm_model(X, y):
stacked_lstm = tf.contrib.rnn.MultiRNNCell(lstm_cells(rnn_layers), state_is_tuple=True)
x_ = tf.unstack(X, axis=1, num=num_units)
output, layers = tf.contrib.rnn.static_rnn(stacked_lstm, x_, dtype=dtypes.float32)
output = dnn_layers(output[-1], dense_layers)
prediction, loss = tflearn.models.linear_regression(output, y)
train_op = tf.contrib.layers.optimize_loss(
loss, tf.contrib.framework.get_global_step(), optimizer=optimizer,
learning_rate=learning_rate)
return prediction, loss, train_op
return _lstm_model
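# A minimal usage sketch for lstm_model (illustrative only; the layer sizes,
# keep_prob values and dense sizes are arbitrary). rnn_layers may mix plain
# cells and dropout-wrapped cells by passing dicts with 'num_units' and an
# optional 'keep_prob' key, matching what lstm_cells() expects above.
def _example_lstm_model_fn():
    return lstm_model(
        num_units=10,                                    # time steps unstacked from X
        rnn_layers=[{'num_units': 64, 'keep_prob': 0.8},
                    {'num_units': 64}],                  # second layer without dropout
        dense_layers=[32, 16],
        learning_rate=0.05,
        optimizer='Adagrad')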
| {
"repo_name": "irontarkus95/MET-Oracle-lstm-weather-prediction",
"path": "LSTM_RCNN/lstm.py",
"copies": "1",
"size": "2469",
"license": "mit",
"hash": -932313319185495000,
"line_mean": 41.5689655172,
"line_max": 97,
"alpha_frac": 0.6115836371,
"autogenerated": false,
"ratio": 4.054187192118227,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5165770829218227,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import tensorflow as tf
import polyaxon_lib as plx
from polyaxon_schemas.losses import SoftmaxCrossEntropyConfig
from polyaxon_schemas.metrics import AccuracyConfig
from polyaxon_schemas.optimizers import AdamConfig
def graph_fn(mode, features):
x = plx.layers.Embedding(input_dim=10000, output_dim=128)(features['source_token'])
x = plx.layers.LSTM(units=128, dropout=0.2, recurrent_dropout=0.2)(x)
x = plx.layers.Dense(units=2)(x)
return x
def model_fn(features, labels, params, mode, config):
model = plx.models.Classifier(
mode=mode,
graph_fn=graph_fn,
loss=SoftmaxCrossEntropyConfig(),
optimizer=AdamConfig(learning_rate=0.001),
metrics=[AccuracyConfig()],
summaries='all',
one_hot_encode=True,
n_classes=2)
return model(features=features, labels=labels, params=params, config=config)
def experiment_fn(output_dir):
"""Creates an experiment using LSTM architecture to classify IMDB sentiment dataset."""
dataset_dir = '../data/imdb'
plx.datasets.imdb.prepare(dataset_dir)
train_input_fn, eval_input_fn = plx.datasets.imdb.create_input_fn(dataset_dir)
experiment = plx.experiments.Experiment(
estimator=plx.estimators.Estimator(model_fn=model_fn, model_dir=output_dir),
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
train_steps=10000,
eval_steps=10)
return experiment
def main(*args):
plx.experiments.run_experiment(experiment_fn=experiment_fn,
output_dir="/tmp/polyaxon_logs/imdb_lsmt",
schedule='continuous_train_and_eval')
if __name__ == "__main__":
tf.logging.set_verbosity(tf.logging.INFO)
tf.app.run()
| {
"repo_name": "polyaxon/polyaxon-api",
"path": "examples/programatic_examples/imdb_sentiment_lstm.py",
"copies": "1",
"size": "1839",
"license": "mit",
"hash": 4223640110791726000,
"line_mean": 32.4363636364,
"line_max": 91,
"alpha_frac": 0.6742794997,
"autogenerated": false,
"ratio": 3.5028571428571427,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46771366425571426,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import tensorflow as tf
import tensorflow.contrib.layers as layers
from deep_rl.misc import slice_2d, get_vars_from_scope
def create_a3c_graph(input_shape, n_action, model, opt, beta=None, name='a3c'):
"""
Implements Actor Critic Model (A3C)
Returns a dictionary of Tensorflow graph operations to be with a
tf.Session instance.
Args:
n_action: A `int`. Number of actions agent can do.
model: The Tensorflow model
opt: A `tf.train.Optimizer`.
beta: A `float`. Regularization term for the entropy of the policy model.
If beta is `None` no regularization will be added.
"""
actions = tf.placeholder(tf.int32, shape=(None))
returns = tf.placeholder(tf.float32, shape=(None))
policy_in = tf.placeholder(tf.float32, shape=input_shape)
value_in = tf.placeholder(tf.float32, shape=input_shape)
tf.add_to_collection("actions", actions)
tf.add_to_collection("returns", returns)
tf.add_to_collection("policy_in", policy_in)
tf.add_to_collection("value_in", value_in)
with tf.variable_scope('actor'):
pnn = model(policy_in)
probs = tf.nn.softmax(layers.fully_connected(pnn, n_action))
with tf.variable_scope('critic'):
v_out = model(value_in)
value = layers.fully_connected(v_out, 1)
tf.add_to_collection("policy_out", probs)
tf.add_to_collection("value_out", value)
actor_vars = get_vars_from_scope('actor')
critic_vars = get_vars_from_scope('critic')
N = tf.shape(policy_in)[0]
p_vals = slice_2d(probs, tf.range(0, N), actions)
surr_loss = tf.log(p_vals + 1e-8)
policy_loss = -surr_loss * (returns - value)
if beta:
policy_loss += beta * (-tf.reduce_sum(probs * tf.log(probs + 1e-8), 1))
policy_loss = tf.reduce_mean(policy_loss, name="policy_loss")
value_loss = tf.reduce_mean(tf.square(returns - value), name="value_loss")
policy_train_op = opt.minimize(policy_loss, var_list=actor_vars)
value_train_op = opt.minimize(value_loss, var_list=critic_vars)
tf.add_to_collection("policy_loss", policy_loss)
tf.add_to_collection("value_loss", value_loss)
tf.add_to_collection("policy_train_op", policy_train_op)
tf.add_to_collection("value_train_op", value_train_op)
| {
"repo_name": "domluna/deep_rl",
"path": "deep_rl/graphs/a3c.py",
"copies": "1",
"size": "2340",
"license": "mit",
"hash": 1009008459853267300,
"line_mean": 36.7419354839,
"line_max": 81,
"alpha_frac": 0.6666666667,
"autogenerated": false,
"ratio": 3.20109439124487,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.436776105794487,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import tensorflow as tf
import torch
from tensorflow import nest
from tensorflow.python import keras
from odin import backend as bk
# ===========================================================================
# Base class
# ===========================================================================
class BaseAttention(keras.layers.Layer):
pass
# ===========================================================================
# Attention classes
# ===========================================================================
class SoftAttention(BaseAttention):
""" Original implementation from Tensorflow:
`tensorflow/python/keras/layers/dense_attention.py`
Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Base Attention class, modified for supporting:
- Multi-head attention
- Self-attention mechanism
- Using more odin.backend function to make it easier transfer between
tensorflow and pytorch
The meaning of `query`, `value` and `key` depend on the application. In the
case of text similarity, for example, `query` is the sequence embeddings of
the first piece of text and `value` is the sequence embeddings of the second
piece of text. Hence, the attention determines alignment between `query` and
`value`, `key` is usually the same tensor as value.
Args:
causal: Boolean. Set to `True` for decoder self-attention. Adds a mask such
that position `i` cannot attend to positions `j > i`. This prevents the
flow of information from the future towards the past.
return_score: Boolean. Set to `True` for returning the attention scores.
Call Arguments:
inputs: List of the following tensors:
* query: Query `Tensor` of shape `[batch_size, Tq, dim]`.
* value: Value `Tensor` of shape `[batch_size, Tv, dim]`.
* key: Optional key `Tensor` of shape `[batch_size, Tv, dim]`. If not
given, will use `value` for both `key` and `value`, which is the
most common case.
mask: List of the following tensors:
* query_mask: A boolean mask `Tensor` of shape `[batch_size, Tq]`.
If given, the output will be zero at the positions where
`mask==False`.
* value_mask: A boolean mask `Tensor` of shape `[batch_size, Tv]`.
If given, will apply the mask such that values at positions where
`mask==False` do not contribute to the result.
Output shape:
Attention outputs of shape `[batch_size, Tq, dim]`.
References:
[1] Graves, A., et al., 2014. Neural Turing Machines. arXiv:1410.5401 [cs].
[2] Xu, K., et al., 2015. Show, Attend and Tell: Neural Image Caption
Generation with Visual Attention. arXiv:1502.03044 [cs].
[3] Luong, M.T., et al., 2015. Effective Approaches to Attention-based
Neural Machine Translation. arXiv:1508.04025 [cs].
[4] Kim, Y., Denton, C., Hoang, L., Rush, A.M., 2017. Structured Attention
Networks. arXiv:1702.00887 [cs].
[5] Vaswani, A., et al., 2017. Attention Is All You Need.
arXiv:1706.03762 [cs].
"""
def __init__(self,
input_dim=None,
causal=False,
return_score=False,
multihead_norm=0.,
scale_initializer='one',
scale_tied=True,
attention_type='mul',
attention_activation='softmax',
**kwargs):
super(SoftAttention, self).__init__(**kwargs)
self.causal = causal
self.return_score = bool(return_score)
self.multihead_norm = multihead_norm
self.supports_masking = True
# ====== initialize scale ====== #
if not scale_tied and input_dim is None:
raise ValueError("If scale_tied=False, the input_dim must be given")
scale = 1
if scale_initializer is not None:
scale = bk.parse_initializer(scale_initializer, self)
if scale_tied:
scale = bk.variable(initial_value=scale(()),
trainable=True,
framework=self)
else:
scale = bk.variable(initial_value=scale(nest.flatten(input_dim)),
trainable=True,
framework=self)
self.attention_scale = scale
self.attention_type = str(attention_type).strip().lower()
self.attention_activation = bk.parse_activation(attention_activation, self)
def calculate_scores(self, query, key):
"""Calculates attention scores (a.k.a logits values).
Args:
query: Query tensor of shape `[batch_size, Tq, dim]`.
key: Key tensor of shape `[batch_size, Tv, dim]`.
Returns:
Tensor of shape `[batch_size, Tq, Tv]`.
"""
if self.attention_type == 'mul':
# TODO: this might be wrong
return bk.matmul(self.attention_scale * query, bk.swapaxes(key, 1, 2))
elif self.attention_type == 'add':
# [batch_size, Tq, 1, dim]
q = bk.expand_dims(query, axis=2)
# [batch_size, 1, Tv, dim]
k = bk.expand_dims(key, axis=1)
return bk.reduce_sum(self.attention_scale * bk.tanh(q + k), axis=-1)
else:
raise NotImplementedError("No support for attention_type='%s'" %
self.attention_type)
def _apply_scores(self, scores, value, scores_mask=None):
"""Applies attention scores to the given value tensor.
To use this method in your attention layer, follow the steps:
* Use `query` tensor of shape `[batch_size, Tq]` and `key` tensor of shape
`[batch_size, Tv]` to calculate the attention `scores`.
* Pass `scores` and `value` tensors to this method. The method applies
`scores_mask`, calculates `attention_distribution = softmax(scores)`, then
returns `matmul(attention_distribution, value)`.
* Apply `query_mask` and return the result.
Args:
scores: Scores float tensor of shape `[batch_size, Tq, Tv]`.
value: Value tensor of shape `[batch_size, Tv, dim]`.
scores_mask: A boolean mask `Tensor` of shape `[batch_size, 1, Tv]` or
`[batch_size, Tq, Tv]`. If given, scores at positions where
`scores_mask==False` do not contribute to the result. It must contain
at least one `True` value in each line along the last dimension.
Returns:
Tensor of shape `[batch_size, Tq, dim]`.
"""
if scores_mask is not None:
padding_mask = bk.logical_not(scores_mask)
# Bias so padding positions do not contribute to attention distribution.
scores -= 1.e9 * bk.cast(padding_mask, dtype=scores.dtype)
# [batch_size, Tq, Tv]
attention_distribution = self.attention_activation(scores)
return bk.matmul(attention_distribution, value)
def call(self, query, value=None, key=None, mask=None, training=None):
if value is None:
value = query
if key is None:
key = value
with bk.framework_(self):
query = bk.array(query)
key = bk.array(key)
value = bk.array(value)
scores = self.calculate_scores(query=query, key=key)
# ====== prepare the mask ====== #
q_mask = mask[0] if mask else None
v_mask = mask[1] if mask else None
if v_mask is not None:
# Mask of shape [batch_size, 1, Tv].
v_mask = bk.expand_dims(v_mask, axis=-2)
if self.causal:
# Creates a lower triangular mask, so position i cannot attend to
# positions j>i. This prevents the flow of information from the future
# into the past.
scores_shape = scores.shape
# causal_mask_shape = [1, Tq, Tv].
causal_mask_shape = bk.concatenate(
[bk.ones_like(scores_shape[:-2]), scores_shape[-2:]], axis=0)
causal_mask = bk.tril_mask(causal_mask_shape)
else:
causal_mask = None
scores_mask = bk.logical_and(v_mask, causal_mask)
# ====== applying the attention ====== #
result = self._apply_scores(scores=scores,
value=value,
scores_mask=scores_mask)
# ====== applying the mask ====== #
if q_mask is not None:
# Mask of shape [batch_size, Tq, 1].
q_mask = bk.expand_dims(q_mask, axis=-1)
result *= bk.cast(q_mask, dtype=result.dtype)
return result
def compute_mask(self, inputs, mask=None):
with bk.framework_(self):
if mask:
q_mask = mask[0]
if q_mask is None:
return None
return bk.array(q_mask)
return None
def get_config(self):
config = {'causal': self.causal}
base_config = super(BaseAttention, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
# ===========================================================================
# Soft and Hard attention
# ===========================================================================
class HardAttention(SoftAttention):
pass
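# A minimal usage sketch for SoftAttention (illustrative only; batch size,
# sequence lengths and feature dimension are arbitrary, and 'add'-style
# attention with a softmax activation is just one possible configuration).
# Self-attention is obtained by passing only `query`; here query and value
# come from two different sequences.
def _example_soft_attention():
    attention = SoftAttention(attention_type='add',
                              attention_activation='softmax')
    query = tf.random.normal((8, 5, 32))   # [batch_size, Tq, dim]
    value = tf.random.normal((8, 7, 32))   # [batch_size, Tv, dim]
    result = attention(query, value)       # [batch_size, Tq, dim]
    return result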
| {
"repo_name": "imito/odin",
"path": "odin/networks/attention.py",
"copies": "1",
"size": "8902",
"license": "mit",
"hash": 4074157343303613000,
"line_mean": 38.0438596491,
"line_max": 80,
"alpha_frac": 0.594810155,
"autogenerated": false,
"ratio": 3.8403796376186365,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49351897926186367,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import tensorflow as tf
def conv_layer(name, bottom, kernel_size, stride, output_dim, padding='SAME',
bias_term=True, weights_initializer=None, biases_initializer=None, reuse=None):
# input has shape [batch, in_height, in_width, in_channels]
input_dim = bottom.get_shape().as_list()[-1]
# weights and biases variables
with tf.variable_scope(name, reuse=reuse):
# initialize the variables
if weights_initializer is None:
weights_initializer = tf.contrib.layers.xavier_initializer_conv2d()
if bias_term and biases_initializer is None:
biases_initializer = tf.constant_initializer(0.)
# filter has shape [filter_height, filter_width, in_channels, out_channels]
weights = tf.get_variable("weights",
[kernel_size, kernel_size, input_dim, output_dim],
initializer=weights_initializer)
if bias_term:
biases = tf.get_variable("biases", output_dim,
initializer=biases_initializer)
if not reuse:
tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES,
tf.nn.l2_loss(weights))
conv = tf.nn.conv2d(bottom, filter=weights,
strides=[1, stride, stride, 1], padding=padding)
if bias_term:
conv = tf.nn.bias_add(conv, biases)
return conv
def conv_relu_layer(name, bottom, kernel_size, stride, output_dim, padding='SAME',
bias_term=True, weights_initializer=None, biases_initializer=None, reuse=None):
conv = conv_layer(name, bottom, kernel_size, stride, output_dim, padding,
bias_term, weights_initializer, biases_initializer, reuse=reuse)
relu = tf.nn.relu(conv)
return relu
def deconv_layer(name, bottom, kernel_size, stride, output_dim, padding='SAME',
bias_term=True, weights_initializer=None, biases_initializer=None, reuse=None):
# input_shape is [batch, in_height, in_width, in_channels]
input_shape = bottom.get_shape().as_list()
batch_size, input_height, input_width, input_dim = input_shape
output_shape = [batch_size, input_height*stride, input_width*stride, output_dim]
# weights and biases variables
with tf.variable_scope(name, reuse=reuse):
# initialize the variables
if weights_initializer is None:
weights_initializer = tf.contrib.layers.xavier_initializer_conv2d()
if bias_term and biases_initializer is None:
biases_initializer = tf.constant_initializer(0.)
# filter has shape [filter_height, filter_width, out_channels, in_channels]
weights = tf.get_variable("weights",
[kernel_size, kernel_size, output_dim, input_dim],
initializer=weights_initializer)
if bias_term:
biases = tf.get_variable("biases", output_dim,
initializer=biases_initializer)
if not reuse:
tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES,
tf.nn.l2_loss(weights))
deconv = tf.nn.conv2d_transpose(bottom, filter=weights,
output_shape=output_shape, strides=[1, stride, stride, 1],
padding=padding)
if bias_term:
deconv = tf.nn.bias_add(deconv, biases)
return deconv
def deconv_relu_layer(name, bottom, kernel_size, stride, output_dim, padding='SAME',
bias_term=True, weights_initializer=None, biases_initializer=None, reuse=None):
deconv = deconv_layer(name, bottom, kernel_size, stride, output_dim, padding,
bias_term, weights_initializer, biases_initializer, reuse=reuse)
relu = tf.nn.relu(deconv)
return relu
def pooling_layer(name, bottom, kernel_size, stride):
pool = tf.nn.max_pool(bottom, ksize=[1, kernel_size, kernel_size, 1],
strides=[1, stride, stride, 1], padding='SAME', name=name)
return pool
def fc_layer(name, bottom, output_dim, bias_term=True, weights_initializer=None,
biases_initializer=None, reuse=None):
# flatten bottom input
# input has shape [batch, in_height, in_width, in_channels]
shape = bottom.get_shape().as_list()
input_dim = 1
for d in shape[1:]:
input_dim *= d
flat_bottom = tf.reshape(bottom, [-1, input_dim])
# weights and biases variables
with tf.variable_scope(name, reuse=reuse):
# initialize the variables
if weights_initializer is None:
weights_initializer = tf.contrib.layers.xavier_initializer()
if bias_term and biases_initializer is None:
biases_initializer = tf.constant_initializer(0.)
# weights has shape [input_dim, output_dim]
weights = tf.get_variable("weights", [input_dim, output_dim],
initializer=weights_initializer)
if bias_term:
biases = tf.get_variable("biases", output_dim,
initializer=biases_initializer)
if not reuse:
tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES,
tf.nn.l2_loss(weights))
if bias_term:
fc = tf.nn.xw_plus_b(flat_bottom, weights, biases)
else:
fc = tf.matmul(flat_bottom, weights)
return fc
def fc_relu_layer(name, bottom, output_dim, bias_term=True,
weights_initializer=None, biases_initializer=None, reuse=None):
fc = fc_layer(name, bottom, output_dim, bias_term, weights_initializer,
biases_initializer, reuse=reuse)
relu = tf.nn.relu(fc)
return relu
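# A small composition sketch for the layer helpers above (illustrative only;
# the input placeholder shape and layer sizes are arbitrary). It stacks
# conv_relu_layer, pooling_layer and fc_layer the way a small classifier head
# might, without variable reuse.
def _example_small_convnet(num_classes=10):
    images = tf.placeholder(tf.float32, [None, 32, 32, 3])
    conv1 = conv_relu_layer('conv1', images, kernel_size=3, stride=1, output_dim=16)
    pool1 = pooling_layer('pool1', conv1, kernel_size=2, stride=2)
    conv2 = conv_relu_layer('conv2', pool1, kernel_size=3, stride=1, output_dim=32)
    pool2 = pooling_layer('pool2', conv2, kernel_size=2, stride=2)
    logits = fc_layer('fc1', pool2, output_dim=num_classes)
    return images, logits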
| {
"repo_name": "ronghanghu/n2nmn",
"path": "util/cnn.py",
"copies": "1",
"size": "5605",
"license": "bsd-2-clause",
"hash": 4749091447886653000,
"line_mean": 43.4841269841,
"line_max": 101,
"alpha_frac": 0.6381801963,
"autogenerated": false,
"ratio": 3.805159538357094,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4943339734657094,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import tensorflow as tf
from deep_rl.misc.utils import get_vars_from_scope, slice_2d
def create_dqn_graph(n_action, model, opt, gamma=0.99):
"""
Implements Deep Q-Learning
if terminal:
y = r
else:
y = r + gamma * max_a' Q(s', a', theta-)
L = (y - Q(s, a; theta)) ** 2
"""
actions = tf.placeholder(tf.int32)
terminals = tf.placeholder(tf.bool)
rewards = tf.placeholder(tf.float32)
with tf.variable_scope('policy'):
p_in, p_out = model()
with tf.variable_scope('target'):
t_in, t_out = model()
p_vars = get_vars_from_scope('policy')
t_vars = get_vars_from_scope('target')
mask = (tf.cast(tf.logical_not(terminals), tf.float32))
y = rewards + mask * gamma * tf.reduce_max(t_out, 1)
N = tf.shape(p_in)[0]
policy_probs = tf.nn.softmax(p_out)
p_vals = slice_2d(policy_probs, tf.range(0, N), actions)
loss_op = tf.reduce_mean(tf.square(y - p_vals))
train_op = opt.minimize(loss_op, var_list=p_vars)
update_targets_op = [tf.assign(tv, pv) for (tv, pv) in zip(t_vars, p_vars)]
return dict(
# inputs
policy_input=p_in,
target_input=t_in,
actions=actions,
rewards=rewards,
terminals=terminals,
# outputs
policy_qvals=p_out,
target_qvals=t_out,
policy_probs=policy_probs,
loss_op=loss_op,
train_op=train_op,
# misc
update_targets_op=update_targets_op)
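# A sketch of one DQN training step using the graph dict returned above
# (illustrative only; the session, replay batch and array shapes are
# stand-ins). The target network is refreshed separately by running
# graph['update_targets_op'].
def _example_dqn_step(sess, graph, states, actions, rewards, next_states, terminals):
    # Q-targets are computed from the target network fed with next states
    feed = {graph['policy_input']: states,
            graph['target_input']: next_states,
            graph['actions']: actions,
            graph['rewards']: rewards,
            graph['terminals']: terminals}
    loss, _ = sess.run([graph['loss_op'], graph['train_op']], feed_dict=feed)
    return loss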
| {
"repo_name": "domluna/deep_rl",
"path": "deep_rl/graphs/dqn.py",
"copies": "1",
"size": "1550",
"license": "mit",
"hash": 6630909429152993000,
"line_mean": 26.1929824561,
"line_max": 79,
"alpha_frac": 0.5909677419,
"autogenerated": false,
"ratio": 3.0155642023346303,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41065319442346304,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import tensorflow as tf
from deep_rl.misc.utils import get_vars_from_scope, slice_2d
def create_vpg_graph(n_action, policy_model, value_model, policy_opt, value_opt):
"""
Implements Vanilla Policy Gradient
"""
actions = tf.placeholder(tf.int32, shape=(None), name="actions")
advantages = tf.placeholder(tf.float32, shape=(None), name="advantages")
returns = tf.placeholder(tf.float32, shape=(None), name="returns")
with tf.variable_scope('policy'):
p_input, probs = policy_model()
with tf.variable_scope('value'):
v_input, value = value_model()
p_vars = get_vars_from_scope('policy')
v_vars = get_vars_from_scope('value')
N = tf.shape(p_input)[0]
p_vals = slice_2d(probs, tf.range(0, N), actions)
surr_loss = -tf.log(p_vals)
pf_loss_op = tf.reduce_mean(surr_loss * advantages, name="pf_loss_op")
pf_train_op = policy_opt.minimize(pf_loss_op, var_list=p_vars, name="pf_train_op")
vf_loss_op = tf.reduce_mean((value - returns)**2)
vf_train_op = value_opt.minimize(vf_loss_op, var_list=v_vars, name="vf_train_op")
return dict(actions=actions,
advantages=advantages,
returns=returns,
policy_input=p_input,
probs=probs,
value_input=v_input,
value=value,
policy_train_op=pf_train_op,
value_train_op=vf_train_op)
| {
"repo_name": "domluna/deep_rl",
"path": "deep_rl/graphs/vpg.py",
"copies": "1",
"size": "1497",
"license": "mit",
"hash": 3308588417870942000,
"line_mean": 34.6428571429,
"line_max": 86,
"alpha_frac": 0.6199064796,
"autogenerated": false,
"ratio": 3.198717948717949,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9314419990008243,
"avg_score": 0.0008408876619411172,
"num_lines": 42
} |
from __future__ import absolute_import, division, print_function
import tensorflow as tf
def full_connect(inputs,
weights_shape,
biases_shape,
is_train=True,
FLAGS=None):
"""
Define full-connect layer with reused Variables.
"""
weights = tf.get_variable(
"weights", weights_shape, initializer=tf.random_normal_initializer())
biases = tf.get_variable(
"biases", biases_shape, initializer=tf.random_normal_initializer())
layer = tf.matmul(inputs, weights) + biases
if FLAGS.enable_bn and is_train:
mean, var = tf.nn.moments(layer, axes=[0])
scale = tf.get_variable(
"scale", biases_shape, initializer=tf.random_normal_initializer())
shift = tf.get_variable(
"shift", biases_shape, initializer=tf.random_normal_initializer())
layer = tf.nn.batch_normalization(layer, mean, var, shift, scale,
FLAGS.bn_epsilon)
return layer
def full_connect_relu(inputs,
weights_shape,
biases_shape,
is_train=True,
FLAGS=None):
"""
Define full-connect layer and activation function with reused Variables.
"""
layer = full_connect(inputs, weights_shape, biases_shape, is_train, FLAGS)
layer = tf.nn.relu(layer)
return layer
def customized_inference(inputs,
input_units,
output_units,
is_train=True,
FLAGS=None):
"""
Define the customized model.
"""
hidden1_units = 128
hidden2_units = 32
hidden3_units = 8
with tf.variable_scope("input_layer"):
layer = full_connect_relu(inputs, [input_units, hidden1_units],
[hidden1_units], is_train, FLAGS)
with tf.variable_scope("layer_0"):
layer = full_connect_relu(layer, [hidden1_units, hidden2_units],
[hidden2_units], is_train, FLAGS)
with tf.variable_scope("layer_1"):
layer = full_connect_relu(layer, [hidden2_units, hidden3_units],
[hidden3_units], is_train, FLAGS)
if FLAGS.enable_dropout and is_train:
layer = tf.nn.dropout(layer, FLAGS.dropout_keep_prob)
with tf.variable_scope("output_layer"):
layer = full_connect(layer, [hidden3_units, output_units], [output_units],
is_train, FLAGS)
return layer
def dnn_inference(inputs, input_units, output_units, is_train=True,
FLAGS=None):
"""
Define the DNN model.
"""
# Example: [128, 64, 32, 16]
model_network_hidden_units = [int(i) for i in FLAGS.dnn_struct.split()]
with tf.variable_scope("input_layer"):
layer = full_connect_relu(inputs,
[input_units, model_network_hidden_units[0]],
[model_network_hidden_units[0]], is_train, FLAGS)
for i in range(len(model_network_hidden_units) - 1):
with tf.variable_scope("layer_{}".format(i)):
layer = full_connect_relu(layer, [
model_network_hidden_units[i], model_network_hidden_units[i + 1]
], [model_network_hidden_units[i + 1]], is_train, FLAGS)
with tf.variable_scope("output_layer"):
layer = full_connect(layer, [model_network_hidden_units[-1], output_units],
[output_units], is_train, FLAGS)
return layer
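# A tiny illustration of how dnn_inference consumes FLAGS.dnn_struct
# (illustrative only; the FLAGS stand-in below only carries the fields this
# path reads, and the sizes are arbitrary). "128 64 32 16" becomes hidden
# layers of 128, 64, 32 and 16 units before the output layer.
def _example_dnn_inference(inputs, input_units=9, output_units=2):
    class _Flags(object):
        dnn_struct = "128 64 32 16"
        enable_bn = False
    return dnn_inference(inputs, input_units, output_units,
                         is_train=True, FLAGS=_Flags())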
def lr_inference(inputs, input_units, output_units, is_train=True, FLAGS=None):
"""
Define the linear regression model.
"""
with tf.variable_scope("lr"):
layer = full_connect(inputs, [input_units, output_units], [output_units],
is_train, FLAGS)
return layer
def wide_and_deep_inference(inputs,
input_units,
output_units,
is_train=True,
FLAGS=None):
"""
Define the wide-and-deep model.
"""
return lr_inference(inputs, input_units,
output_units, is_train, FLAGS) + dnn_inference(
inputs, input_units, output_units, is_train, FLAGS)
def cnn_inference(inputs, input_units, output_units, is_train=True,
FLAGS=None):
"""
Define the CNN model.
"""
# [BATCH_SIZE, 9] -> [BATCH_SIZE, 3, 3, 1]
inputs = tf.reshape(inputs, [-1, 3, 3, 1])
# [BATCH_SIZE, 3, 3, 1] -> [BATCH_SIZE, 3, 3, 8]
with tf.variable_scope("conv_0"):
weights = tf.get_variable(
"weights", [3, 3, 1, 8], initializer=tf.random_normal_initializer())
bias = tf.get_variable(
"bias", [8], initializer=tf.random_normal_initializer())
layer = tf.nn.conv2d(inputs, weights, strides=[1, 1, 1, 1], padding="SAME")
layer = tf.nn.bias_add(layer, bias)
layer = tf.nn.relu(layer)
# [BATCH_SIZE, 3, 3, 8] -> [BATCH_SIZE, 3 * 3 * 8]
layer = tf.reshape(layer, [-1, 3 * 3 * 8])
# [BATCH_SIZE, 3 * 3 * 8] -> [BATCH_SIZE, LABEL_SIZE]
with tf.variable_scope("output_layer"):
weights = tf.get_variable(
"weights", [3 * 3 * 8, FLAGS.label_size],
initializer=tf.random_normal_initializer())
bias = tf.get_variable(
"bias", [FLAGS.label_size], initializer=tf.random_normal_initializer())
layer = tf.add(tf.matmul(layer, weights), bias)
return layer
def customized_cnn_inference(inputs,
input_units,
output_units,
is_train=True,
FLAGS=None):
"""
Define the CNN model.
"""
# TODO: Change if validate_batch_size is different
# [BATCH_SIZE, 512 * 512 * 1] -> [BATCH_SIZE, 512, 512, 1]
inputs = tf.reshape(inputs, [FLAGS.train_batch_size, 512, 512, 1])
# [BATCH_SIZE, 512, 512, 1] -> [BATCH_SIZE, 128, 128, 8]
with tf.variable_scope("conv0"):
weights = tf.get_variable(
"weights", [3, 3, 1, 8], initializer=tf.random_normal_initializer())
bias = tf.get_variable(
"bias", [8], initializer=tf.random_normal_initializer())
layer = tf.nn.conv2d(inputs, weights, strides=[1, 1, 1, 1], padding="SAME")
layer = tf.nn.bias_add(layer, bias)
layer = tf.nn.relu(layer)
layer = tf.nn.max_pool(
layer, ksize=[1, 4, 4, 1], strides=[1, 4, 4, 1], padding="SAME")
# [BATCH_SIZE, 128, 128, 8] -> [BATCH_SIZE, 32, 32, 8]
with tf.variable_scope("conv1"):
weights = tf.get_variable(
"weights", [3, 3, 8, 8], initializer=tf.random_normal_initializer())
bias = tf.get_variable(
"bias", [8], initializer=tf.random_normal_initializer())
layer = tf.nn.conv2d(layer, weights, strides=[1, 1, 1, 1], padding="SAME")
layer = tf.nn.bias_add(layer, bias)
layer = tf.nn.relu(layer)
layer = tf.nn.max_pool(
layer, ksize=[1, 4, 4, 1], strides=[1, 4, 4, 1], padding="SAME")
# [BATCH_SIZE, 32, 32, 8] -> [BATCH_SIZE, 8, 8, 8]
with tf.variable_scope("conv2"):
weights = tf.get_variable(
"weights", [3, 3, 8, 8], initializer=tf.random_normal_initializer())
bias = tf.get_variable(
"bias", [8], initializer=tf.random_normal_initializer())
layer = tf.nn.conv2d(layer, weights, strides=[1, 1, 1, 1], padding="SAME")
layer = tf.nn.bias_add(layer, bias)
layer = tf.nn.relu(layer)
layer = tf.nn.max_pool(
layer, ksize=[1, 4, 4, 1], strides=[1, 4, 4, 1], padding="SAME")
# [BATCH_SIZE, 8, 8, 8] -> [BATCH_SIZE, 8 * 8 * 8]
layer = tf.reshape(layer, [-1, 8 * 8 * 8])
# [BATCH_SIZE, 8 * 8 * 8] -> [BATCH_SIZE, LABEL_SIZE]
with tf.variable_scope("output"):
weights = tf.get_variable(
"weights", [8 * 8 * 8, FLAGS.label_size],
initializer=tf.random_normal_initializer())
bias = tf.get_variable(
"bias", [FLAGS.label_size], initializer=tf.random_normal_initializer())
layer = tf.add(tf.matmul(layer, weights), bias)
return layer
def lstm_inference(inputs,
input_units,
output_units,
is_train=True,
FLAGS=None):
RNN_HIDDEN_UNITS = 128
timesteps = 3
number_input = 3
weights = tf.Variable(tf.random_normal([RNN_HIDDEN_UNITS, output_units]))
biases = tf.Variable(tf.random_normal([output_units]))
# [BATCH_SIZE, 9] -> [BATCH_SIZE, 3, 3]
x = tf.reshape(inputs, [-1, timesteps, number_input])
# [BATCH_SIZE, 3, 3] -> 3 * [BATCH_SIZE, 3]
x = tf.unstack(x, timesteps, 1)
# output size is 128, state size is (c=128, h=128)
lstm_cell = tf.contrib.rnn.BasicLSTMCell(RNN_HIDDEN_UNITS, forget_bias=1.0)
  # outputs is a list of 3 tensors, each of shape [BATCH_SIZE, RNN_HIDDEN_UNITS]
outputs, states = tf.contrib.rnn.static_rnn(lstm_cell, x, dtype=tf.float32)
  # outputs[-1] is [BATCH_SIZE, RNN_HIDDEN_UNITS]; project it to [BATCH_SIZE, output_units]
layer = tf.matmul(outputs[-1], weights) + biases
return layer
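# Sketch (numpy only, illustrative) of the reshape/unstack step used by the
# recurrent models above: each 9-feature row is treated as 3 timesteps of 3
# features, and static_rnn then produces one hidden state per timestep.
def _unstack_example():
  import numpy as np
  batch = np.arange(9, dtype=np.float32).reshape(1, 9)
  sequence = batch.reshape(-1, 3, 3)             # [BATCH_SIZE, 3, 3]
  steps = [sequence[:, t, :] for t in range(3)]  # 3 * [BATCH_SIZE, 3]
  return [s.shape for s in steps]                # [(1, 3), (1, 3), (1, 3)]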
def bidirectional_lstm_inference(inputs,
input_units,
output_units,
is_train=True,
FLAGS=None):
RNN_HIDDEN_UNITS = 128
timesteps = 3
number_input = 3
weights = tf.Variable(tf.random_normal([RNN_HIDDEN_UNITS, output_units]))
biases = tf.Variable(tf.random_normal([output_units]))
# [BATCH_SIZE, 9] -> [BATCH_SIZE, 3, 3]
x = tf.reshape(inputs, [-1, timesteps, number_input])
# [BATCH_SIZE, 3, 3] -> 3 * [BATCH_SIZE, 3]
x = tf.unstack(x, timesteps, 1)
# Update the hidden units for bidirection-rnn
  fw_lstm_cell = tf.contrib.rnn.BasicLSTMCell(
      RNN_HIDDEN_UNITS // 2, forget_bias=1.0)
  bw_lstm_cell = tf.contrib.rnn.BasicLSTMCell(
      RNN_HIDDEN_UNITS // 2, forget_bias=1.0)
outputs, _, _ = tf.contrib.rnn.static_bidirectional_rnn(
fw_lstm_cell, bw_lstm_cell, x, dtype=tf.float32)
  # outputs[-1] is [BATCH_SIZE, RNN_HIDDEN_UNITS] (forward and backward halves concatenated)
layer = tf.matmul(outputs[-1], weights) + biases
return layer
def gru_inference(inputs, input_units, output_units, is_train=True,
FLAGS=None):
RNN_HIDDEN_UNITS = 128
timesteps = 3
number_input = 3
weights = tf.Variable(tf.random_normal([RNN_HIDDEN_UNITS, output_units]))
biases = tf.Variable(tf.random_normal([output_units]))
# [BATCH_SIZE, 9] -> [BATCH_SIZE, 3, 3]
x = tf.reshape(inputs, [-1, timesteps, number_input])
# [BATCH_SIZE, 3, 3] -> 3 * [BATCH_SIZE, 3]
x = tf.unstack(x, timesteps, 1)
  # output size is 128; the GRU state is a single tensor of size 128 (no separate cell state)
  gru_cell = tf.contrib.rnn.GRUCell(RNN_HIDDEN_UNITS)
  # outputs is a list of 3 tensors, each of shape [BATCH_SIZE, RNN_HIDDEN_UNITS]
  outputs, states = tf.contrib.rnn.static_rnn(gru_cell, x, dtype=tf.float32)
  # outputs[-1] is [BATCH_SIZE, RNN_HIDDEN_UNITS]; project it to [BATCH_SIZE, output_units]
layer = tf.matmul(outputs[-1], weights) + biases
return layer
def compute_softmax_and_accuracy(logits, labels):
"""
  Compute the softmax of the logits and the accuracy against the labels.
Args:
logits: The logits from the model.
labels: The labels.
Return:
The softmax op and accuracy op.
"""
softmax_op = tf.nn.softmax(logits)
correct_prediction_op = tf.equal(tf.argmax(softmax_op, 1), labels)
accuracy_op = tf.reduce_mean(tf.cast(correct_prediction_op, tf.float32))
return softmax_op, accuracy_op
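# Minimal numpy sketch of the accuracy computation above (illustrative only):
# take the argmax of the softmax probabilities, compare it to the integer
# labels, then average the matches.
def _accuracy_example():
  import numpy as np
  probabilities = np.array([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])
  labels = np.array([1, 0, 0])
  return float(np.mean(np.argmax(probabilities, 1) == labels))  # 2/3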
def compute_auc(softmax_op, label_op, label_size):
"""
Compute the auc of the softmax result and labels.
Args:
softmax_op: The softmax op.
label_op: The label op.
label_size: The label size.
Return:
The auc op.
"""
batch_labels = tf.cast(label_op, tf.int32)
sparse_labels = tf.reshape(batch_labels, [-1, 1])
derived_size = tf.shape(batch_labels)[0]
indices = tf.reshape(tf.range(0, derived_size, 1), [-1, 1])
concated = tf.concat(axis=1, values=[indices, sparse_labels])
outshape = tf.stack([derived_size, label_size])
new_batch_labels = tf.sparse_to_dense(concated, outshape, 1.0, 0.0)
_, auc_op = tf.contrib.metrics.streaming_auc(softmax_op, new_batch_labels)
return auc_op
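# Sketch of what the sparse_to_dense construction in compute_auc produces:
# a dense one-hot matrix of shape [batch_size, label_size] built from the
# integer labels, which streaming_auc then compares against the softmax output.
def _one_hot_labels_example():
  import numpy as np
  labels = np.array([0, 2, 1])
  label_size = 3
  one_hot = np.zeros((len(labels), label_size), dtype=np.float32)
  one_hot[np.arange(len(labels)), labels] = 1.0
  return one_hot  # [[1,0,0], [0,0,1], [0,1,0]]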
| {
"repo_name": "tobegit3hub/deep_recommend_system",
"path": "model.py",
"copies": "1",
"size": "11879",
"license": "apache-2.0",
"hash": 3849776703830879000,
"line_mean": 32.1815642458,
"line_max": 79,
"alpha_frac": 0.5940735752,
"autogenerated": false,
"ratio": 3.3255879059350506,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9333705466189677,
"avg_score": 0.017191202989074804,
"num_lines": 358
} |
from __future__ import absolute_import, division, print_function
import textwrap
from distutils.version import LooseVersion
from collections import Iterator
import sys
import traceback
from contextlib import contextmanager
import warnings
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.core.common import is_datetime64tz_dtype
from pandas.api.types import is_categorical_dtype, is_scalar
import toolz
from ..core import get_deps
from ..async import get_sync
PANDAS_VERSION = LooseVersion(pd.__version__)
def shard_df_on_index(df, divisions):
""" Shard a DataFrame by ranges on its index
Examples
--------
>>> df = pd.DataFrame({'a': [0, 10, 20, 30, 40], 'b': [5, 4 ,3, 2, 1]})
>>> df
a b
0 0 5
1 10 4
2 20 3
3 30 2
4 40 1
>>> shards = list(shard_df_on_index(df, [2, 4]))
>>> shards[0]
a b
0 0 5
1 10 4
>>> shards[1]
a b
2 20 3
3 30 2
>>> shards[2]
a b
4 40 1
>>> list(shard_df_on_index(df, []))[0] # empty case
a b
0 0 5
1 10 4
2 20 3
3 30 2
4 40 1
"""
if isinstance(divisions, Iterator):
divisions = list(divisions)
if not len(divisions):
yield df
else:
divisions = np.array(divisions)
df = df.sort_index()
index = df.index
if is_categorical_dtype(index):
index = index.as_ordered()
indices = index.searchsorted(divisions)
yield df.iloc[:indices[0]]
for i in range(len(indices) - 1):
yield df.iloc[indices[i]: indices[i + 1]]
yield df.iloc[indices[-1]:]
def unique(divisions):
""" Polymorphic unique function
>>> list(unique([1, 2, 3, 1, 2, 3]))
[1, 2, 3]
>>> unique(np.array([1, 2, 3, 1, 2, 3]))
array([1, 2, 3])
>>> unique(pd.Categorical(['Alice', 'Bob', 'Alice'], ordered=False))
[Alice, Bob]
Categories (2, object): [Alice, Bob]
"""
if isinstance(divisions, np.ndarray):
return np.unique(divisions)
if isinstance(divisions, pd.Categorical):
return pd.Categorical.from_codes(np.unique(divisions.codes),
divisions.categories,
divisions.ordered)
if isinstance(divisions, (tuple, list, Iterator)):
return tuple(toolz.unique(divisions))
raise NotImplementedError()
_META_TYPES = "meta : pd.DataFrame, pd.Series, dict, iterable, tuple, optional"
_META_DESCRIPTION = """\
An empty ``pd.DataFrame`` or ``pd.Series`` that matches the dtypes and
column names of the output. This metadata is necessary for many algorithms
in dask dataframe to work. For ease of use, some alternative inputs are
also available. Instead of a ``DataFrame``, a ``dict`` of ``{name: dtype}``
or iterable of ``(name, dtype)`` can be provided. Instead of a series, a
tuple of ``(name, dtype)`` can be used. If not provided, dask will try to
infer the metadata. This may lead to unexpected results, so providing
``meta`` is recommended. For more information, see
``dask.dataframe.utils.make_meta``.
"""
def insert_meta_param_description(*args, **kwargs):
"""Replace `$META` in docstring with param description.
If pad keyword is provided, will pad description by that number of
spaces (default is 8)."""
if not args:
return lambda f: insert_meta_param_description(f, **kwargs)
f = args[0]
if f.__doc__:
indent = " " * kwargs.get('pad', 8)
body = textwrap.wrap(_META_DESCRIPTION, initial_indent=indent,
subsequent_indent=indent, width=78)
descr = '{0}\n{1}'.format(_META_TYPES, '\n'.join(body))
f.__doc__ = f.__doc__.replace('$META', descr)
return f
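# Illustrative usage of insert_meta_param_description (the decorated function
# is hypothetical): the ``$META`` placeholder in the docstring is replaced by
# the shared ``meta`` parameter description defined above.
@insert_meta_param_description(pad=4)
def _example_documented_func(df, meta=None):
    """Apply a function to a dataframe.

    Parameters
    ----------
    $META
    """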
@contextmanager
def raise_on_meta_error(funcname=None):
"""Reraise errors in this block to show metadata inference failure.
Parameters
----------
funcname : str, optional
If provided, will be added to the error message to indicate the
name of the method that failed.
"""
try:
yield
except Exception as e:
exc_type, exc_value, exc_traceback = sys.exc_info()
tb = ''.join(traceback.format_tb(exc_traceback))
msg = ("Metadata inference failed{0}.\n\n"
"Original error is below:\n"
"------------------------\n"
"{1}\n\n"
"Traceback:\n"
"---------\n"
"{2}"
).format(" in `{0}`".format(funcname) if funcname else "",
repr(e), tb)
raise ValueError(msg)
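# Minimal usage sketch (the callable and dataframe passed in are hypothetical):
# any exception raised while computing metadata inside the block is re-raised
# as a ValueError carrying the original error and traceback.
def _infer_meta_example(func, df):
    with raise_on_meta_error(funcname=getattr(func, '__name__', None)):
        return func(df.head(0))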
UNKNOWN_CATEGORIES = '__UNKNOWN_CATEGORIES__'
def has_known_categories(x):
"""Returns whether the categories in `x` are known.
Parameters
----------
x : Series or CategoricalIndex
"""
x = getattr(x, '_meta', x)
if isinstance(x, pd.Series):
return UNKNOWN_CATEGORIES not in x.cat.categories
elif isinstance(x, pd.CategoricalIndex):
return UNKNOWN_CATEGORIES not in x.categories
raise TypeError("Expected Series or CategoricalIndex")
def strip_unknown_categories(x):
"""Replace any unknown categoricals with empty categoricals.
Useful for preventing ``UNKNOWN_CATEGORIES`` from leaking into results.
"""
if isinstance(x, (pd.Series, pd.DataFrame)):
x = x.copy()
if isinstance(x, pd.DataFrame):
cat_mask = x.dtypes == 'category'
if cat_mask.any():
cats = cat_mask[cat_mask].index
for c in cats:
if not has_known_categories(x[c]):
x[c].cat.set_categories([], inplace=True)
elif isinstance(x, pd.Series):
if is_categorical_dtype(x.dtype) and not has_known_categories(x):
x.cat.set_categories([], inplace=True)
if (isinstance(x.index, pd.CategoricalIndex) and not
has_known_categories(x.index)):
x.index = x.index.set_categories([])
elif isinstance(x, pd.CategoricalIndex) and not has_known_categories(x):
x = x.set_categories([])
return x
def clear_known_categories(x, cols=None, index=True):
"""Set categories to be unknown.
Parameters
----------
x : DataFrame, Series, Index
cols : iterable, optional
If x is a DataFrame, set only categoricals in these columns to unknown.
By default, all categorical columns are set to unknown categoricals
index : bool, optional
If True and x is a Series or DataFrame, set the clear known categories
in the index as well.
"""
if isinstance(x, (pd.Series, pd.DataFrame)):
x = x.copy()
if isinstance(x, pd.DataFrame):
mask = x.dtypes == 'category'
if cols is None:
cols = mask[mask].index
elif not mask.loc[cols].all():
raise ValueError("Not all columns are categoricals")
for c in cols:
x[c].cat.set_categories([UNKNOWN_CATEGORIES], inplace=True)
elif isinstance(x, pd.Series):
if is_categorical_dtype(x.dtype):
x.cat.set_categories([UNKNOWN_CATEGORIES], inplace=True)
if index and isinstance(x.index, pd.CategoricalIndex):
x.index = x.index.set_categories([UNKNOWN_CATEGORIES])
elif isinstance(x, pd.CategoricalIndex):
x = x.set_categories([UNKNOWN_CATEGORIES])
return x
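# Sketch of the round trip between clear_known_categories and
# strip_unknown_categories on a small Series (illustrative only): clearing
# replaces the categories with the UNKNOWN_CATEGORIES sentinel, stripping
# turns that sentinel back into an empty set of categories.
def _unknown_categories_example():
    s = pd.Series(pd.Categorical(['a', 'b', 'a']))
    cleared = clear_known_categories(s)
    assert not has_known_categories(cleared)
    stripped = strip_unknown_categories(cleared)
    return list(stripped.cat.categories)  # []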
def _empty_series(name, dtype, index=None):
if isinstance(dtype, str) and dtype == 'category':
return pd.Series(pd.Categorical([UNKNOWN_CATEGORIES]),
name=name, index=index).iloc[:0]
return pd.Series([], dtype=dtype, name=name, index=index)
def make_meta(x, index=None):
"""Create an empty pandas object containing the desired metadata.
Parameters
----------
x : dict, tuple, list, pd.Series, pd.DataFrame, pd.Index, dtype, scalar
To create a DataFrame, provide a `dict` mapping of `{name: dtype}`, or
an iterable of `(name, dtype)` tuples. To create a `Series`, provide a
tuple of `(name, dtype)`. If a pandas object, names, dtypes, and index
should match the desired output. If a dtype or scalar, a scalar of the
same dtype is returned.
index : pd.Index, optional
Any pandas index to use in the metadata. If none provided, a
`RangeIndex` will be used.
Examples
--------
>>> make_meta([('a', 'i8'), ('b', 'O')])
Empty DataFrame
Columns: [a, b]
Index: []
>>> make_meta(('a', 'f8'))
Series([], Name: a, dtype: float64)
>>> make_meta('i8')
1
"""
if hasattr(x, '_meta'):
return x._meta
if isinstance(x, (pd.Series, pd.DataFrame)):
return x.iloc[0:0]
elif isinstance(x, pd.Index):
return x[0:0]
index = index if index is None else index[0:0]
if isinstance(x, dict):
return pd.DataFrame({c: _empty_series(c, d, index=index)
for (c, d) in x.items()}, index=index)
if isinstance(x, tuple) and len(x) == 2:
return _empty_series(x[0], x[1], index=index)
elif isinstance(x, (list, tuple)):
if not all(isinstance(i, tuple) and len(i) == 2 for i in x):
raise ValueError("Expected iterable of tuples of (name, dtype), "
"got {0}".format(x))
return pd.DataFrame({c: _empty_series(c, d, index=index) for (c, d) in x},
columns=[c for c, d in x], index=index)
elif not hasattr(x, 'dtype') and x is not None:
# could be a string, a dtype object, or a python type. Skip `None`,
        # because it is implicitly converted to `dtype('f8')`, which we don't
# want here.
try:
dtype = np.dtype(x)
return _scalar_from_dtype(dtype)
except:
# Continue on to next check
pass
if is_scalar(x):
return _nonempty_scalar(x)
raise TypeError("Don't know how to create metadata from {0}".format(x))
def _nonempty_index(idx):
typ = type(idx)
if typ is pd.RangeIndex:
return pd.RangeIndex(2, name=idx.name)
elif typ in (pd.Int64Index, pd.Float64Index):
return typ([1, 2], name=idx.name)
elif typ is pd.Index:
return pd.Index(['a', 'b'], name=idx.name)
elif typ is pd.DatetimeIndex:
start = '1970-01-01'
data = [start, start] if idx.freq is None else None
return pd.DatetimeIndex(data, start=start, periods=2, freq=idx.freq,
tz=idx.tz, name=idx.name)
elif typ is pd.PeriodIndex:
return pd.PeriodIndex(start='1970-01-01', periods=2, freq=idx.freq,
name=idx.name)
elif typ is pd.TimedeltaIndex:
start = np.timedelta64(1, 'D')
data = [start, start] if idx.freq is None else None
return pd.TimedeltaIndex(data, start=start, periods=2, freq=idx.freq,
name=idx.name)
elif typ is pd.CategoricalIndex:
if len(idx.categories):
data = [idx.categories[0]] * 2
cats = idx.categories
else:
data = _nonempty_index(idx.categories)
cats = None
return pd.CategoricalIndex(data, categories=cats,
ordered=idx.ordered, name=idx.name)
elif typ is pd.MultiIndex:
levels = [_nonempty_index(i) for i in idx.levels]
labels = [[0, 0] for i in idx.levels]
return pd.MultiIndex(levels=levels, labels=labels, names=idx.names)
raise TypeError("Don't know how to handle index of "
"type {0}".format(type(idx).__name__))
_simple_fake_mapping = {
'b': np.bool_(True),
'V': np.void(b' '),
'M': np.datetime64('1970-01-01'),
'm': np.timedelta64(1),
'S': np.str_('foo'),
'a': np.str_('foo'),
'U': np.unicode_('foo'),
'O': 'foo'
}
def _scalar_from_dtype(dtype):
if dtype.kind in ('i', 'f', 'u'):
return dtype.type(1)
elif dtype.kind == 'c':
return dtype.type(complex(1, 0))
elif dtype.kind in _simple_fake_mapping:
o = _simple_fake_mapping[dtype.kind]
return o.astype(dtype) if dtype.kind in ('m', 'M') else o
else:
raise TypeError("Can't handle dtype: {0}".format(dtype))
def _nonempty_scalar(x):
if isinstance(x, (pd.Timestamp, pd.Timedelta, pd.Period)):
return x
elif np.isscalar(x):
dtype = x.dtype if hasattr(x, 'dtype') else np.dtype(type(x))
return _scalar_from_dtype(dtype)
else:
raise TypeError("Can't handle meta of type "
"'{0}'".format(type(x).__name__))
def _nonempty_series(s, idx):
dtype = s.dtype
if is_datetime64tz_dtype(dtype):
entry = pd.Timestamp('1970-01-01', tz=dtype.tz)
data = [entry, entry]
elif is_categorical_dtype(dtype):
if len(s.cat.categories):
data = [s.cat.categories[0]] * 2
cats = s.cat.categories
else:
data = _nonempty_index(s.cat.categories)
cats = None
data = pd.Categorical(data, categories=cats,
ordered=s.cat.ordered)
else:
entry = _scalar_from_dtype(dtype)
data = np.array([entry, entry], dtype=dtype)
return pd.Series(data, name=s.name, index=idx)
def meta_nonempty(x):
"""Create a nonempty pandas object from the given metadata.
Returns a pandas DataFrame, Series, or Index that contains two rows
of fake data.
"""
if isinstance(x, pd.Index):
return _nonempty_index(x)
elif isinstance(x, pd.Series):
idx = _nonempty_index(x.index)
return _nonempty_series(x, idx)
elif isinstance(x, pd.DataFrame):
idx = _nonempty_index(x.index)
data = {i: _nonempty_series(x.iloc[:, i], idx)
for i, c in enumerate(x.columns)}
res = pd.DataFrame(data, index=idx,
columns=np.arange(len(x.columns)))
res.columns = x.columns
return res
elif is_scalar(x):
return _nonempty_scalar(x)
else:
raise TypeError("Expected Index, Series, DataFrame, or scalar, "
"got {0}".format(type(x).__name__))
###############################################################
# Testing
###############################################################
def _check_dask(dsk, check_names=True, check_dtypes=True):
import dask.dataframe as dd
if hasattr(dsk, 'dask'):
result = dsk.compute(get=get_sync)
if isinstance(dsk, dd.Index):
assert isinstance(result, pd.Index), type(result)
if check_names:
assert dsk.name == result.name
# cache
assert isinstance(dsk._meta, pd.Index), type(dsk._meta)
if check_names:
assert dsk._meta.name == result.name
if check_dtypes:
assert_dask_dtypes(dsk, result)
elif isinstance(dsk, dd.Series):
assert isinstance(result, pd.Series), type(result)
if check_names:
assert dsk.name == result.name, (dsk.name, result.name)
# cache
assert isinstance(dsk._meta, pd.Series), type(dsk._meta)
if check_names:
assert dsk._meta.name == result.name
if check_dtypes:
assert_dask_dtypes(dsk, result)
elif isinstance(dsk, dd.DataFrame):
assert isinstance(result, pd.DataFrame), type(result)
assert isinstance(dsk.columns, pd.Index), type(dsk.columns)
if check_names:
tm.assert_index_equal(dsk.columns, result.columns)
# cache
assert isinstance(dsk._meta, pd.DataFrame), type(dsk._meta)
if check_names:
tm.assert_index_equal(dsk._meta.columns, result.columns)
if check_dtypes:
assert_dask_dtypes(dsk, result)
elif isinstance(dsk, dd.core.Scalar):
assert (np.isscalar(result) or
isinstance(result, (pd.Timestamp, pd.Timedelta)))
if check_dtypes:
assert_dask_dtypes(dsk, result)
else:
msg = 'Unsupported dask instance {0} found'.format(type(dsk))
raise AssertionError(msg)
return result
return dsk
def _maybe_sort(a):
# sort by value, then index
try:
if isinstance(a, pd.DataFrame):
a = a.sort_values(by=a.columns.tolist())
else:
a = a.sort_values()
except (TypeError, IndexError, ValueError):
pass
return a.sort_index()
def assert_eq(a, b, check_names=True, check_dtypes=True,
check_divisions=True, check_index=True, **kwargs):
if check_divisions:
assert_divisions(a)
assert_divisions(b)
if hasattr(a, 'divisions') and hasattr(b, 'divisions'):
at = type(np.asarray(a.divisions).tolist()[0]) # numpy to python
bt = type(np.asarray(b.divisions).tolist()[0]) # scalar conversion
assert at == bt, (at, bt)
assert_sane_keynames(a)
assert_sane_keynames(b)
a = _check_dask(a, check_names=check_names, check_dtypes=check_dtypes)
b = _check_dask(b, check_names=check_names, check_dtypes=check_dtypes)
if not check_index:
a = a.reset_index(drop=True)
b = b.reset_index(drop=True)
if isinstance(a, pd.DataFrame):
a = _maybe_sort(a)
b = _maybe_sort(b)
tm.assert_frame_equal(a, b, **kwargs)
elif isinstance(a, pd.Series):
a = _maybe_sort(a)
b = _maybe_sort(b)
tm.assert_series_equal(a, b, check_names=check_names, **kwargs)
elif isinstance(a, pd.Index):
tm.assert_index_equal(a, b, **kwargs)
else:
if a == b:
return True
else:
if np.isnan(a):
assert np.isnan(b)
else:
assert np.allclose(a, b)
return True
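# Minimal usage sketch (assumes dask.dataframe is importable in this
# environment): assert_eq computes the dask collection with the synchronous
# scheduler and compares the result and its metadata against the expected
# pandas object.
def _assert_eq_example():
    import dask.dataframe as dd
    pdf = pd.DataFrame({'x': [1, 2, 3, 4]})
    ddf = dd.from_pandas(pdf, npartitions=2)
    assert_eq(ddf, pdf)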
def eq(*args, **kwargs):
    warnings.warn('eq is deprecated. Use assert_eq instead', UserWarning)
assert_eq(*args, **kwargs)
def assert_dask_graph(dask, label):
if hasattr(dask, 'dask'):
dask = dask.dask
assert isinstance(dask, dict)
for k in dask:
if isinstance(k, tuple):
k = k[0]
if k.startswith(label):
return True
else:
msg = "given dask graph doesn't contan label: {0}"
raise AssertionError(msg.format(label))
def assert_divisions(ddf):
if not hasattr(ddf, 'divisions'):
return
if not hasattr(ddf, 'index'):
return
if not ddf.known_divisions:
return
index = lambda x: x if isinstance(x, pd.Index) else x.index
results = get_sync(ddf.dask, ddf._keys())
for i, df in enumerate(results[:-1]):
if len(df):
assert index(df).min() >= ddf.divisions[i]
assert index(df).max() < ddf.divisions[i + 1]
if len(results[-1]):
assert index(results[-1]).min() >= ddf.divisions[-2]
assert index(results[-1]).max() <= ddf.divisions[-1]
def assert_sane_keynames(ddf):
if not hasattr(ddf, 'dask'):
return
for k in ddf.dask.keys():
while isinstance(k, tuple):
k = k[0]
assert isinstance(k, (str, bytes))
assert len(k) < 100
assert ' ' not in k
if sys.version_info[0] >= 3:
assert k.split('-')[0].isidentifier()
def assert_dask_dtypes(ddf, res, numeric_equal=True):
"""Check that the dask metadata matches the result.
If `numeric_equal`, integer and floating dtypes compare equal. This is
useful due to the implicit conversion of integer to floating upon
encountering missingness, which is hard to infer statically."""
eq_types = {'O', 'S', 'U', 'a'} # treat object and strings alike
if numeric_equal:
eq_types.update(('i', 'f'))
if isinstance(res, pd.DataFrame):
for col, a, b in pd.concat([ddf._meta.dtypes, res.dtypes],
axis=1).itertuples():
assert (a.kind in eq_types and b.kind in eq_types) or (a == b)
elif isinstance(res, (pd.Series, pd.Index)):
a = ddf._meta.dtype
b = res.dtype
assert (a.kind in eq_types and b.kind in eq_types) or (a == b)
else:
if hasattr(ddf._meta, 'dtype'):
a = ddf._meta.dtype
if not hasattr(res, 'dtype'):
assert np.isscalar(res)
b = np.dtype(type(res))
else:
b = res.dtype
assert (a.kind in eq_types and b.kind in eq_types) or (a == b)
else:
assert type(ddf._meta) == type(res)
def assert_max_deps(x, n, eq=True):
dependencies, dependents = get_deps(x.dask)
if eq:
assert max(map(len, dependencies.values())) == n
else:
assert max(map(len, dependencies.values())) <= n
| {
"repo_name": "cpcloud/dask",
"path": "dask/dataframe/utils.py",
"copies": "1",
"size": "21108",
"license": "bsd-3-clause",
"hash": -1765535917126917400,
"line_mean": 32.9903381643,
"line_max": 82,
"alpha_frac": 0.5701629714,
"autogenerated": false,
"ratio": 3.6677671589921808,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47379301303921806,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import textwrap
from distutils.version import LooseVersion
from collections import Iterator
import sys
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.core.common import is_datetime64tz_dtype, is_categorical_dtype
import toolz
from ..async import get_sync
PANDAS_VERSION = LooseVersion(pd.__version__)
def shard_df_on_index(df, divisions):
""" Shard a DataFrame by ranges on its index
Examples
--------
>>> df = pd.DataFrame({'a': [0, 10, 20, 30, 40], 'b': [5, 4 ,3, 2, 1]})
>>> df
a b
0 0 5
1 10 4
2 20 3
3 30 2
4 40 1
>>> shards = list(shard_df_on_index(df, [2, 4]))
>>> shards[0]
a b
0 0 5
1 10 4
>>> shards[1]
a b
2 20 3
3 30 2
>>> shards[2]
a b
4 40 1
>>> list(shard_df_on_index(df, []))[0] # empty case
a b
0 0 5
1 10 4
2 20 3
3 30 2
4 40 1
"""
from dask.dataframe.categorical import iscategorical
if isinstance(divisions, Iterator):
divisions = list(divisions)
if not len(divisions):
yield df
else:
divisions = np.array(divisions)
df = df.sort_index()
index = df.index
if iscategorical(index.dtype):
index = index.as_ordered()
indices = index.searchsorted(divisions)
yield df.iloc[:indices[0]]
for i in range(len(indices) - 1):
yield df.iloc[indices[i]: indices[i+1]]
yield df.iloc[indices[-1]:]
def unique(divisions):
""" Polymorphic unique function
>>> list(unique([1, 2, 3, 1, 2, 3]))
[1, 2, 3]
>>> unique(np.array([1, 2, 3, 1, 2, 3]))
array([1, 2, 3])
>>> unique(pd.Categorical(['Alice', 'Bob', 'Alice'], ordered=False))
[Alice, Bob]
Categories (2, object): [Alice, Bob]
"""
if isinstance(divisions, np.ndarray):
return np.unique(divisions)
if isinstance(divisions, pd.Categorical):
return pd.Categorical.from_codes(np.unique(divisions.codes),
divisions.categories, divisions.ordered)
if isinstance(divisions, (tuple, list, Iterator)):
return tuple(toolz.unique(divisions))
raise NotImplementedError()
_META_TYPES = "meta : pd.DataFrame, pd.Series, dict, iterable, tuple, optional"
_META_DESCRIPTION = """\
An empty ``pd.DataFrame`` or ``pd.Series`` that matches the dtypes and
column names of the output. This metadata is necessary for many algorithms
in dask dataframe to work. For ease of use, some alternative inputs are
also available. Instead of a ``DataFrame``, a ``dict`` of ``{name: dtype}``
or iterable of ``(name, dtype)`` can be provided. Instead of a series, a
tuple of ``(name, dtype)`` can be used. If not provided, dask will try to
infer the metadata. This may lead to unexpected results, so providing
``meta`` is recommended. For more information, see
``dask.dataframe.utils.make_meta``.
"""
def insert_meta_param_description(*args, **kwargs):
"""Replace `$META` in docstring with param description.
If pad keyword is provided, will pad description by that number of
spaces (default is 8)."""
if not args:
return lambda f: insert_meta_param_description(f, **kwargs)
f = args[0]
if f.__doc__:
indent = " "*kwargs.get('pad', 8)
body = textwrap.wrap(_META_DESCRIPTION, initial_indent=indent,
subsequent_indent=indent, width=78)
descr = '{0}\n{1}'.format(_META_TYPES, '\n'.join(body))
f.__doc__ = f.__doc__.replace('$META', descr)
return f
def make_meta(x, index=None):
"""Create an empty pandas object containing the desired metadata.
Parameters
----------
x : dict, tuple, list, pd.Series, pd.DataFrame, pd.Index, dtype, scalar
To create a DataFrame, provide a `dict` mapping of `{name: dtype}`, or
an iterable of `(name, dtype)` tuples. To create a `Series`, provide a
tuple of `(name, dtype)`. If a pandas object, names, dtypes, and index
should match the desired output. If a dtype or scalar, a scalar of the
same dtype is returned.
index : pd.Index, optional
Any pandas index to use in the metadata. If none provided, a
`RangeIndex` will be used.
Examples
--------
>>> make_meta([('a', 'i8'), ('b', 'O')])
Empty DataFrame
Columns: [a, b]
Index: []
>>> make_meta(('a', 'f8'))
Series([], Name: a, dtype: float64)
>>> make_meta('i8')
1
"""
if hasattr(x, '_meta'):
return x._meta
if isinstance(x, (pd.Series, pd.DataFrame)):
return x.iloc[0:0]
elif isinstance(x, pd.Index):
return x[0:0]
index = index if index is None else index[0:0]
if isinstance(x, dict):
return pd.DataFrame({c: pd.Series([], dtype=d)
for (c, d) in x.items()},
index=index)
elif isinstance(x, tuple) and len(x) == 2:
return pd.Series([], dtype=x[1], name=x[0], index=index)
elif isinstance(x, (list, tuple)):
if not all(isinstance(i, tuple) and len(i) == 2 for i in x):
raise ValueError("Expected iterable of tuples of (name, dtype), "
"got {0}".format(x))
return pd.DataFrame({c: pd.Series([], dtype=d) for (c, d) in x},
columns=[c for c, d in x], index=index)
elif not hasattr(x, 'dtype') and x is not None:
# could be a string, a dtype object, or a python type. Skip `None`,
        # because it is implicitly converted to `dtype('f8')`, which we don't
# want here.
try:
dtype = np.dtype(x)
return _scalar_from_dtype(dtype)
except:
# Continue on to next check
pass
if is_pd_scalar(x):
return _nonempty_scalar(x)
raise TypeError("Don't know how to create metadata from {0}".format(x))
def _nonempty_index(idx):
typ = type(idx)
if typ is pd.RangeIndex:
return pd.RangeIndex(2, name=idx.name)
elif typ in (pd.Int64Index, pd.Float64Index):
return typ([1, 2], name=idx.name)
elif typ is pd.Index:
return pd.Index(['a', 'b'], name=idx.name)
elif typ is pd.DatetimeIndex:
start = '1970-01-01'
data = [start, start] if idx.freq is None else None
return pd.DatetimeIndex(data, start=start, periods=2, freq=idx.freq,
tz=idx.tz, name=idx.name)
elif typ is pd.PeriodIndex:
return pd.PeriodIndex(start='1970-01-01', periods=2, freq=idx.freq,
name=idx.name)
elif typ is pd.TimedeltaIndex:
start = np.timedelta64(1, 'D')
data = [start, start] if idx.freq is None else None
return pd.TimedeltaIndex(data, start=start, periods=2, freq=idx.freq,
name=idx.name)
elif typ is pd.CategoricalIndex:
element = idx.categories[0]
return pd.CategoricalIndex([element, element],
categories=idx.categories,
ordered=idx.ordered, name=idx.name)
elif typ is pd.MultiIndex:
levels = [_nonempty_index(i) for i in idx.levels]
labels = [[0, 0] for i in idx.levels]
return pd.MultiIndex(levels=levels, labels=labels, names=idx.names)
raise TypeError("Don't know how to handle index of "
"type {0}".format(type(idx).__name__))
_simple_fake_mapping = {
'b': np.bool_(True),
'V': np.void(b' '),
'M': np.datetime64('1970-01-01'),
'm': np.timedelta64(1),
'S': np.str_('foo'),
'a': np.str_('foo'),
'U': np.unicode_('foo'),
'O': 'foo'
}
def _scalar_from_dtype(dtype):
if dtype.kind in ('i', 'f', 'u'):
return dtype.type(1)
elif dtype.kind == 'c':
return dtype.type(complex(1, 0))
elif dtype.kind in _simple_fake_mapping:
o = _simple_fake_mapping[dtype.kind]
return o.astype(dtype) if dtype.kind in ('m', 'M') else o
else:
raise TypeError("Can't handle dtype: {0}".format(dtype))
def _nonempty_scalar(x):
if isinstance(x, (pd.Timestamp, pd.Timedelta, pd.Period)):
return x
elif np.isscalar(x):
dtype = x.dtype if hasattr(x, 'dtype') else np.dtype(type(x))
return _scalar_from_dtype(dtype)
else:
raise TypeError("Can't handle meta of type "
"'{0}'".format(type(x).__name__))
def is_pd_scalar(x):
"""Whether the object is a scalar type"""
return (np.isscalar(x) or isinstance(x, (pd.Timestamp, pd.Timedelta,
pd.Period)))
def _nonempty_series(s, idx):
dtype = s.dtype
if is_datetime64tz_dtype(dtype):
entry = pd.Timestamp('1970-01-01', tz=dtype.tz)
data = [entry, entry]
elif is_categorical_dtype(dtype):
entry = s.cat.categories[0]
data = pd.Categorical([entry, entry],
categories=s.cat.categories,
ordered=s.cat.ordered)
else:
entry = _scalar_from_dtype(dtype)
data = np.array([entry, entry], dtype=dtype)
return pd.Series(data, name=s.name, index=idx)
def meta_nonempty(x):
"""Create a nonempty pandas object from the given metadata.
Returns a pandas DataFrame, Series, or Index that contains two rows
of fake data.
"""
if isinstance(x, pd.Index):
return _nonempty_index(x)
elif isinstance(x, pd.Series):
idx = _nonempty_index(x.index)
return _nonempty_series(x, idx)
elif isinstance(x, pd.DataFrame):
idx = _nonempty_index(x.index)
data = {c: _nonempty_series(x[c], idx) for c in x.columns}
return pd.DataFrame(data, columns=x.columns, index=idx)
elif is_pd_scalar(x):
return _nonempty_scalar(x)
else:
raise TypeError("Expected Index, Series, DataFrame, or scalar, "
"got {0}".format(type(x).__name__))
###############################################################
# Testing
###############################################################
def _check_dask(dsk, check_names=True, check_dtypes=True):
import dask.dataframe as dd
if hasattr(dsk, 'dask'):
result = dsk.compute(get=get_sync)
if isinstance(dsk, dd.Index):
assert isinstance(result, pd.Index), type(result)
if check_names:
assert dsk.name == result.name
# cache
assert isinstance(dsk._meta, pd.Index), type(dsk._meta)
if check_names:
assert dsk._meta.name == result.name
if check_dtypes:
assert_dask_dtypes(dsk, result)
elif isinstance(dsk, dd.Series):
assert isinstance(result, pd.Series), type(result)
if check_names:
assert dsk.name == result.name, (dsk.name, result.name)
# cache
assert isinstance(dsk._meta, pd.Series), type(dsk._meta)
if check_names:
assert dsk._meta.name == result.name
if check_dtypes:
assert_dask_dtypes(dsk, result)
elif isinstance(dsk, dd.DataFrame):
assert isinstance(result, pd.DataFrame), type(result)
assert isinstance(dsk.columns, pd.Index), type(dsk.columns)
if check_names:
tm.assert_index_equal(dsk.columns, result.columns)
# cache
assert isinstance(dsk._meta, pd.DataFrame), type(dsk._meta)
if check_names:
tm.assert_index_equal(dsk._meta.columns, result.columns)
if check_dtypes:
assert_dask_dtypes(dsk, result)
elif isinstance(dsk, dd.core.Scalar):
assert (np.isscalar(result) or
isinstance(result, (pd.Timestamp, pd.Timedelta)))
if check_dtypes:
assert_dask_dtypes(dsk, result)
else:
msg = 'Unsupported dask instance {0} found'.format(type(dsk))
raise AssertionError(msg)
return result
return dsk
def _maybe_sort(a):
# sort by value, then index
try:
if isinstance(a, pd.DataFrame):
a = a.sort_values(by=a.columns.tolist())
else:
a = a.sort_values()
except (TypeError, IndexError, ValueError):
pass
return a.sort_index()
def eq(a, b, check_names=True, check_dtypes=True, check_divisions=True,
**kwargs):
if check_divisions:
assert_divisions(a)
assert_divisions(b)
assert_sane_keynames(a)
assert_sane_keynames(b)
a = _check_dask(a, check_names=check_names, check_dtypes=check_dtypes)
b = _check_dask(b, check_names=check_names, check_dtypes=check_dtypes)
if isinstance(a, pd.DataFrame):
a = _maybe_sort(a)
b = _maybe_sort(b)
tm.assert_frame_equal(a, b, **kwargs)
elif isinstance(a, pd.Series):
a = _maybe_sort(a)
b = _maybe_sort(b)
tm.assert_series_equal(a, b, check_names=check_names, **kwargs)
elif isinstance(a, pd.Index):
tm.assert_index_equal(a, b, **kwargs)
else:
if a == b:
return True
else:
if np.isnan(a):
assert np.isnan(b)
else:
assert np.allclose(a, b)
return True
def assert_dask_graph(dask, label):
if hasattr(dask, 'dask'):
dask = dask.dask
assert isinstance(dask, dict)
for k in dask:
if isinstance(k, tuple):
k = k[0]
if k.startswith(label):
return True
else:
msg = "given dask graph doesn't contan label: {0}"
raise AssertionError(msg.format(label))
def assert_divisions(ddf):
if not hasattr(ddf, 'divisions'):
return
if not hasattr(ddf, 'index'):
return
if not ddf.known_divisions:
return
results = get_sync(ddf.dask, ddf._keys())
for i, df in enumerate(results[:-1]):
if len(df):
assert df.index.min() >= ddf.divisions[i]
assert df.index.max() < ddf.divisions[i + 1]
if len(results[-1]):
assert results[-1].index.min() >= ddf.divisions[-2]
assert results[-1].index.max() <= ddf.divisions[-1]
def assert_sane_keynames(ddf):
if not hasattr(ddf, 'dask'):
return
for k in ddf.dask.keys():
while isinstance(k, tuple):
k = k[0]
assert isinstance(k, (str, bytes))
assert len(k) < 100
assert ' ' not in k
if sys.version_info[0] >= 3:
assert k.split('-')[0].isidentifier()
def assert_dask_dtypes(ddf, res, numeric_equal=True):
"""Check that the dask metadata matches the result.
If `numeric_equal`, integer and floating dtypes compare equal. This is
useful due to the implicit conversion of integer to floating upon
encountering missingness, which is hard to infer statically."""
eq_types = {'O', 'S', 'U', 'a'} # treat object and strings alike
if numeric_equal:
eq_types.update(('i', 'f'))
if isinstance(res, pd.DataFrame):
for col, a, b in pd.concat([ddf._meta.dtypes, res.dtypes],
axis=1).itertuples():
assert (a.kind in eq_types and b.kind in eq_types) or (a == b)
elif isinstance(res, (pd.Series, pd.Index)):
a = ddf._meta.dtype
b = res.dtype
assert (a.kind in eq_types and b.kind in eq_types) or (a == b)
else:
if hasattr(ddf._meta, 'dtype'):
a = ddf._meta.dtype
if not hasattr(res, 'dtype'):
assert np.isscalar(res)
b = np.dtype(type(res))
else:
b = res.dtype
assert (a.kind in eq_types and b.kind in eq_types) or (a == b)
else:
assert type(ddf._meta) == type(res)
| {
"repo_name": "cowlicks/dask",
"path": "dask/dataframe/utils.py",
"copies": "1",
"size": "16030",
"license": "bsd-3-clause",
"hash": 8129658437381091000,
"line_mean": 32.7473684211,
"line_max": 79,
"alpha_frac": 0.566625078,
"autogenerated": false,
"ratio": 3.594170403587444,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46607954815874436,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import tflearn
def textfile_to_seq(file, seq_maxlen=25, redun_step=3):
""" string_to_semi_redundant_sequences.
Vectorize a string and returns parsed sequences and targets, along with
the associated dictionary.
Arguments:
        file: `str`. Path to the input text file (read as UTF-8 and lower-cased).
seq_maxlen: `int`. Maximum length of a sequence. Default: 25.
redun_step: `int`. Redundancy step. Default: 3.
Returns:
`tuple`: (inputs, targets, dictionary)
"""
import numpy as np
import re
print("Vectorizing text...")
import codecs
    f = codecs.open(file, "r", "utf-8")
string = f.read()
string.encode('utf-8')
string = re.sub( '([A-Z])', '^\\1', string ).lower()
chars = set()
chars.update(string)
char_idx = {c: i for i, c in enumerate(chars)}
sequences = []
next_chars = []
for i in range(0, len(string) - seq_maxlen, redun_step):
sequences.append(string[i: i + seq_maxlen])
next_chars.append(string[i + seq_maxlen])
X = np.zeros((len(sequences), seq_maxlen, len(chars)), dtype=np.bool)
Y = np.zeros((len(sequences), len(chars)), dtype=np.bool)
for i, seq in enumerate(sequences):
for t, char in enumerate(seq):
X[i, t, char_idx[char]] = 1
Y[i, char_idx[next_chars[i]]] = 1
print("Text total length: " + str(len(string)))
print("Distinct chars: " + str(len(chars)))
print("Total sequences: " + str(len(sequences)))
return X, Y, char_idx
def random_sequence_from_string(string, seq_maxlen):
import random
rand_index = random.randint(0, len(string) - seq_maxlen - 1)
return string[rand_index: rand_index + seq_maxlen]
def random_sequence_from_textfile(path, seq_maxlen):
import codecs
import re
f = codecs.open(path, "r", "utf-8")
text = f.read()
text.encode('utf-8')
text = re.sub( '([A-Z])', '^\\1', text ).lower()
return random_sequence_from_string(text, seq_maxlen)
path = 'toponims.txt'
maxlen = 20
X, Y, char_idx = \
textfile_to_seq(path, seq_maxlen=maxlen, redun_step=3)
g = tflearn.input_data(shape=[None, maxlen, len(char_idx)])
g = tflearn.lstm(g, 64, return_seq=True)
g = tflearn.dropout(g, 0.5)
g = tflearn.lstm(g, 64)
g = tflearn.dropout(g, 0.5)
g = tflearn.fully_connected(g, len(char_idx), activation='softmax')
g = tflearn.regression(g, optimizer='adam', loss='categorical_crossentropy',
learning_rate=0.01)
m = tflearn.SequenceGenerator(g, dictionary=char_idx,
seq_maxlen=maxlen,
clip_gradients=5.0)
for i in range(100):
seed = random_sequence_from_textfile(path, maxlen)
m.fit(X, Y, validation_set=0.1, batch_size=128,
n_epoch=1, run_id='toponims')
print("-- TESTING...")
print("-- EPOCH = ", i)
print("-- Test with temperature of 1.2 --")
print(m.generate(30, temperature=1.2, seq_seed=seed))
print("-- Test with temperature of 1.0 --")
print(m.generate(30, temperature=1.0, seq_seed=seed))
print("-- Test with temperature of 0.5 --")
    print(m.generate(30, temperature=0.5, seq_seed=seed))
| {
"repo_name": "jvitria/DeepLearningBBVA2016",
"path": "helpers/names.py",
"copies": "2",
"size": "3235",
"license": "mit",
"hash": 7035212594129675000,
"line_mean": 34.5604395604,
"line_max": 76,
"alpha_frac": 0.6182380216,
"autogenerated": false,
"ratio": 3.1998021760633035,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9796533880884607,
"avg_score": 0.004301263355739168,
"num_lines": 91
} |
from __future__ import absolute_import, division, print_function
import threading
from Queue import Empty
import cloudpickle as cp
import pytest
from mentor.queue import LockingQueue, Queue
def test_queue_put_get(zk):
queue = Queue(zk, '/mentor/putget')
queue.put(cp.dumps(range(5)))
assert cp.loads(queue.get()) == range(5)
def test_locking_queue_put_get(zk):
queue = LockingQueue(zk, '/mentor/putget_locking')
queue.put(cp.dumps(range(5)))
assert queue.get() == cp.dumps(range(5))
queue.consume()
def test_queue_serde(zk):
queue = Queue(zk, '/mentor/serde')
queue.put(cp.dumps({'a': 1, 'b': 2}))
queue.put(cp.dumps({'c': 3}))
pickled_queue = cp.dumps(queue)
unpickled_queue = cp.loads(pickled_queue)
assert cp.loads(unpickled_queue.get()) == {'a': 1, 'b': 2}
assert cp.loads(unpickled_queue.get()) == {'c': 3}
def test_locking_queue_serde(zk):
queue = LockingQueue(zk, '/mentor/serde_locking')
queue.put(cp.dumps({'a': 1, 'b': 2}))
queue.put(cp.dumps({'c': 3}))
pickled_queue = cp.dumps(queue)
unpickled_queue = cp.loads(pickled_queue)
assert cp.loads(unpickled_queue.get()) == {'a': 1, 'b': 2}
unpickled_queue.consume()
assert cp.loads(unpickled_queue.get()) == {'c': 3}
unpickled_queue.consume()
def test_queue_size(zk):
queue = Queue(zk, '/mentor/size')
assert queue.empty()
assert queue.qsize() == 0
queue.put(cp.dumps(range(5)))
assert queue.empty() is False
assert queue.qsize() == 1
def test_queue_blocking_get(zk):
queue = Queue(zk, '/mentor/blocking')
def delayed_put():
import time
time.sleep(2)
queue.put(cp.dumps(range(5)))
t = threading.Thread(target=delayed_put)
t.start()
with pytest.raises(Empty):
queue.get(block=True, timeout=1)
assert queue.get(block=True, timeout=2) == cp.dumps(range(5))
| {
"repo_name": "lensacom/satyr",
"path": "mentor/tests/test_queue.py",
"copies": "1",
"size": "1905",
"license": "apache-2.0",
"hash": 294234648320201300,
"line_mean": 25.095890411,
"line_max": 65,
"alpha_frac": 0.6330708661,
"autogenerated": false,
"ratio": 3.0431309904153356,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9176201856515336,
"avg_score": 0,
"num_lines": 73
} |
from __future__ import absolute_import, division, print_function
import threading
from . import ir, pipeline, transforms
#------------------------------------------------------------------------
# Passes
#------------------------------------------------------------------------
passes = [
transforms.explicit_coercions,
]
#------------------------------------------------------------------------
# Execution Context
#------------------------------------------------------------------------
_tls = threading.local()
def current_ctx():
"""Return the current evaluation strategy"""
try:
return _tls.ctx
except AttributeError:
_tls.ctx = ir.ExecutionContext()
return current_ctx()
#------------------------------------------------------------------------
# Prepare
#------------------------------------------------------------------------
def prepare(expr, strategy):
"""
Prepare a Deferred for interpretation
"""
graph, expr_ctx = expr
f, values = ir.from_expr(graph, expr_ctx, current_ctx())
env = {'strategy': strategy}
func, env = pipeline.run_pipeline(f, env, passes)
return func, env
| {
"repo_name": "zeeshanali/blaze",
"path": "blaze/compute/air/prepare.py",
"copies": "1",
"size": "1169",
"license": "bsd-3-clause",
"hash": 5501974357735049000,
"line_mean": 26.1860465116,
"line_max": 73,
"alpha_frac": 0.4063301967,
"autogenerated": false,
"ratio": 5.566666666666666,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.003911521363933469,
"num_lines": 43
} |
from __future__ import absolute_import, division, print_function
import time
from Queue import Empty
import cloudpickle as cp
from kazoo.client import KazooClient
from kazoo.recipe.queue import LockingQueue as KazooLockingQueue
from kazoo.recipe.queue import Queue as KazooQueue
from .utils import timeout as seconds
from .utils import TimeoutError
class SerializableMixin(object):
def __getstate__(self):
hosts = ["{}:{}".format(h, p) for h, p in self.client.hosts]
client = ",".join(hosts)
result = self.__dict__.copy()
result['client'] = client
return result
def __setstate__(self, state):
hosts = state.pop('client')
client = KazooClient(hosts)
client.start()
self.__dict__ = state
self.client = client
class CompatMixin(object): # Python's Queue compatibility
def __bool__(self):
return True
def __nonzero__(self):
return True
def qsize(self):
return len(self)
def empty(self):
return len(self) == 0
class Queue(CompatMixin, SerializableMixin, KazooQueue):
def get(self, block=True, timeout=-1):
result = super(Queue, self).get()
if block:
try:
with seconds(timeout):
while result is None:
result = super(Queue, self).get()
time.sleep(0.1)
except TimeoutError:
raise Empty
return cp.loads(result)
def put(self, item):
value = cp.dumps(item)
return super(Queue, self).put(value)
class LockingQueue(CompatMixin, SerializableMixin, KazooLockingQueue):
def get(self, block=True, timeout=-1):
result = super(LockingQueue, self).get(timeout=timeout)
return cp.loads(result)
def put(self, item, priority=100):
value = cp.dumps(item)
return super(LockingQueue, self).put(value, priority=priority)
| {
"repo_name": "lensacom/satyr",
"path": "mentor/queue.py",
"copies": "1",
"size": "1969",
"license": "apache-2.0",
"hash": -2928521527677109000,
"line_mean": 23.9240506329,
"line_max": 70,
"alpha_frac": 0.6109700356,
"autogenerated": false,
"ratio": 3.977777777777778,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 79
} |
from __future__ import absolute_import, division, print_function
import time
import importlib
import json
import copy
import pdb
import tensorflow as tf
from tensorflow.python.ops import variables
import numpy as np
import tfutils.utils as utils
from tfutils.error import NanLossError, HiLossError, NoChangeError
from tfutils.utils import strip_prefix
from tfutils.db_interface import DBInterface
from tfutils.helper import \
parse_params, get_params, \
get_data, get_model, get_loss, \
split_input, log, get_model
from tfutils.validation import run_all_validations, get_valid_targets_dict
from tfutils.defaults import \
DEFAULT_HOST, DEFAULT_LOOP_PARAMS, \
DEFAULT_TRAIN_THRES_LOSS, DEFAULT_PARAMS
from tfutils.tpu_train import tpu_train_from_params
def train_from_params(
save_params,
model_params,
train_params,
loss_params=None,
learning_rate_params=None,
optimizer_params=None,
validation_params=None,
load_params=None,
log_device_placement=DEFAULT_PARAMS['log_device_placement'], # advanced
dont_run=DEFAULT_PARAMS['dont_run'], # advanced
skip_check=DEFAULT_PARAMS['skip_check'], # advanced
use_estimator=False
):
"""
Main training interface function.
Args:
save_params (dict):
Describing the parameters used to construct the save database, and
control saving. These include:
- host (str)
Hostname where database connection lives
- port (int)
Port where database connection lives
- dbname (str)
Name of database for storage
- collname (str)
Name of collection for storage
- exp_id (str)
Experiment id descriptor
NOTE: the variables host/port/dbname/coll/exp_id control
the location of the saved data for the run, in order of
increasing specificity. When choosing these, note that:
- If a given host/port/dbname/coll/exp_id already has saved checkpoints,\
then any new call to start training with these same location variables\
will start to train from the most recent saved checkpoint. If you mistakenly\
try to start training a new model with different variable names, or structure,\
from that existing checkpoint, an error will be raised, as the model will be\
            incompatible with the saved variables.
- When choosing what dbname, coll, and exp_id, to use, keep in mind that mongodb\
queries only operate over a single collection. So if you want to analyze\
results from a bunch of experiments together using mongod queries, you should\
put them all in the same collection, but with different exp_ids. If, on the\
other hand, you never expect to analyze data from two experiments together,\
you can put them in different collections or different databases. Choosing\
between putting two experiments in two collections in the same database\
or in two totally different databases will depend on how you want to organize\
your results and is really a matter of preference.
- do_save (bool, default: True)
Whether to save to database
- save_initial_filters (bool, default: True)
Whether to save initial model filters at step = 0,
- save_metrics_freq (int, default: 5)
How often to store train results to database
- save_valid_freq (int, default: 3000)
How often to calculate and store validation results
to database
- save_filters_freq (int, default: 30000)
How often to save filter values to database
- cache_filters_freq (int, default: 3000)
How often to cache filter values locally and save
to ___RECENT database
- cache_max_num (int, default: 6)
Maximal number of cached filters to keep in __RECENT database
- cache_dir (str, default: None)
Path where caches will be saved locally. If None, will default to
                ~/.tfutils/<host:port>/<dbname>/<collname>/<exp_id>.
model_params (dict): Containing function that produces model and arguments to that function.
- model_params['func']
The function producing the model.
The function's signature is:
Args:
- ``inputs``: data object
- ``train`` (boolean): if in training or testing
- ``seed`` (int): seed for use in random generation
Returns:
- ``outputs`` (tf.Operations): train output tensorflow nodes
- Additional configurations you want to store in database
- Remaining items in model_params are dictionary of arguments passed to func.
train_params (dict): Containing params for data sources and targets in training.
- train_params['data_params']
This contains params for the data
- ``train_params['data_params']['func']`` is the function that constructs the data:
The function's signature is:
Args:
- ``batch_size``: Batch size for input data
Returns:
- ``inputs``: A dictionary of tensors that will be sent to model function
- ``train_params['data_params']['batch_size']`` batch size of the data, will be sent to func
- Remainder of ``train_params['data_params']`` are kwargs passed to func
- train_params['targets'] (optional)
contains params for additional train targets
- ``train_params['targets']['func']`` is a function that produces tensorflow nodes as training targets:
The function's signature is:
Args:
- ``inputs``: returned values of ``train_params['data_params']['func']``
- ``output``: first returned value of ``train_params['model_params']['func']``
Returns:
A dictionary of tensors that will be computed and stored in the database
- Remainder of ``train_parms['targets']`` are arguments to func.
- train_params['validate_first'] (optional, bool, default is True):
controls whether validating before training
- train_params['thres_loss'] (optional, float, default: 100):
If loss exceeds this during training, HiLossError is thrown
- train_params['num_steps'] (int or None, default: None):
How many total steps of the optimization are run.
If None, train is run until process is cancelled.
loss_params (dict): Parameters for helper.get_loss_base function to build loss.
- loss_params['pred_targets'] (a string or a list of strings):
contain the names of inputs nodes that will be sent into the loss function
- loss_params['loss_func']:
the function used to calculate the loss. Must be provided.
- loss_params['loss_func_kwargs'] (dict):
Keyword parameters sent to ``loss_params['loss_func']``. Default is {}.
- loss_params['agg_func']:
The aggregate function, default is None.
- loss_params['agg_func_kwargs']:
Keyword parameters sent to ``loss_params['agg_func']``. Default is {}.
- loss_params['loss_per_case_func'] (Deprecated):
Deprecated parameter, the same as ``loss_params['loss_func']``.
        - loss_params['targets'] (Deprecated):
            Deprecated parameter, the same as ``loss_params['pred_targets']``.
learning_rate_params (dict): Parameters for specifying learning_rate.
- learning_rate_params['func']:
The function producing tensorflow node acting as learning rate.
This function must accept argument ``global_step``.
- remainder of learning_rate_params are arguments to func.
optimizer_params (dict): Parameters for creating optimizer.
- optimizer_params['optimizer']:
A class producing an optimizer object,
which should have function ``compute_gradients`` and ``apply_gradients``.
            The signatures of these two functions are similar to those of the basic tensorflow optimizer classes.
Must accept:
- "learning_rate" -- the result of the learning_rate_func call
- Remainder of optimizer_params (aside form "optimizer") are arguments
to the optimizer func
- optimizer_params['func'] (Deprecated):
Deprecated parameter, the same as ``optimizer_params['optimizer']``.
validation_params (dict): Dictionary of validation sources. The structure if this dictionary is:
{
<validation_target_name_1>: {
data: {
'func': (callable) data source function for this validation,
<kwarg1>: <value1> for 'func',
...
},
targets: {
'func': (callable) returning targets,
<kwarg1>: <value1> for 'func',
...
},
num_steps (int):
number of batches of validation source to compute,
agg_func (optional, callable):
how to aggregate validation results
across batches after computation. Signature is:
- one input argument: the list of validation batch results
- one output: aggregated version
Default is ``utils.identity_func``
online_agg_func (optional, callable):
how to aggregate validation results
                on a per-batch basis. Signature is:
- three input arguments: (current aggregate, new result, step)
- one output: new aggregated result
On first step, current aggregate passed in is None.
The final result is passed to the "agg_func".
Default is ``utils.append_and_return``
},
<validation_target_name_2>: ...
}
For each validation_target_name key, the targets are computed and then added to
the output dictionary to be computed every so often -- unlike train_targets which
        are computed on each time step, these are computed on a basis controlled by the
        ``save_valid_freq`` specified in the save_params.
load_params (dict):
Similar to save_params, if you want loading to happen from a different
location than where saving occurs. Parameters include:
- host (str)
Hostname where database connection lives
- port (int)
Port where database connection lives
- dbname (str)
Name of database for storage
- collname (str)
Name of collection for storage
- exp_id (str)
Experiment id descriptor
- do_restore (bool, default: True)
Whether to restore from saved model
- query (dict)
mongodb query describing how to load from loading database
- from_ckpt (string)
Path to load from a TensorFlow checkpoint (instead of from the db)
- to_restore (list of strings or a regex/callable which returns strings)
Specifies which variables should be loaded from the checkpoint.
Any variables not specified here will be reinitialized.
- load_param_dict (dict)
A dictionary whose keys are the names of the variables that are to be loaded
from the checkpoint, and the values are the names of the variables of the model
that you want to restore with the value of the corresponding checkpoint variable.
log_device_placement (bool, default is False):
Advanced parameter. Whether to log device placement in tensorflow session
dont_run (bool, default is False):
Advanced parameter. Whether returning everything, not actually training
skip_check (bool, default is False):
Advanced parameter. Whether skipping github check, could be useful when working in detached head
"""
    # use tpu only if a tpu_name has been specified and this is not a multi-model run
if isinstance(model_params, list): # multi-model mode
use_tpu = (model_params[0].get('tpu_name', None) is not None)
assert(use_tpu is False)
else:
use_tpu = (model_params.get('tpu_name', None) is not None)
if use_tpu:
log.info('Using tpu: %s' %model_params['tpu_name'])
params, train_args = parse_params('train',
model_params,
dont_run=dont_run,
skip_check=skip_check,
load_params=load_params,
loss_params=loss_params,
save_params=save_params,
train_params=train_params,
optimizer_params=optimizer_params,
validation_params=validation_params,
learning_rate_params=learning_rate_params,
log_device_placement=log_device_placement,
use_tpu=use_tpu or use_estimator
)
if use_estimator or use_tpu:
return tpu_train_from_params(params, train_args, use_tpu=use_tpu)
else:
with tf.Graph().as_default(), tf.device(DEFAULT_HOST):
# For convenience, use list of dicts instead of dict of lists
_params = [{key: value[i] for (key, value) in params.items()}
for i in range(len(params['model_params']))]
_trargs = [{key: value[i] for (key, value) in train_args.items()}
for i in range(len(params['model_params']))]
# Use a single dataprovider for all models.
data_params = _params[0]['train_params']['data_params']
_params[0]['train_params']['data_params'], inputs \
= get_data(**data_params)
# Build a graph for each distinct model.
var_manager_list = []
for param, trarg in zip(_params, _trargs):
_, _, param, trarg, var_manager \
= get_model(inputs,
param['model_params'],
param=param,
trarg=trarg)
trarg['validation_targets'], _ = \
get_valid_targets_dict(
var_manager=var_manager,
**param)
var_manager_list.append(var_manager)
# Create session.
gpu_options = tf.GPUOptions(allow_growth=True)
sess = tf.Session(
config=tf.ConfigProto(
allow_soft_placement=True,
gpu_options=gpu_options,
log_device_placement=log_device_placement,
))
# Initialize variables here
init_op_global = tf.global_variables_initializer()
sess.run(init_op_global)
init_op_local = tf.local_variables_initializer()
sess.run(init_op_local)
log.info('Initialized from scratch first')
# Build database interface for each model
# This interface class will handle the records saving, model saving, and
# model restoring.
for param, trarg, var_manager in zip(_params, _trargs, var_manager_list):
trarg['dbinterface'] = DBInterface(sess=sess,
params=param,
var_manager=var_manager,
global_step=trarg['global_step'],
save_params=param['save_params'],
load_params=param['load_params'])
## Model will be restored from saved database here
trarg['dbinterface'].initialize()
# Convert back to a dictionary of lists
params = {key: [param[key] for param in _params]
for key in _params[0].keys()}
train_args = {key: [trarg[key] for trarg in _trargs]
for key in _trargs[0].keys()}
if dont_run:
return train_args
return train(sess, **train_args)
def train(sess,
dbinterface,
train_loop,
train_targets,
global_step,
num_minibatches=1,
num_steps=float('inf'),
thres_loss=DEFAULT_TRAIN_THRES_LOSS,
validate_first=True,
validation_targets=None):
"""Actually runs the training evaluation loop.
Args:
sess (tensorflow.Session):
Object in which to run calculations.
dbinterface (DBInterface object): Saver through which to save results.
train_loop (callable with args: sess and train_targets):
Callable that specifies a custom training loop
train_targets (dict of tensorflow nodes): Targets to train.
One item in this dict must be "optimizer" or similar
to make anything happen
num_minibatches (int): How many minibatches to accumulate before applying a gradient update.
num_steps (int): How many steps to train before quitting
validation_targets (dict of tensorflow objects, default: None):
Objects on which validation will be computed
thres_loss (float, default: 100):
If loss exceeds this during training, HiLossError is thrown
"""
# Collect args in a dict of lists
train_args = {
'num_steps': num_steps,
'thres_loss': thres_loss,
'train_loop': train_loop,
'global_step': global_step,
'dbinterface': dbinterface,
'train_targets': train_targets,
'validate_first': validate_first,
'num_minibatches': num_minibatches,
'validation_targets': validation_targets}
# Convert to a list of dicts
trargs = [{key: value[i] for (key, value) in train_args.items()}
for i in range(len(train_targets))]
num_steps = [t['num_steps'] for t in trargs]
steps = [t['global_step'].eval(session=sess) for t in trargs]
# Start initial validation
for (step, trarg) in zip(steps, trargs):
if step >= trarg['num_steps']:
log.info('Training cancelled since step ({}) is >= num_steps ({})'.
format(step, trarg['num_steps']))
return
log.info('Training beginning ...')
if step == 0:
trarg['dbinterface'].start_time_step = time.time()
if trarg['validate_first']:
valid_res = run_all_validations(
sess,
trarg['validation_targets'],
dbinterface=trarg['dbinterface'])
train_loop = train_args['train_loop'][0]
train_targets = train_args['train_targets']
# Run training
while any(step < num_step for (step, num_step) in zip(steps, num_steps)):
start_time_step = time.time()
train_results = train_loop(
sess, train_targets,
num_minibatches=trarg['num_minibatches'])
for (step, trarg, train_res) in zip(steps, trargs, train_results):
old_step = step
step = trarg['global_step'].eval(session=sess)
if step <= old_step:
raise NoChangeError(\
'Your optimizer should have incremented the global step,'
' but did not: old_step=%d, new_step=%d' \
% (old_step, step))
if np.isnan(train_res['loss']):
raise NanLossError(\
'Loss has become NaN')
if train_res['loss'] > trarg['thres_loss']:
raise HiLossError(\
'Loss {:.2f} exceeded the threshold {:.2f}'.format(
train_res['loss'],
trarg['thres_loss']))
# Validation
vtargs = trarg['validation_targets'] \
if step % trarg['dbinterface'].save_valid_freq == 0 else {}
valid_res = run_all_validations(sess, vtargs)
# Save
trarg['dbinterface'].start_time_step = start_time_step
trarg['dbinterface'].save(train_res=train_res,
valid_res=valid_res,
validation_only=False)
steps = [t['global_step'].eval(session=sess) for t in trargs]
# Sync and close the session
res = []
for trarg in trargs:
trarg['dbinterface'].sync_with_host()
res.append(trarg['dbinterface'].outrecs)
sess.close()
return res
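# Illustrative sketch (not part of the original file): a minimal `train_loop`
# callable with the signature train() above expects -- it is called as
# train_loop(sess, train_targets, num_minibatches=...) and must return one
# result dict (containing at least 'loss') per model so the zip over trargs
# works. The loss averaging here is an assumption; tfutils' real default loop
# may differ.
def example_train_loop(sess, train_targets, num_minibatches=1):
    results = []
    for targets in train_targets:
        losses = []
        for _ in range(num_minibatches):
            # each targets dict must include an 'optimizer'-like op and a 'loss' tensor
            out = sess.run(targets)
            losses.append(out['loss'])
        out['loss'] = sum(losses) / float(len(losses))
        results.append(out)
    return results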
| {
"repo_name": "neuroailab/tfutils",
"path": "tfutils/train.py",
"copies": "1",
"size": "22221",
"license": "mit",
"hash": -6003037070600578000,
"line_mean": 42.0639534884,
"line_max": 119,
"alpha_frac": 0.5507852932,
"autogenerated": false,
"ratio": 4.907464664310954,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0027235133914969576,
"num_lines": 516
} |
from __future__ import absolute_import, division, print_function
import time
import json
import os
import tempfile
import threading
from collections import defaultdict, Iterable
import numpy as np
from lcm import LCM
from robotlocomotion import viewer2_comms_t
from director.thirdparty import transformations
class ClientIDFactory(object):
def __init__(self):
self.pid = os.getpid()
self.counter = 0
def new_client_id(self):
self.counter += 1
return "py_{:d}_{:d}".format(self.pid, self.counter)
CLIENT_ID_FACTORY = ClientIDFactory()
def to_lcm(data):
msg = viewer2_comms_t()
msg.utime = data["utime"]
msg.format = "treeviewer_json"
msg.format_version_major = 1
msg.format_version_minor = 0
msg.data = bytearray(json.dumps(data), encoding='utf-8')
msg.num_bytes = len(msg.data)
return msg
def serialize_transform(tform):
return {
"translation": list(transformations.translation_from_matrix(tform)),
"quaternion": list(transformations.quaternion_from_matrix(tform))
}
class GeometryData(object):
__slots__ = ["geometry", "color", "transform"]
def __init__(self, geometry, color=(1., 1., 1., 1.), transform=np.eye(4)):
self.geometry = geometry
self.color = color
self.transform = transform
def serialize(self):
params = self.geometry.serialize()
params["color"] = list(self.color)
params["transform"] = serialize_transform(self.transform)
return params
class BaseGeometry(object):
def serialize(self):
raise NotImplementedError()
class Box(BaseGeometry):
__slots__ = ["lengths"]
def __init__(self, lengths=[1,1,1]):
self.lengths = lengths
def serialize(self):
return {
"type": "box",
"lengths": list(self.lengths)
}
class Sphere(BaseGeometry):
__slots__ = ["radius"]
def __init__(self, radius=1):
self.radius = radius
def serialize(self):
return {
"type": "sphere",
"radius": self.radius
}
class Ellipsoid(BaseGeometry):
__slots__ = ["radii"]
def __init__(self, radii=[1,1,1]):
self.radii = radii
def serialize(self):
return {
"type": "ellipsoid",
"radii": list(self.radii)
}
class Cylinder(BaseGeometry):
__slots__ = ["length", "radius"]
def __init__(self, length=1, radius=1):
self.length = length
self.radius = radius
def serialize(self):
return {
"type": "cylinder",
"length": self.length,
"radius": self.radius
}
class Triad(BaseGeometry):
__slots__ = ["tube", "scale"]
def __init__(self, scale=1.0, tube=False):
self.scale = scale
self.tube = tube
def serialize(self):
return {
"type": "triad",
"scale": self.scale,
"tube": self.tube
}
class PointCloud(BaseGeometry):
__slots__ = ["points", "channels"]
def __init__(self, points, channels={}):
self.points = points
self.channels = channels
def serialize(self):
return {
"type": "pointcloud",
"points": [list(p) for p in self.points],
"channels": {name: [list(c) for c in values] for (name, values) in self.channels.iteritems()}
}
class PolyLine(BaseGeometry):
def __init__(self, points, radius=0.01, closed=False,
start_head=False, end_head=False,
head_radius=0.05, head_length=None):
self.points = points
self.radius = radius
self.closed = closed
self.start_head = start_head
self.end_head = end_head
self.head_radius = head_radius
self.head_length = head_length if head_length is not None else head_radius
def serialize(self):
data = {
"type": "line",
"points": [list(p) for p in self.points],
"radius": self.radius,
"closed": self.closed
}
if self.start_head or self.end_head:
data["start_head"] = self.start_head
data["end_head"] = self.end_head
data["head_radius"] = self.head_radius
data["head_length"] = self.head_length
return data
class LazyTree(object):
__slots__ = ["geometries", "transform", "children"]
def __init__(self, geometries=None, transform=np.eye(4)):
if geometries is None:
geometries = []
self.geometries = geometries
self.transform = transform
self.children = defaultdict(lambda: LazyTree())
def __getitem__(self, item):
return self.children[item]
def getdescendant(self, path):
t = self
for p in path:
t = t[p]
return t
def descendants(self, prefix=tuple()):
result = []
for (key, val) in self.children.items():
childpath = prefix + (key,)
result.append(childpath)
result.extend(val.descendants(childpath))
return result
class CommandQueue(object):
def __init__(self):
self.settransform = set()
self.setgeometry = set()
self.delete = set()
def isempty(self):
return not (self.settransform or self.setgeometry or self.delete)
def empty(self):
self.settransform = set()
self.setgeometry = set()
self.delete = set()
class Visualizer(object):
"""
A Visualizer is a lightweight object that contains a CoreVisualizer and a
path. The CoreVisualizer does all of the work of storing geometries and
publishing LCM messages. By storing the path in the Visualizer instance,
we make it easy to do things like store or pass a Visualizer that draws to
a sub-part of the viewer tree.
Many Visualizer objects can all share the same CoreVisualizer.
"""
__slots__ = ["core", "path"]
def __init__(self, path=None, lcm=None, core=None):
if core is None:
core = CoreVisualizer(lcm)
if path is None:
path = tuple()
else:
if isinstance(path, str):
path = tuple(path.split("/"))
if not path[0]:
path = tuple([p for p in path if p])
self.core = core
self.path = path
def setgeometry(self, geomdata):
"""
Set the geometries at this visualizer's path to the given
geomdata (replacing whatever was there before).
geomdata can be any one of:
* a single BaseGeometry
* a single GeometryData
* a collection of any combinations of BaseGeometry and GeometryData
"""
self.core.setgeometry(self.path, geomdata)
return self
def settransform(self, tform):
"""
Set the transform for this visualizer's path (and, implicitly,
any descendants of that path).
tform should be a 4x4 numpy array representing a homogeneous transform
"""
self.core.settransform(self.path, tform)
def delete(self):
"""
Delete the geometry at this visualizer's path.
"""
self.core.delete(self.path)
def __getitem__(self, path):
"""
Indexing into a visualizer returns a new visualizer with the given
path appended to this visualizer's path.
"""
return Visualizer(path=self.path + (path,),
lcm=self.core.lcm,
core=self.core)
def start_handler(self):
"""
Start a Python thread that will subscribe to messages from the remote
viewer and handle those responses. This enables automatic reloading of
geometry into the viewer if, for example, the viewer is restarted
later.
"""
self.core.start_handler()
class CoreVisualizer(object):
def __init__(self, lcm=None):
if lcm is None:
lcm = LCM()
self.lcm = lcm
self.client_id = CLIENT_ID_FACTORY.new_client_id()
self.tree = LazyTree()
self.queue = CommandQueue()
self.publish_immediately = True
self.lcm.subscribe(self._response_channel(),
self._handle_response)
self.handler_thread = None
def _request_channel(self):
return "DIRECTOR_TREE_VIEWER_REQUEST_<{:s}>".format(self.client_id)
def _response_channel(self):
return "DIRECTOR_TREE_VIEWER_RESPONSE_<{:s}>".format(self.client_id)
def _handler_loop(self):
while True:
self.lcm.handle()
def start_handler(self):
if self.handler_thread is not None:
return
self.handler_thread = threading.Thread(
target=self._handler_loop)
self.handler_thread.daemon = True
self.handler_thread.start()
def _handle_response(self, channel, msgdata):
msg = viewer2_comms_t.decode(msgdata)
data = json.loads(msg.data.decode())
if data["status"] == 0:
pass
elif data["status"] == 1:
for path in self.tree.descendants():
self.queue.setgeometry.add(path)
self.queue.settransform.add(path)
else:
raise ValueError(
"Unhandled response from viewer: {}".format(msg.data.decode()))
def setgeometry(self, path, geomdata):
if isinstance(geomdata, BaseGeometry):
self._load(path, [GeometryData(geomdata)])
elif isinstance(geomdata, Iterable):
self._load(path, geomdata)
else:
self._load(path, [geomdata])
def _load(self, path, geoms):
converted_geom_data = []
for geom in geoms:
if isinstance(geom, GeometryData):
converted_geom_data.append(geom)
else:
converted_geom_data.append(GeometryData(geom))
self.tree.getdescendant(path).geometries = converted_geom_data
self.queue.setgeometry.add(path)
self._maybe_publish()
def settransform(self, path, tform):
self.tree.getdescendant(path).transform = tform
self.queue.settransform.add(path)
self._maybe_publish()
def delete(self, path):
if not path:
self.tree = LazyTree()
else:
t = self.tree.getdescendant(path[:-1])
if path[-1] in t.children:
del t.children[path[-1]]
self.queue.delete.add(path)
self._maybe_publish()
def _maybe_publish(self):
if self.publish_immediately:
self.publish()
def publish(self):
if not self.queue.isempty():
data = self.serialize_queue()
msg = to_lcm(data)
self.lcm.publish(self._request_channel(), msg.encode())
self.queue.empty()
def serialize_queue(self):
delete = []
setgeometry = []
settransform = []
for path in self.queue.delete:
delete.append({"path": path})
for path in self.queue.setgeometry:
geoms = self.tree.getdescendant(path).geometries or []
setgeometry.append({
"path": path,
"geometries": [geom.serialize() for geom in geoms]
})
for path in self.queue.settransform:
settransform.append({
"path": path,
"transform": serialize_transform(
self.tree.getdescendant(path).transform)
})
return {
"utime": int(time.time() * 1e6),
"delete": delete,
"setgeometry": setgeometry,
"settransform": settransform
}
if __name__ == '__main__':
# We can provide an initial path if we want
vis = Visualizer(path="/root/folder1")
# Start a thread to handle responses from the viewer. Doing this enables
# the automatic reloading of missing geometry if the viewer is restarted.
vis.start_handler()
vis["boxes"].setgeometry(
[GeometryData(Box([1, 1, 1]),
color=np.random.rand(4),
transform=transformations.translation_matrix([x, -2, 0]))
for x in range(10)])
# Index into the visualizer to get a sub-tree. vis.__getitem__ is lazily
# implemented, so these sub-visualizers come into being as soon as they're
# asked for
vis = vis["group1"]
box_vis = vis["box"]
sphere_vis = vis["sphere"]
box = Box([1, 1, 1])
geom = GeometryData(box, color=[0, 1, 0, 0.5])
box_vis.setgeometry(geom)
sphere_vis.setgeometry(Sphere(0.5))
sphere_vis.settransform(transformations.translation_matrix([1, 0, 0]))
vis["test"].setgeometry(Triad())
vis["test"].settransform(transformations.concatenate_matrices(
transformations.rotation_matrix(1.0, [0, 0, 1]),
transformations.translation_matrix([-1, 0, 1])))
vis["triad"].setgeometry(Triad())
# Setting the geometry preserves the transform at that path.
# Call settransform(np.eye(4)) if you want to clear the transform.
vis["test"].setgeometry(Triad())
# Known bug: the sphere is loaded and replaces the previous
# geometry, but it is not drawn with the correct color mode
vis["test"].setgeometry(Sphere(0.5))
for theta in np.linspace(0, 2 * np.pi, 100):
vis.settransform(transformations.rotation_matrix(theta, [0, 0, 1]))
time.sleep(0.01)
#vis.delete()
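# Illustrative sketch (not part of the original file): batching several updates
# into a single LCM message by toggling CoreVisualizer.publish_immediately and
# flushing the queue once. Assumes a viewer is listening; the geometry names
# below are made up.
def demo_batched_updates():
    vis = Visualizer(path="/root/batched_demo")
    vis.core.publish_immediately = False      # queue commands instead of sending each one
    for i in range(5):
        vis["box_%d" % i].setgeometry(Box([0.2, 0.2, 0.2]))
        vis["box_%d" % i].settransform(
            transformations.translation_matrix([i, 0, 0]))
    vis.core.publish()                        # flush the queued setgeometry/settransform commands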
| {
"repo_name": "patmarion/director",
"path": "src/python/director/viewerclient.py",
"copies": "1",
"size": "13512",
"license": "bsd-3-clause",
"hash": -1701496471069745200,
"line_mean": 29.4324324324,
"line_max": 105,
"alpha_frac": 0.5820011841,
"autogenerated": false,
"ratio": 3.9858407079646017,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0008258504560120366,
"num_lines": 444
} |
from __future__ import absolute_import, division, print_function
import time
import numpy as np
import os
import glob
import warnings
import json
from PIL import Image
from ..datasets import VOT
from ..utils.metrics import poly_iou
from ..utils.viz import show_frame
class ExperimentVOT(object):
r"""Experiment pipeline and evaluation toolkit for VOT dataset.
Notes:
- The tracking results of three types of experiments ``supervised``,
``unsupervised`` and ``realtime`` are compatible with the official
`VOT toolkit <https://github.com/votchallenge/vot-toolkit/>`_.
- TODO: The evaluation function for VOT tracking results is still
under development.
Args:
root_dir (string): Root directory of VOT dataset where sequence
folders exist.
version (integer, optional): Specify the VOT dataset version as one of
2013~2018. Default is 2017.
list_file (string, optional): If provided, only run experiments over
sequences specified by the file.
read_image (boolean, optional): If True, return the read PIL image in
each frame. Otherwise only return the image path. Default is True.
experiments (string or tuple): Specify the type(s) of experiments to run.
Default is a tuple (``supervised``, ``unsupervised``, ``realtime``).
result_dir (string, optional): Directory for storing tracking
results. Default is ``./results``.
report_dir (string, optional): Directory for storing performance
evaluation results. Default is ``./reports``.
"""
def __init__(self, root_dir, version=2017,
read_image=True, list_file=None,
experiments=('supervised', 'unsupervised', 'realtime'),
result_dir='results', report_dir='reports'):
super(ExperimentVOT, self).__init__()
if isinstance(experiments, str):
experiments = (experiments,)
assert all([e in ['supervised', 'unsupervised', 'realtime']
for e in experiments])
self.dataset = VOT(
root_dir, version, anno_type='default',
download=True, return_meta=True, list_file=list_file)
self.experiments = experiments
if version == 'LT2018':
version = '-' + version
self.read_image = read_image
self.result_dir = os.path.join(result_dir, 'VOT' + str(version))
self.report_dir = os.path.join(report_dir, 'VOT' + str(version))
self.skip_initialize = 5
self.burnin = 10
self.repetitions = 15
self.sensitive = 100
self.nbins_eao = 1500
self.tags = ['camera_motion', 'illum_change', 'occlusion',
'size_change', 'motion_change', 'empty']
def run(self, tracker, visualize=False):
print('Running tracker %s on %s...' % (
tracker.name, type(self.dataset).__name__))
# run all specified experiments
if 'supervised' in self.experiments:
self.run_supervised(tracker, visualize)
if 'unsupervised' in self.experiments:
self.run_unsupervised(tracker, visualize)
if 'realtime' in self.experiments:
self.run_realtime(tracker, visualize)
def run_supervised(self, tracker, visualize=False):
print('Running supervised experiment...')
# loop over the complete dataset
for s, (img_files, anno, _) in enumerate(self.dataset):
seq_name = self.dataset.seq_names[s]
print('--Sequence %d/%d: %s' % (s + 1, len(self.dataset), seq_name))
# rectangular bounding boxes
anno_rects = anno.copy()
if anno_rects.shape[1] == 8:
anno_rects = self.dataset._corner2rect(anno_rects)
# run multiple repetitions for each sequence
for r in range(self.repetitions):
# check if the tracker is deterministic
if r > 0 and tracker.is_deterministic:
break
elif r == 3 and self._check_deterministic('baseline', tracker.name, seq_name):
print(' Detected a deterministic tracker, ' +
'skipping remaining trials.')
break
print(' Repetition: %d' % (r + 1))
# skip if results exist
record_file = os.path.join(
self.result_dir, tracker.name, 'baseline', seq_name,
'%s_%03d.txt' % (seq_name, r + 1))
if os.path.exists(record_file):
print(' Found results, skipping', seq_name)
continue
# state variables
boxes = []
times = []
failure = False
next_start = -1
# tracking loop
for f, img_file in enumerate(img_files):
image = Image.open(img_file)
if self.read_image:
frame = image
else:
frame = img_file
start_time = time.time()
if f == 0:
# initial frame
tracker.init(frame, anno_rects[0])
boxes.append([1])
elif failure:
# during failure frames
if f == next_start:
failure = False
tracker.init(frame, anno_rects[f])
boxes.append([1])
else:
start_time = np.NaN
boxes.append([0])
else:
# during success frames
box = tracker.update(frame)
iou = poly_iou(anno[f], box, bound=image.size)
if iou <= 0.0:
# tracking failure
failure = True
next_start = f + self.skip_initialize
boxes.append([2])
else:
# tracking succeed
boxes.append(box)
# store elapsed time
times.append(time.time() - start_time)
# visualize if required
if visualize:
if len(boxes[-1]) == 4:
show_frame(image, boxes[-1])
else:
show_frame(image)
# record results
self._record(record_file, boxes, times)
def run_unsupervised(self, tracker, visualize=False):
print('Running unsupervised experiment...')
# loop over the complete dataset
for s, (img_files, anno, _) in enumerate(self.dataset):
seq_name = self.dataset.seq_names[s]
print('--Sequence %d/%d: %s' % (s + 1, len(self.dataset), seq_name))
# skip if results exist
record_file = os.path.join(
self.result_dir, tracker.name, 'unsupervised', seq_name,
'%s_001.txt' % seq_name)
if os.path.exists(record_file):
print(' Found results, skipping', seq_name)
continue
# rectangular bounding boxes
anno_rects = anno.copy()
if anno_rects.shape[1] == 8:
anno_rects = self.dataset._corner2rect(anno_rects)
# tracking loop
boxes, times = tracker.track(
img_files, anno_rects[0], visualize=visualize)
assert len(boxes) == len(anno)
# re-formatting
boxes = list(boxes)
boxes[0] = [1]
# record results
self._record(record_file, boxes, times)
def run_realtime(self, tracker, visualize=False):
print('Running real-time experiment...')
# loop over the complete dataset
for s, (img_files, anno, _) in enumerate(self.dataset):
seq_name = self.dataset.seq_names[s]
print('--Sequence %d/%d: %s' % (s + 1, len(self.dataset), seq_name))
# skip if results exist
record_file = os.path.join(
self.result_dir, tracker.name, 'realtime', seq_name,
'%s_001.txt' % seq_name)
if os.path.exists(record_file):
print(' Found results, skipping', seq_name)
continue
# rectangular bounding boxes
anno_rects = anno.copy()
if anno_rects.shape[1] == 8:
anno_rects = self.dataset._corner2rect(anno_rects)
# state variables
boxes = []
times = []
next_start = 0
failure = False
failed_frame = -1
total_time = 0.0
grace = 3 - 1
offset = 0
# tracking loop
for f, img_file in enumerate(img_files):
image = Image.open(img_file)
if self.read_image:
frame = image
else:
frame = img_file
start_time = time.time()
if f == next_start:
# during initial frames
tracker.init(frame, anno_rects[f])
boxes.append([1])
# reset state variables
failure = False
failed_frame = -1
total_time = 0.0
grace = 3 - 1
offset = f
elif not failure:
# during success frames
# calculate current frame
if grace > 0:
total_time += 1000.0 / 25
grace -= 1
else:
total_time += max(1000.0 / 25, last_time * 1000.0)
current = offset + int(np.round(np.floor(total_time * 25) / 1000.0))
# delayed/tracked bounding box
if f < current:
box = boxes[-1]
elif f == current:
box = tracker.update(frame)
iou = poly_iou(anno[f], box, bound=image.size)
if iou <= 0.0:
# tracking failure
failure = True
failed_frame = f
next_start = current + self.skip_initialize
boxes.append([2])
else:
# tracking succeed
boxes.append(box)
else:
# during failure frames
if f < current:
# skipping frame due to slow speed
boxes.append([0])
start_time = np.NaN
elif f == current:
# current frame
box = tracker.update(frame)
iou = poly_iou(anno[f], box, bound=image.size)
if iou <= 0.0:
# tracking failure
boxes.append([2])
boxes[failed_frame] = [0]
times[failed_frame] = np.NaN
else:
# tracking succeed
boxes.append(box)
elif f < next_start:
# skipping frame due to failure
boxes.append([0])
start_time = np.NaN
# store elapsed time
last_time = time.time() - start_time
times.append(last_time)
# visualize if required
if visualize:
if len(boxes[-1]) == 4:
show_frame(image, boxes[-1])
else:
show_frame(image)
# record results
self._record(record_file, boxes, times)
def report(self, tracker_names):
assert isinstance(tracker_names, (list, tuple))
# function for loading results
def read_record(filename):
with open(filename) as f:
record = f.read().strip().split('\n')
record = [[float(t) for t in line.split(',')]
for line in record]
return record
# assume tracker_names[0] is your tracker
report_dir = os.path.join(self.report_dir, tracker_names[0])
if not os.path.exists(report_dir):
os.makedirs(report_dir)
report_file = os.path.join(report_dir, 'performance.json')
performance = {}
for name in tracker_names:
print('Evaluating', name)
ious = {}
ious_full = {}
failures = {}
times = {}
masks = {} # frame masks for attribute tags
for s, (img_files, anno, meta) in enumerate(self.dataset):
seq_name = self.dataset.seq_names[s]
# initialize frames scores
frame_num = len(img_files)
ious[seq_name] = np.full(
(self.repetitions, frame_num), np.nan, dtype=float)
ious_full[seq_name] = np.full(
(self.repetitions, frame_num), np.nan, dtype=float)
failures[seq_name] = np.full(
(self.repetitions, frame_num), np.nan, dtype=float)
times[seq_name] = np.full(
(self.repetitions, frame_num), np.nan, dtype=float)
# read results of all repetitions
record_files = sorted(glob.glob(os.path.join(
self.result_dir, name, 'baseline', seq_name,
'%s_[0-9]*.txt' % seq_name)))
boxes = [read_record(f) for f in record_files]
assert all([len(b) == len(anno) for b in boxes])
# calculate frame ious with burnin
bound = Image.open(img_files[0]).size
seq_ious = [self._calc_iou(b, anno, bound, burnin=True)
for b in boxes]
ious[seq_name][:len(seq_ious), :] = seq_ious
# calculate frame ious without burnin
seq_ious_full = [self._calc_iou(b, anno, bound)
for b in boxes]
ious_full[seq_name][:len(seq_ious_full), :] = seq_ious_full
# calculate frame failures
seq_failures = [
[len(b) == 1 and b[0] == 2 for b in boxes_per_rep]
for boxes_per_rep in boxes]
failures[seq_name][:len(seq_failures), :] = seq_failures
# collect frame runtimes
time_file = os.path.join(
self.result_dir, name, 'baseline', seq_name,
'%s_time.txt' % seq_name)
if os.path.exists(time_file):
seq_times = np.loadtxt(time_file, delimiter=',').T
times[seq_name][:len(seq_times), :] = seq_times
# collect attribute masks
tag_num = len(self.tags)
masks[seq_name] = np.zeros((tag_num, frame_num), bool)
for i, tag in enumerate(self.tags):
if tag in meta:
masks[seq_name][i, :] = meta[tag]
# frames with no tags
if 'empty' in self.tags:
tag_frames = np.array([
v for k, v in meta.items()
if not 'practical' in k], dtype=bool)
ind = self.tags.index('empty')
masks[seq_name][ind, :] = \
~np.logical_or.reduce(tag_frames, axis=0)
# concatenate frames
seq_names = self.dataset.seq_names
masks = np.concatenate(
[masks[s] for s in seq_names], axis=1)
ious = np.concatenate(
[ious[s] for s in seq_names], axis=1)
failures = np.concatenate(
[failures[s] for s in seq_names], axis=1)
with warnings.catch_warnings():
# average over repetitions
warnings.simplefilter('ignore', category=RuntimeWarning)
ious = np.nanmean(ious, axis=0)
failures = np.nanmean(failures, axis=0)
# calculate average overlaps and failures for each tag
tag_ious = np.array(
[np.nanmean(ious[m]) for m in masks])
tag_failures = np.array(
[np.nansum(failures[m]) for m in masks])
tag_frames = masks.sum(axis=1)
# remove nan values
tag_ious[np.isnan(tag_ious)] = 0.0
tag_weights = tag_frames / tag_frames.sum()
# calculate weighted accuracy and robustness
accuracy = np.sum(tag_ious * tag_weights)
robustness = np.sum(tag_failures * tag_weights)
# calculate tracking speed
times = np.concatenate([
t.reshape(-1) for t in times.values()])
# remove invalid values
times = times[~np.isnan(times)]
times = times[times > 0]
if len(times) > 0:
speed = np.mean(1. / times)
else:
speed = -1
performance.update({name: {
'accuracy': accuracy,
'robustness': robustness,
'speed_fps': speed}})
# save performance
with open(report_file, 'w') as f:
json.dump(performance, f, indent=4)
print('Performance saved at', report_file)
return performance
def show(self, tracker_names, seq_names=None, play_speed=1,
experiment='supervised'):
if seq_names is None:
seq_names = self.dataset.seq_names
elif isinstance(seq_names, str):
seq_names = [seq_names]
assert isinstance(tracker_names, (list, tuple))
assert isinstance(seq_names, (list, tuple))
assert experiment in ['supervised', 'unsupervised', 'realtime']
play_speed = int(round(play_speed))
assert play_speed > 0
# "supervised" experiment results are stored in "baseline" folder
if experiment == 'supervised':
experiment = 'baseline'
# function for loading results
def read_record(filename):
with open(filename) as f:
record = f.read().strip().split('\n')
record = [[float(t) for t in line.split(',')]
for line in record]
for i, r in enumerate(record):
if len(r) == 4:
record[i] = np.array(r)
elif len(r) == 8:
r = np.array(r)[np.newaxis, :]
r = self.dataset._corner2rect(r)
record[i] = r[0]
else:
record[i] = np.zeros(4)
return record
for s, seq_name in enumerate(seq_names):
print('[%d/%d] Showing results on %s...' % (
s + 1, len(seq_names), seq_name))
# load all tracking results
records = {}
for name in tracker_names:
record_file = os.path.join(
self.result_dir, name, experiment, seq_name,
'%s_001.txt' % seq_name)
records[name] = read_record(record_file)
# loop over the sequence and display results
img_files, anno, _ = self.dataset[seq_name]
if anno.shape[1] == 8:
anno = self.dataset._corner2rect(anno)
for f, img_file in enumerate(img_files):
if not f % play_speed == 0:
continue
image = Image.open(img_file)
boxes = [anno[f]] + [
records[name][f] for name in tracker_names]
show_frame(image, boxes,
legends=['GroundTruth'] + tracker_names,
colors=['w', 'r', 'g', 'b', 'c', 'm', 'y',
'orange', 'purple', 'brown', 'pink'])
def _record(self, record_file, boxes, times):
# convert boxes to string
lines = []
for box in boxes:
if len(box) == 1:
lines.append('%d' % box[0])
else:
lines.append(str.join(',', ['%.4f' % t for t in box]))
# record bounding boxes
record_dir = os.path.dirname(record_file)
if not os.path.isdir(record_dir):
os.makedirs(record_dir)
with open(record_file, 'w') as f:
f.write(str.join('\n', lines))
print(' Results recorded at', record_file)
# convert times to string
lines = ['%.4f' % t for t in times]
lines = [t.replace('nan', 'NaN') for t in lines]
# record running times
time_file = record_file[:record_file.rfind('_')] + '_time.txt'
if os.path.exists(time_file):
with open(time_file) as f:
exist_lines = f.read().strip().split('\n')
lines = [t + ',' + s for t, s in zip(exist_lines, lines)]
with open(time_file, 'w') as f:
f.write(str.join('\n', lines))
def _check_deterministic(self, exp, tracker_name, seq_name):
record_dir = os.path.join(
self.result_dir, tracker_name, exp, seq_name)
record_files = sorted(glob.glob(os.path.join(
record_dir, '%s_[0-9]*.txt' % seq_name)))
if len(record_files) < 3:
return False
records = []
for record_file in record_files:
with open(record_file, 'r') as f:
records.append(f.read())
return len(set(records)) == 1
def _calc_iou(self, boxes, anno, bound, burnin=False):
# skip initialization frames
if burnin:
boxes = boxes.copy()
init_inds = [i for i, box in enumerate(boxes)
if box == [1.0]]
for ind in init_inds:
boxes[ind:ind + self.burnin] = [[0]] * self.burnin
# calculate polygon ious
ious = np.array([poly_iou(np.array(a), b, bound)
if len(a) > 1 else np.NaN
for a, b in zip(boxes, anno)])
return ious
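# Illustrative sketch (not part of the original file): the minimal tracker
# interface that ExperimentVOT.run() drives above -- a name, an
# is_deterministic flag, init()/update() for the supervised and realtime
# loops, and track() for the unsupervised loop. The constant-box behaviour is
# only a placeholder.
class _StaticTracker(object):
    def __init__(self):
        self.name = 'StaticTracker'
        self.is_deterministic = True
    def init(self, image, box):
        self._box = box
    def update(self, image):
        return self._box
    def track(self, img_files, init_box, visualize=False):
        boxes, times = [], []
        for f, img_file in enumerate(img_files):
            start = time.time()
            if f == 0:
                self.init(Image.open(img_file), init_box)
                boxes.append(init_box)
            else:
                boxes.append(self.update(Image.open(img_file)))
            times.append(time.time() - start)
        return np.array(boxes), np.array(times)
# Usage (assumes the VOT data can be downloaded to the given root directory):
#   ExperimentVOT('data/vot2017', version=2017).run(_StaticTracker())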
| {
"repo_name": "got-10k/toolkit",
"path": "got10k/experiments/vot.py",
"copies": "1",
"size": "22933",
"license": "mit",
"hash": -2196561216249271000,
"line_mean": 39.0926573427,
"line_max": 94,
"alpha_frac": 0.4772162386,
"autogenerated": false,
"ratio": 4.434925546315993,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0015231623254177119,
"num_lines": 572
} |
from __future__ import absolute_import, division, print_function
import time
import os
import construct
import idna
from tlsenum import hello_constructs
from tlsenum.mappings import (
CipherSuites, ECCurves, ECPointFormat, TLSProtocolVersion
)
class ClientHello(object):
@property
def protocol_version(self):
return self._protocol_version
@protocol_version.setter
def protocol_version(self, protocol_version):
assert protocol_version in ["3.0", "1.0", "1.1", "1.2"]
self._protocol_version = protocol_version
self._protocol_minor = TLSProtocolVersion.index(protocol_version)
@property
def cipher_suites(self):
return self._cipher_suites
@cipher_suites.setter
def cipher_suites(self, cipher_suites):
self._cipher_suites = cipher_suites
@property
def deflate(self):
return self._deflate
@deflate.setter
def deflate(self, deflate):
self._deflate = deflate
if deflate:
self._compression_method = [1, 0]
else:
self._compression_method = [0]
@property
def extensions(self):
return self._extensions
@extensions.setter
def extensions(self, value):
self._extensions = value
def build(self):
protocol_version = construct.Container(
major=3, minor=self._protocol_minor
)
random = construct.Container(
gmt_unix_time=int(time.time()), random_bytes=os.urandom(28)
)
session_id = construct.Container(
length=0, session_id=b""
)
ciphers = construct.Container(
length=len(self._cipher_suites) * 2,
cipher_suites=self._get_bytes_from_cipher_suites(
self._cipher_suites
)
)
compression_method = construct.Container(
length=len(self._compression_method),
compression_methods=self._compression_method
)
client_hello = construct.Container(
version=protocol_version, random=random, session_id=session_id,
cipher_suites=ciphers, compression_methods=compression_method,
extensions_length=len(self._extensions),
extensions_bytes=self._extensions
)
handshake = construct.Container(
handshake_type=1,
length=len(hello_constructs.ClientHello.build(client_hello)),
handshake_struct=client_hello
)
return hello_constructs.TLSPlaintext.build(
construct.Container(
content_type=0x16, version=protocol_version,
length=len(hello_constructs.Handshake.build(handshake)),
content=handshake
)
)
def _get_bytes_from_cipher_suites(self, cipher_suites):
return [CipherSuites[i].value for i in cipher_suites]
class Extensions(object):
def __init__(self):
self._ec_point_format = None
self._ec_curves = None
self._hostname = None
@property
def ec_point_format(self):
return self._ec_point_format
@ec_point_format.setter
def ec_point_format(self, formats):
self._ec_point_format = formats
@property
def ec_curves(self):
return self._ec_curves
@ec_curves.setter
def ec_curves(self, curves):
self._ec_curves = curves
@property
def sni(self):
return self._hostname
@sni.setter
def sni(self, hostname):
self._hostname = hostname
def build(self):
ret = b""
if self._ec_point_format is not None:
ec_point_format_struct = construct.Container(
ec_point_format_length=len(self._ec_point_format),
ec_point_format=self._get_bytes_from_ec_point_format(
self._ec_point_format
)
)
ret += hello_constructs.Extension.build(
construct.Container(
extension_type=11,
extension_length=len(hello_constructs.ECPointFormat.build(
ec_point_format_struct
)),
extension_struct=ec_point_format_struct
)
)
if self._ec_curves is not None:
ec_curves_struct = construct.Container(
ec_curves_length=len(self._ec_curves) * 2,
named_curves=self._get_bytes_from_ec_curves(
self._ec_curves
)
)
ret += hello_constructs.Extension.build(
construct.Container(
extension_type=10,
extension_length=len(hello_constructs.ECCurves.build(
ec_curves_struct
)),
extension_struct=ec_curves_struct
)
)
if self._hostname is not None:
encoded_hostname = idna.encode(self._hostname)
sni_struct = construct.Container(
server_name_list_length=len(encoded_hostname) + 3,
name_type=0,
server_name_length=len(encoded_hostname),
server_name=encoded_hostname
)
ret += hello_constructs.Extension.build(
construct.Container(
extension_type=0,
extension_length=len(hello_constructs.ServerName.build(
sni_struct
)),
extension_struct=sni_struct
)
)
return ret
def _get_bytes_from_ec_point_format(self, ec_point_format):
return [ECPointFormat[i].value for i in ec_point_format]
def _get_bytes_from_ec_curves(self, ec_curves):
return [ECCurves[i].value for i in ec_curves]
class ServerHello(object):
def __init__(self, protocol_version, cipher_suite, deflate):
self._protocol_version = protocol_version
self._cipher_suite = cipher_suite
self._deflate = deflate
@property
def protocol_version(self):
return self._protocol_version
@property
def cipher_suite(self):
return self._cipher_suite
@property
def deflate(self):
return self._deflate
@classmethod
def parse_server_hello(cls, data):
server_hello = hello_constructs.TLSPlaintext.parse(data)
if server_hello.content_type == 21:
if server_hello.content.alert_description == 40:
raise HandshakeFailure()
elif server_hello.content.alert_description == 70:
raise ProtocolVersionFailure()
else:
raise ValueError("Unknown TLS Alert, type {0}".format(
server_hello.content.alert_description
))
protocol_minor = server_hello.content.handshake_struct.version.minor
protocol_version = TLSProtocolVersion[protocol_minor]
cipher_suite = CipherSuites(
server_hello.content.handshake_struct.cipher_suite
).name
if server_hello.content.handshake_struct.compression_method == 1:
deflate = True
else:
deflate = False
return cls(protocol_version, cipher_suite, deflate)
class HandshakeFailure(Exception):
pass
class ProtocolVersionFailure(Exception):
pass
def construct_sslv2_client_hello(): # pragma: no cover
"""
Returns an SSLv2 ClientHello message in bytes.
This is a quick and dirty function to return an SSLv2 ClientHello with all
7 specified cipher suites. I don't really want to enumerate the supported
SSLv2 cipher suites so this doesn't have to be flexible...
This function does not require test coverage because I am simply returning
bytes constructed from a fixed list.
"""
return bytes([
0x80, 0x2e, # Length of record
0x01, # Handshake Type (0x01 for ClientHello)
0x00, 0x02, # SSL Version Identifier (0x0002 for SSLv2)
0x00, 0x15, # Length of cipher suites list
0x00, 0x00, # Session ID Length
0x00, 0x10, # Challenge Length
# Cipher suites list
0x01, 0x00, 0x80,
0x02, 0x00, 0x80,
0x03, 0x00, 0x80,
0x04, 0x00, 0x80,
0x05, 0x00, 0x80,
0x06, 0x00, 0x40,
0x07, 0x00, 0xc0,
# Challenge
0x53, 0x43, 0x5b, 0x90, 0x9d, 0x9b, 0x72, 0x0b,
0xbc, 0x0c, 0xbc, 0x2b, 0x92, 0xa8, 0x48, 0x97,
])
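# Illustrative sketch (not part of the original file): assembling a ClientHello
# with the classes above and parsing the server's reply. The cipher suite name
# is an assumption -- it must match a member of the CipherSuites enum in
# tlsenum.mappings -- and the socket handling is deliberately minimal (a single
# recv() may not return the full record).
def example_probe(host, port=443):
    import socket
    extensions = Extensions()
    extensions.sni = host
    client_hello = ClientHello()
    client_hello.protocol_version = "1.2"
    client_hello.deflate = False
    client_hello.cipher_suites = ["TLS_RSA_WITH_AES_128_CBC_SHA"]  # assumed enum member name
    client_hello.extensions = extensions.build()
    s = socket.create_connection((host, port), timeout=5)
    try:
        s.sendall(client_hello.build())
        server_hello = ServerHello.parse_server_hello(s.recv(65536))
        return server_hello.protocol_version, server_hello.cipher_suite
    finally:
        s.close()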
| {
"repo_name": "Ayrx/tlsenum",
"path": "tlsenum/parse_hello.py",
"copies": "1",
"size": "8585",
"license": "mit",
"hash": -2468510031012426000,
"line_mean": 28.6034482759,
"line_max": 78,
"alpha_frac": 0.5796156086,
"autogenerated": false,
"ratio": 4.1254204709274385,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5205036079527439,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import time
from ..messages import PythonTask
from ..queue import Queue
from ..scheduler import QueueScheduler, Running
from ..utils import timeout
__all__ = ('Pool',
'Queue',
'AsyncResult')
class AsyncResult(object):
def __init__(self, task):
self.task = task
@property
def status(self):
return self.task.status
def get(self, timeout=60):
self.wait(timeout)
if self.successful():
return self.status.data
else:
try:
raise self.status.exception
except TypeError:
raise ValueError('Async result indicate task failed!')
def wait(self, seconds=60):
with timeout(seconds):
while not self.ready():
time.sleep(0.1)
def ready(self):
return self.status.has_terminated()
def successful(self):
return self.status.has_succeeded()
class Pool(Running):
def __init__(self, processes=-1, *args, **kwargs):
self.processes = processes
self.scheduler = QueueScheduler()
super(Pool, self).__init__(self.scheduler, *args, **kwargs)
def close(self):
self.stop()
def terminate(self):
self.stop()
def wait(self, seconds=-1):
self.scheduler.wait(seconds)
def map(self, func, iterable, chunksize=1, **kwargs):
results = self.map_async(func, iterable, chunksize, **kwargs)
return [result.get(timeout=-1) for result in results]
def map_async(self, func, iterable, chunksize=1, callback=None, **kwargs):
return [self.apply_async(func, (item,), **kwargs) for item in iterable]
def apply(self, func, args=[], kwds={}, **kwargs):
result = self.apply_async(func=func, args=args, kwds=kwds, **kwargs)
return result.get(timeout=-1)
def apply_async(self, func, args=[], kwds={}, callback=None, **kwargs):
task = PythonTask(name=kwargs.pop('name', 'multiprocessing'),
fn=func, args=args, kwargs=kwds, **kwargs)
self.scheduler.submit(task)
return AsyncResult(task)
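# Illustrative sketch (not part of the original file): using Pool much like the
# standard multiprocessing.Pool. Assumes a reachable Mesos master configured
# for this scheduler and that Running supports use as a context manager which
# starts and stops the driver; the pool name is made up.
if __name__ == '__main__':
    with Pool(name='example-pool') as pool:
        # blocking apply
        print(pool.apply(sum, args=[[1, 2, 3]]))
        # asynchronous apply, collected via AsyncResult
        result = pool.apply_async(sum, args=[[4, 5, 6]])
        print(result.get(timeout=60))
        # map over an iterable
        print(pool.map(len, [[1], [1, 2], [1, 2, 3]]))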
| {
"repo_name": "lensacom/satyr",
"path": "mentor/apis/multiprocessing.py",
"copies": "1",
"size": "2193",
"license": "apache-2.0",
"hash": 6843917770820009000,
"line_mean": 27.4805194805,
"line_max": 79,
"alpha_frac": 0.6019151847,
"autogenerated": false,
"ratio": 4.0686456400742115,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5170560824774212,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import time
from workflows.services.common_service import CommonService
class UtilizationStatistics(object):
'''Generate statistics about the percentage of time spent in different
statuses over a fixed time slice. This class is not thread-safe.'''
def __init__(self, summation_period=10):
'''Reports will always cover the most recent period of summation_period
seconds.'''
self.period = summation_period
self.status_history = [ {'start': 0, 'end': None, 'status': CommonService.SERVICE_STATUS_NEW} ]
def update_status(self, new_status):
'''Record a status change with a current timestamp.'''
timestamp = time.time()
self.status_history[-1]['end'] = timestamp
self.status_history.append({'start': timestamp, 'end': None, 'status': new_status})
def report(self):
'''Return a dictionary of different status codes and the percentage of time
spent in each throughout the last summation_period seconds.
Truncate the aggregated history appropriately.'''
timestamp = time.time()
cutoff = timestamp - self.period
truncate = 0
summary = {}
for event in self.status_history[:-1]:
if event['end'] < cutoff:
truncate = truncate + 1
continue
summary[event['status']] = summary.get(event['status'], 0) + \
event['end'] - max(cutoff, event['start'])
summary[self.status_history[-1]['status']] = \
summary.get(self.status_history[-1]['status'], 0) + \
timestamp - max(cutoff, self.status_history[-1]['start'])
if truncate:
self.status_history = self.status_history[truncate:]
total_duration = sum(summary.values())
summary = { s: round(d / total_duration, 4) for s, d in summary.items() }
return summary
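# Illustrative sketch (not part of the original file): feeding status changes
# into UtilizationStatistics and reading back the per-status fractions. The
# SERVICE_STATUS_IDLE / SERVICE_STATUS_PROCESSING constants are assumptions
# standing in for whatever CommonService actually defines.
if __name__ == '__main__':
    stats = UtilizationStatistics(summation_period=10)
    stats.update_status(CommonService.SERVICE_STATUS_IDLE)
    time.sleep(0.5)
    stats.update_status(CommonService.SERVICE_STATUS_PROCESSING)
    time.sleep(0.5)
    # Returns a dict mapping each observed status code to the fraction of the
    # last 10 seconds spent in it; the fractions sum to 1.0.
    print(stats.report())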
| {
"repo_name": "xia2/workflows",
"path": "workflows/frontend/utilization.py",
"copies": "1",
"size": "1847",
"license": "bsd-3-clause",
"hash": 2080981114472612000,
"line_mean": 40.9772727273,
"line_max": 99,
"alpha_frac": 0.6599891716,
"autogenerated": false,
"ratio": 4.095343680709535,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.006419930901490745,
"num_lines": 44
} |
from __future__ import absolute_import, division, print_function
import time
import appr.models.kv
from appr.exception import ResourceNotFound, UnableToLockResource
from appr.models.kv.filesystem import filesystem_client
from appr.models.kv.models_index_base import ModelsIndexBase
class ModelsIndexFilesystem(ModelsIndexBase):
def _fetch_raw_data(self, path):
path = appr.models.kv.APPR_KV_PREFIX + path
datablob = filesystem_client.get(path)
if datablob is None:
raise ResourceNotFound("resource %s not found" % path, {"path": path})
package_data = datablob
return package_data
def _write_raw_data(self, key, data):
path = appr.models.kv.APPR_KV_PREFIX + key
filesystem_client.set(path, data)
def _delete_data(self, key):
path = appr.models.kv.APPR_KV_PREFIX + key
return filesystem_client.delete(path)
def _get_lock(self, lock_key, ttl=3, timeout=4):
if timeout is not None:
timeout_time = time.time() + timeout
while True:
if filesystem_client.lockttl(lock_key, ttl):
return True
else:
if timeout is None or time.time() > timeout_time:
raise UnableToLockResource("%s already locked" % lock_key, {
"lock_key": lock_key,
"ttl": ttl})
else:
time.sleep(0.2)
def _lock_key(self, key):
return "%s%s.lock" % (appr.models.kv.APPR_KV_PREFIX, key)
def _release_lock(self, lock_key):
filesystem_client.delete(lock_key)
| {
"repo_name": "app-registry/appr",
"path": "appr/models/kv/filesystem/models_index.py",
"copies": "2",
"size": "1633",
"license": "apache-2.0",
"hash": 1575734404513044700,
"line_mean": 34.5,
"line_max": 82,
"alpha_frac": 0.6099203919,
"autogenerated": false,
"ratio": 3.6862302483069977,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5296150640206997,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import time
import appr.models.kv
from appr.exception import ResourceNotFound, UnableToLockResource
from appr.models.kv.models_index_base import ModelsIndexBase
from appr.models.kv.redis import redis_client
class ModelsIndexRedis(ModelsIndexBase):
def _fetch_raw_data(self, path):
path = appr.models.kv.APPR_KV_PREFIX + path
datablob = redis_client.get(path)
if datablob is None:
raise ResourceNotFound("resource %s not found" % path, {"path": path})
package_data = datablob
return package_data
def _write_raw_data(self, key, data):
path = appr.models.kv.APPR_KV_PREFIX + key
redis_client.set(path, data)
def _delete_data(self, key):
path = appr.models.kv.APPR_KV_PREFIX + key
return redis_client.delete(path) == 1
def _get_lock(self, lock_key, ttl=3, timeout=4):
if timeout is not None:
timeout_time = time.time() + timeout  # deadline for acquiring the lock
while True:
if redis_client.set(lock_key, 'locked', nx=True, ex=ttl):
return True
else:
if timeout is None or time.time() > timeout_time:
raise UnableToLockResource("%s already locked" % lock_key, {
"lock_key": lock_key,
"ttl": ttl})
else:
time.sleep(0.2)
def _lock_key(self, key):
return "%s%s.lock" % (appr.models.kv.APPR_KV_PREFIX, key)
def _release_lock(self, lock_key):
redis_client.delete(lock_key)
| {
"repo_name": "app-registry/appr",
"path": "appr/models/kv/redis/models_index.py",
"copies": "2",
"size": "1638",
"license": "apache-2.0",
"hash": 6950786190620633000,
"line_mean": 34.6086956522,
"line_max": 82,
"alpha_frac": 0.5995115995,
"autogenerated": false,
"ratio": 3.576419213973799,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5175930813473799,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import time
import etcd
import appr.models.kv
from appr.exception import ResourceNotFound, UnableToLockResource
from appr.models.kv.etcd import etcd_client
from appr.models.kv.models_index_base import ModelsIndexBase
class ModelsIndexEtcd(ModelsIndexBase):
def _fetch_raw_data(self, path):
path = appr.models.kv.APPR_KV_PREFIX + path
try:
data = etcd_client.read(path).value
except etcd.EtcdKeyError as excp:
raise ResourceNotFound(excp.message, {"path": path})
return data
def _write_raw_data(self, key, data):
path = appr.models.kv.APPR_KV_PREFIX + key
etcd_client.write(path, data)
def _delete_data(self, key):
path = appr.models.kv.APPR_KV_PREFIX + key
try:
etcd_client.delete(path)
except etcd.EtcdKeyError:
pass
def _get_lock(self, lock_key, ttl=3, timeout=4):
if timeout is not None:
timeout_time = time.time() + timeout  # deadline for acquiring the lock
while True:
try:
etcd_client.write(lock_key, 'lock', prevExist=False, ttl=ttl)
return True
except etcd.EtcdAlreadyExist:
if timeout is None or time.time() > timeout_time:
raise UnableToLockResource("%s already locked" % lock_key, {
"lock_key": lock_key,
"ttl": ttl})
else:
time.sleep(0.2)
def _lock_key(self, key):
return "%s%s.lock" % (appr.models.kv.APPR_KV_PREFIX, key)
def _release_lock(self, lock_key):
return self._delete_data(lock_key.replace(appr.models.kv.APPR_KV_PREFIX, ""))
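# Illustrative sketch (not part of the original file): the read-modify-write
# pattern that the three KV backends above (filesystem, redis, etcd) enable
# with their lock helpers. ModelsIndexBase presumably wraps this for callers;
# calling the underscore-prefixed methods directly here is only for
# illustration, and the key name and payload are made up.
def example_locked_update(index, key, data):
    lock_key = index._lock_key(key)
    index._get_lock(lock_key, ttl=3, timeout=4)   # raises UnableToLockResource on contention
    try:
        index._write_raw_data(key, data)
    finally:
        index._release_lock(lock_key)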
| {
"repo_name": "cn-app-registry/cnr-server",
"path": "appr/models/kv/etcd/models_index.py",
"copies": "2",
"size": "1775",
"license": "apache-2.0",
"hash": 5426085283039726000,
"line_mean": 33.1346153846,
"line_max": 85,
"alpha_frac": 0.5949295775,
"autogenerated": false,
"ratio": 3.514851485148515,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5109781062648514,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import time
import numpy as np
import tensorflow as tf
import logging
from tensorflow.contrib import layers, learn
import gym
from deep_rl.graphs import create_vpg_graph
from deep_rl.trajectories import compute_vpg_advantage, sample_traj
from deep_rl.misc import categorical_sample
from six.moves import range
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string("name", "CartPole-v0", "Name of the environment to train/play")
flags.DEFINE_float("gamma", 0.99, "Discount rate")
flags.DEFINE_float("gae_lambda", 1, "Lambda for GAE")
flags.DEFINE_float("learning_rate", 0.05, "Learning rate")
flags.DEFINE_integer("batch_size", 2000, "Number of timesteps per batch")
flags.DEFINE_bool("render", False, "Render environment during training")
flags.DEFINE_integer("seed", 0, "Random seed")
flags.DEFINE_string("outdir", "", "Prefix for monitoring, summary and checkpoint directories")
flags.DEFINE_integer("save_model_interval", 120, "Interval to save model (seconds)")
flags.DEFINE_integer("save_summaries_interval", 120, "Interval to save summaries (seconds)")
def policy_model(input_shape, hidden_sizes, n_action):
states = tf.placeholder("float", shape=input_shape)
hiddens = learn.ops.dnn(states, hidden_sizes, activation=tf.nn.relu)
return states, tf.nn.softmax(layers.fully_connected(hiddens, n_action))
def value_model(input_shape, hidden_sizes):
states = tf.placeholder("float", shape=input_shape)
hiddens = learn.ops.dnn(states, hidden_sizes, activation=tf.nn.relu)
return states, layers.fully_connected(hiddens, 1)
env = gym.make(FLAGS.name)
np.random.seed(FLAGS.seed)
tf.set_random_seed(FLAGS.seed)
env.seed(FLAGS.seed)
n_action = env.action_space.n
input_shape = (None,) + env.observation_space.shape
monitor_dir = FLAGS.outdir + '/monitor'
logging.getLogger().setLevel(logging.DEBUG)
env.monitor.start(monitor_dir, resume=True, video_callable=False)
def main(unused_args):
g = tf.Graph()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with g.as_default(), tf.device('/cpu:0'):
pf_opt = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)
vf_opt = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)
pf_model = lambda: policy_model(input_shape, [32], n_action)
vf_model = lambda: value_model(input_shape, [32])
graph_ops = create_vpg_graph(n_action, pf_model, vf_model, pf_opt, vf_opt)
sv = tf.train.Supervisor(g,
logdir=FLAGS.outdir,
save_model_secs=FLAGS.save_model_interval,
save_summaries_secs=FLAGS.save_summaries_interval)
with sv.managed_session(config=config) as sess:
actions = graph_ops["actions"]
advantages = graph_ops["advantages"]
returns = graph_ops["returns"]
pf_input = graph_ops["policy_input"]
pf_train_op = graph_ops["policy_train_op"]
pf_probs_op = graph_ops["probs"]
vf_input = graph_ops["value_input"]
vf_predict_op = graph_ops["value"]
vf_train_op = graph_ops["value_train_op"]
def pf_action(x):
probs = sess.run(pf_probs_op, feed_dict={pf_input: x.reshape(1, -1)})
return categorical_sample(probs)[0]
vf_predict = lambda x: sess.run(vf_predict_op, feed_dict={vf_input: x})[:, 0]
total_steps = 0
while True:
trajs = []
t0 = time.time()
timesteps_count = 0
trajs_count = 0
while timesteps_count < FLAGS.batch_size:
t = sample_traj(env, pf_action, max_traj_len=env.spec.timestep_limit)
trajs.append(t)
timesteps_count += len(t["actions"])
trajs_count += 1
compute_vpg_advantage(trajs, vf_predict, FLAGS.gamma, FLAGS.gae_lambda)
all_states = np.concatenate([t["states"] for t in trajs])
all_acts = np.concatenate([t["actions"] for t in trajs])
all_rets = np.concatenate([t["returns"] for t in trajs])
all_advs = np.concatenate([t["advantages"] for t in trajs])
# train models
sess.run(vf_train_op, feed_dict={vf_input: all_states, returns: all_rets})
sess.run(pf_train_op,
feed_dict={pf_input: all_states,
advantages: all_advs,
actions: all_acts})
reward_sums = np.array([t["rewards_sum"] for t in trajs])
reward_mean = reward_sums.mean()
reward_std = reward_sums.std() / np.sqrt(len(reward_sums))
total_steps += timesteps_count
print("------------------------")
print("Total timesteps {}".format(total_steps))
print("Number of timesteps {}".format(timesteps_count))
print("Trajectories sampled {}".format(trajs_count))
print("Max Reward = {:.2f}".format(np.max(reward_sums)))
print("Average Reward = {:.2f} +- {:.2f}".format(reward_mean, reward_std))
print("Time taken = {:.2f}".format(time.time() - t0))
print("------------------------")
# render after each iteration
if FLAGS.render:
sample_traj(env, pf_action, max_traj_len=env.spec.timestep_limit, render=True)
if __name__ == "__main__":
tf.app.run()
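# Illustrative sketch (not part of the original file): the generalised
# advantage estimation (GAE) recurrence that compute_vpg_advantage is expected
# to apply per trajectory, written out for a single array of rewards and value
# predictions. The real implementation lives in deep_rl.trajectories and may
# differ in details (e.g. how the final value is bootstrapped).
def gae_advantages(rewards, values, gamma, lam):
    values = np.append(values, 0.0)                   # assume a terminal value of 0
    deltas = rewards + gamma * values[1:] - values[:-1]
    advantages = np.zeros_like(rewards, dtype=float)
    running = 0.0
    for t in reversed(range(len(rewards))):
        running = deltas[t] + gamma * lam * running   # discounted sum of TD residuals
        advantages[t] = running
    return advantages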
| {
"repo_name": "domluna/deep_rl",
"path": "examples/run_vpg.py",
"copies": "1",
"size": "5686",
"license": "mit",
"hash": -6370002256699466000,
"line_mean": 39.9064748201,
"line_max": 98,
"alpha_frac": 0.5944424903,
"autogenerated": false,
"ratio": 3.630906768837803,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.971854927827051,
"avg_score": 0.0013599961734586832,
"num_lines": 139
} |
from __future__ import absolute_import, division, print_function
import time
# TODO: change thrown errors to these
from concurrent.futures import ALL_COMPLETED, CancelledError, TimeoutError
from ..messages import PythonTask
from ..scheduler import QueueScheduler, Running
from ..utils import timeout as seconds
__all__ = ('MesosPoolExecutor',
'Future')
def wait(fs, timeout=None, return_when=ALL_COMPLETED):
raise NotImplementedError()
def as_completed(fs, timeout=None):
raise NotImplementedError()
class Future(object):
def __init__(self, task):
self.task = task
@property
def status(self):
return self.task.status
def cancel(self):
raise NotImplementedError()
def cancelled(self):
return self.status.has_killed()
def running(self):
return (self.status.is_running() or
self.status.is_starting() or
self.status.is_staging())
def done(self):
return self.status.has_killed() or self.status.has_finished()
def result(self, timeout=None):
with seconds(timeout):
while not self.status.has_terminated():
time.sleep(0.1)
if self.status.has_finished():
return self.status.data
else:
try:
print(self.status.data)
raise self.status.exception
except TypeError:
raise ValueError(
'Future result indicates that task failed!')
def exception(self, timeout=None):
with seconds(timeout):
while not self.status.has_terminated():
time.sleep(0.1)
if self.status.has_finished():
return None
else:
return self.status.exception
def add_done_callback(self, fn):
raise NotImplementedError()
class MesosPoolExecutor(Running):
def __init__(self, max_workers=-1, *args, **kwargs):
self.max_worker = max_workers # TODO
self.scheduler = QueueScheduler()
super(MesosPoolExecutor, self).__init__(
self.scheduler, *args, **kwargs)
def submit(self, fn, args=[], kwargs={}, **kwds):
task = PythonTask(fn=fn, args=args, kwargs=kwargs,
name=kwds.pop('name', 'futures'), **kwds)
self.scheduler.submit(task)
return Future(task)
def map(self, func, *iterables, **kwargs):
timeout = kwargs.pop('timeout', None)
chunksize = kwargs.pop('chunksize', 1)
for item in zip(*iterables):
future = self.submit(func, args=item, **kwargs)
yield future.result(timeout=timeout)
def shutdown(self, wait=True):
if wait:
self.scheduler.wait(-1)
self.stop()
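# Illustrative sketch (not part of the original file): submitting work through
# MesosPoolExecutor, mirroring concurrent.futures usage. Assumes a reachable
# Mesos master and that Running accepts a name keyword; note that submit()
# takes args/kwargs as explicit parameters rather than *args/**kwargs.
if __name__ == '__main__':
    executor = MesosPoolExecutor(name='example-executor')
    try:
        future = executor.submit(pow, args=[2, 10])
        print(future.result(timeout=60))              # 1024
        for length in executor.map(len, [[1], [1, 2], [1, 2, 3]], timeout=60):
            print(length)
    finally:
        executor.shutdown(wait=True)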
| {
"repo_name": "lensacom/satyr",
"path": "mentor/apis/futures.py",
"copies": "1",
"size": "2835",
"license": "apache-2.0",
"hash": -7902067813243300000,
"line_mean": 27.9285714286,
"line_max": 74,
"alpha_frac": 0.5873015873,
"autogenerated": false,
"ratio": 4.288956127080182,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5376257714380182,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import toolz
from datashape import Record, DataShape, dshape
from datashape import coretypes as ct
import datashape
from numpy import inf
from .core import common_subexpression
from .expressions import Expr, ndim
class Reduction(Expr):
""" A column-wise reduction
Blaze supports the same class of reductions as NumPy and Pandas.
sum, min, max, any, all, mean, var, std, count, nunique
Examples
--------
>>> from blaze import symbol
>>> t = symbol('t', 'var * {name: string, amount: int, id: int}')
>>> e = t['amount'].sum()
>>> data = [['Alice', 100, 1],
... ['Bob', 200, 2],
... ['Alice', 50, 3]]
>>> from blaze.compute.python import compute
>>> compute(e, data)
350
"""
__slots__ = '_hash', '_child', 'axis', 'keepdims'
def __init__(self, _child, axis=None, keepdims=False):
self._child = _child
if axis is None:
axis = tuple(range(_child.ndim))
if isinstance(axis, (set, list)):
axis = tuple(axis)
if not isinstance(axis, tuple):
axis = (axis,)
axis = tuple(sorted(axis))
self.axis = axis
self.keepdims = keepdims
@property
def dshape(self):
axis = self.axis
if self.keepdims:
shape = tuple(1 if i in axis else d
for i, d in enumerate(self._child.shape))
else:
shape = tuple(d
for i, d in enumerate(self._child.shape)
if i not in axis)
return DataShape(*(shape + (self.schema,)))
@property
def schema(self):
schema = self._child.schema[0]
if isinstance(schema, Record) and len(schema.types) == 1:
result = toolz.first(schema.types)
else:
result = schema
return DataShape(result)
@property
def symbol(self):
return type(self).__name__
@property
def _name(self):
try:
return self._child._name + '_' + type(self).__name__
except (AttributeError, ValueError, TypeError):
return type(self).__name__
def __str__(self):
kwargs = list()
if self.keepdims:
kwargs.append('keepdims=True')
if self.axis != tuple(range(self._child.ndim)):
kwargs.append('axis=' + str(self.axis))
other = sorted(set(self.__slots__[1:]) - set(['_child', 'axis', 'keepdims']))
for slot in other:
kwargs.append('%s=%s' % (slot, getattr(self, slot)))
name = type(self).__name__
if kwargs:
return '%s(%s, %s)' % (name, self._child, ', '.join(kwargs))
else:
return '%s(%s)' % (name, self._child)
class any(Reduction):
schema = dshape(ct.bool_)
class all(Reduction):
schema = dshape(ct.bool_)
class sum(Reduction):
@property
def schema(self):
return DataShape(datashape.maxtype(super(sum, self).schema))
class max(Reduction):
pass
class min(Reduction):
pass
class mean(Reduction):
schema = dshape(ct.real)
class var(Reduction):
"""Variance
Parameters
----------
child : Expr
An expression
unbiased : bool, optional
Compute an unbiased estimate of the population variance if this is
``True``. In NumPy and pandas, this parameter is called ``ddof`` (delta
degrees of freedom) and is equal to 1 for unbiased and 0 for biased.
"""
__slots__ = '_hash', '_child', 'unbiased', 'axis', 'keepdims'
schema = dshape(ct.real)
def __init__(self, child, unbiased=False, *args, **kwargs):
self.unbiased = unbiased
super(var, self).__init__(child, *args, **kwargs)
class std(Reduction):
"""Standard Deviation
Parameters
----------
child : Expr
An expression
unbiased : bool, optional
Compute the square root of an unbiased estimate of the population
variance if this is ``True``.
.. warning::
This does *not* return an unbiased estimate of the population
standard deviation.
See Also
--------
var
"""
__slots__ = '_hash', '_child', 'unbiased', 'axis', 'keepdims'
schema = dshape(ct.real)
def __init__(self, child, unbiased=False, *args, **kwargs):
self.unbiased = unbiased
super(std, self).__init__(child, *args, **kwargs)
class count(Reduction):
""" The number of non-null elements """
schema = dshape(ct.int32)
class nunique(Reduction):
schema = dshape(ct.int32)
class nelements(Reduction):
"""Compute the number of elements in a collection, including missing values.
See Also
    --------
blaze.expr.reductions.count: compute the number of non-null elements
Examples
--------
>>> from blaze import symbol
>>> t = symbol('t', 'var * {name: string, amount: float64}')
>>> t[t.amount < 1].nelements()
nelements(t[t.amount < 1])
"""
schema = dshape(ct.int32)
def nrows(expr):
return nelements(expr, axis=(0,))
class Summary(Expr):
""" A collection of named reductions
Examples
--------
>>> from blaze import symbol
>>> t = symbol('t', 'var * {name: string, amount: int, id: int}')
>>> expr = summary(number=t.id.nunique(), sum=t.amount.sum())
>>> data = [['Alice', 100, 1],
... ['Bob', 200, 2],
... ['Alice', 50, 1]]
>>> from blaze import compute
>>> compute(expr, data)
(2, 350)
"""
__slots__ = '_hash', '_child', 'names', 'values', 'axis', 'keepdims'
def __init__(self, _child, names, values, axis=None, keepdims=False):
self._child = _child
self.names = names
self.values = values
self.keepdims = keepdims
self.axis = axis
@property
def dshape(self):
axis = self.axis
if self.keepdims:
shape = tuple(1 if i in axis else d
for i, d in enumerate(self._child.shape))
else:
shape = tuple(d
for i, d in enumerate(self._child.shape)
if i not in axis)
measure = Record(list(zip(self.names,
[v.schema for v in self.values])))
return DataShape(*(shape + (measure,)))
def __str__(self):
s = 'summary('
s += ', '.join('%s=%s' % (name, str(val))
for name, val in zip(self.fields, self.values))
if self.keepdims:
s += ', keepdims=True'
s += ')'
return s
def summary(keepdims=False, axis=None, **kwargs):
items = sorted(kwargs.items(), key=toolz.first)
names = tuple(map(toolz.first, items))
values = tuple(map(toolz.second, items))
child = common_subexpression(*values)
if len(kwargs) == 1 and not iscollection(child.dshape):
while not iscollection(child.dshape):
children = [i for i in child._inputs if isinstance(i, Expr)]
if len(children) == 1:
child = children[0]
else:
child = common_subexpression(*children)
if axis is None:
axis = tuple(range(ndim(child)))
if isinstance(axis, (set, list)):
axis = tuple(axis)
if not isinstance(axis, tuple):
axis = (axis,)
return Summary(child, names, values, keepdims=keepdims, axis=axis)
summary.__doc__ = Summary.__doc__
def vnorm(expr, ord=None, axis=None, keepdims=False):
""" Vector norm
See np.linalg.norm
"""
if ord is None or ord == 'fro':
ord = 2
if ord == inf:
return max(abs(expr), axis=axis, keepdims=keepdims)
elif ord == -inf:
return min(abs(expr), axis=axis, keepdims=keepdims)
elif ord == 1:
return sum(abs(expr), axis=axis, keepdims=keepdims)
elif ord % 2 == 0:
return sum(expr**ord, axis=axis, keepdims=keepdims)**(1./ord)
else:
return sum(abs(expr)**ord, axis=axis, keepdims=keepdims)**(1./ord)
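# --- Editor's note: hedged sketch, not in the original module. It spells out
# which reduction each ``ord`` value of ``vnorm`` above resolves to. The
# ``blaze.symbol`` import matches the doctests and assumes the full package is
# installed so that abs() and ** are defined on expressions.
def _vnorm_demo():  # pragma: no cover
    from blaze import symbol
    x = symbol('x', '10 * float64')
    print(vnorm(x))           # ord=None -> 2 (even): sum(x ** 2) ** 0.5
    print(vnorm(x, ord=1))    # sum(abs(x))
    print(vnorm(x, ord=inf))  # max(abs(x))
    print(vnorm(x, ord=3))    # odd: sum(abs(x) ** 3) ** (1/3)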
from datashape.predicates import iscollection, isboolean, isnumeric
from .expressions import dshape_method_list, method_properties
dshape_method_list.extend([
(iscollection, set([count, min, max, nelements])),
(lambda ds: len(ds.shape) == 1,
set([nrows, nunique])),
(lambda ds: iscollection(ds) and isboolean(ds),
set([any, all, sum])),
(lambda ds: iscollection(ds) and isnumeric(ds),
     set([mean, sum, min, max, std, var, vnorm])),
])
method_properties.update([nrows])
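# --- Editor's note: hedged sketch, not in the original module. It exercises
# the axis/keepdims bookkeeping in Reduction above on a 2-d symbol; the
# ``blaze.symbol`` import is an assumption, as in the doctests.
def _axis_keepdims_demo():  # pragma: no cover
    from blaze import symbol
    x = symbol('x', '10 * 20 * float64')
    expr = x.sum(axis=0, keepdims=True)
    print(expr.axis, expr.keepdims)      # (0,) True -- axis is normalised to a sorted tuple
    print(expr)                          # __str__ above renders the non-default kwargs
    print(x.sum(axis=0).dshape)          # axis 0 dropped, roughly "20 * float64"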
| {
"repo_name": "mrocklin/blaze",
"path": "blaze/expr/reductions.py",
"copies": "1",
"size": "8630",
"license": "bsd-3-clause",
"hash": -6729876540808689000,
"line_mean": 26.6602564103,
"line_max": 85,
"alpha_frac": 0.5609501738,
"autogenerated": false,
"ratio": 3.7685589519650655,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4829509125765065,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import toolz
from toolz import first
import datashape
from datashape import Record, dshape, DataShape
from datashape import coretypes as ct
from datashape.predicates import isscalar, iscollection
from .core import common_subexpression
from .expressions import Expr, Symbol
class Reduction(Expr):
""" A column-wise reduction
Blaze supports the same class of reductions as NumPy and Pandas.
sum, min, max, any, all, mean, var, std, count, nunique
Examples
--------
>>> t = Symbol('t', 'var * {name: string, amount: int, id: int}')
>>> e = t['amount'].sum()
>>> data = [['Alice', 100, 1],
... ['Bob', 200, 2],
... ['Alice', 50, 3]]
>>> from blaze.compute.python import compute
>>> compute(e, data)
350
"""
__slots__ = '_child', 'axis', 'keepdims'
_dtype = None
def __init__(self, _child, axis=None, keepdims=False):
self._child = _child
if axis is None:
axis = tuple(range(_child.ndim))
if isinstance(axis, (set, list)):
axis = tuple(axis)
if not isinstance(axis, tuple):
axis = (axis,)
axis = tuple(sorted(axis))
self.axis = axis
self.keepdims = keepdims
@property
def dshape(self):
axis = self.axis
if self.keepdims:
shape = tuple(1 if i in self.axis else d
for i, d in enumerate(self._child.shape))
else:
shape = tuple(d
for i, d in enumerate(self._child.shape)
if i not in self.axis)
return DataShape(*(shape + (self._dtype,)))
@property
def symbol(self):
return type(self).__name__
@property
def _name(self):
try:
return self._child._name + '_' + type(self).__name__
except (AttributeError, ValueError, TypeError):
return type(self).__name__
class any(Reduction):
_dtype = ct.bool_
class all(Reduction):
_dtype = ct.bool_
class sum(Reduction):
@property
def _dtype(self):
schema = self._child.schema[0]
if isinstance(schema, Record) and len(schema.types) == 1:
return first(schema.types)
else:
return schema
class max(Reduction):
@property
def _dtype(self):
schema = self._child.schema[0]
if isinstance(schema, Record) and len(schema.types) == 1:
return first(schema.types)
else:
return schema
class min(Reduction):
@property
def _dtype(self):
schema = self._child.schema[0]
if isinstance(schema, Record) and len(schema.types) == 1:
return first(schema.types)
else:
return schema
class mean(Reduction):
_dtype = ct.real
class var(Reduction):
"""Variance
Parameters
----------
child : Expr
An expression
unbiased : bool, optional
Compute an unbiased estimate of the population variance if this is
``True``. In NumPy and pandas, this parameter is called ``ddof`` (delta
degrees of freedom) and is equal to 1 for unbiased and 0 for biased.
"""
__slots__ = '_child', 'unbiased', 'axis', 'keepdims'
_dtype = ct.real
def __init__(self, child, unbiased=False, *args, **kwargs):
self.unbiased = unbiased
Reduction.__init__(self, child, *args, **kwargs)
class std(Reduction):
"""Standard Deviation
Parameters
----------
child : Expr
An expression
unbiased : bool, optional
Compute the square root of an unbiased estimate of the population
variance if this is ``True``.
.. warning::
This does *not* return an unbiased estimate of the population
standard deviation.
See Also
--------
var
"""
__slots__ = '_child', 'unbiased', 'axis', 'keepdims'
_dtype = ct.real
def __init__(self, child, unbiased=False, *args, **kwargs):
self.unbiased = unbiased
Reduction.__init__(self, child, *args, **kwargs)
class count(Reduction):
""" The number of non-null elements """
_dtype = ct.int_
class nunique(Reduction):
_dtype = ct.int_
class Summary(Expr):
""" A collection of named reductions
Examples
--------
>>> t = Symbol('t', 'var * {name: string, amount: int, id: int}')
>>> expr = summary(number=t.id.nunique(), sum=t.amount.sum())
>>> data = [['Alice', 100, 1],
... ['Bob', 200, 2],
... ['Alice', 50, 1]]
>>> from blaze.compute.python import compute
>>> compute(expr, data)
(2, 350)
"""
__slots__ = '_child', 'names', 'values', 'keepdims'
def __init__(self, _child, names, values, keepdims=False):
self._child = _child
self.names = names
self.values = values
self.keepdims = keepdims
@property
def dshape(self):
measure = Record(list(zip(self.names,
[v._dtype for v in self.values])))
if self.keepdims:
return DataShape(*((1,) * self._child.ndim + (measure,)))
else:
return DataShape(measure)
def __str__(self):
return 'summary(' + ', '.join('%s=%s' % (name, str(val))
for name, val in zip(self.fields, self.values)) + \
', keepdims=%s' % self.keepdims + ')'
def summary(keepdims=False, **kwargs):
items = sorted(kwargs.items(), key=first)
names = tuple(map(first, items))
values = tuple(map(toolz.second, items))
child = common_subexpression(*values)
if len(kwargs) == 1 and not iscollection(child.dshape):
while not iscollection(child.dshape):
children = [i for i in child._inputs if isinstance(i, Expr)]
if len(children) == 1:
child = children[0]
else:
raise ValueError()
return Summary(child, names, values, keepdims=keepdims)
summary.__doc__ = Summary.__doc__
from datashape.predicates import (iscollection, isscalar, isrecord, isboolean,
isnumeric)
from .expressions import schema_method_list, dshape_method_list
schema_method_list.extend([
(isboolean, set([any, all, sum])),
    (isnumeric, set([mean, sum, min, max, std, var])),
])
dshape_method_list.extend([
(iscollection, set([count, nunique, min, max])),
])
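# --- Editor's note: hedged sketch, not in the original module. It shows the
# record measure that ``summary`` above assembles from its keyword arguments,
# using the same symbolic table as the doctests.
def _summary_demo():  # pragma: no cover
    t = Symbol('t', 'var * {name: string, amount: int, id: int}')
    expr = summary(total=t.amount.sum(), ids=t.id.nunique())
    print(expr.names)    # ('ids', 'total') -- keywords are sorted by name
    print(expr.dshape)   # a record measure, e.g. "{ids: int32, total: int32}"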
| {
"repo_name": "vitan/blaze",
"path": "blaze/expr/reductions.py",
"copies": "1",
"size": "6478",
"license": "bsd-3-clause",
"hash": 493355932014252000,
"line_mean": 26.1046025105,
"line_max": 79,
"alpha_frac": 0.5663785119,
"autogenerated": false,
"ratio": 3.8513674197384065,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.990927661750133,
"avg_score": 0.0016938628274152376,
"num_lines": 239
} |
from __future__ import absolute_import, division, print_function
import toolz
from toolz import pipe
import itertools
from datashape import discover, Unit, Tuple, Record, iscollection, isscalar
import sqlalchemy as sa
from ..data.sql import dshape_to_alchemy
from ..dispatch import dispatch
from ..expr import *
from .utils import literalquery
__all__ = []
try:
import pyspark
from pyspark.sql import SchemaRDD
except ImportError:
SchemaRDD = type(None)
names = ('_table_%d' % i for i in itertools.count(1))
__all__ = []
class SparkSQLQuery(object):
""" Pair of PySpark SQLContext and SQLAlchemy Table
Python's SparkSQL interface only accepts strings. We use SQLAlchemy to
generate these strings. To do this we'll have to pass around pairs of
(SQLContext, sqlalchemy.Selectable). Additionally we track a mapping of
{schemardd: sqlalchemy.Table}
Parameters
----------
context: pyspark.sql.SQLContext
query: sqlalchemy.Selectable
mapping: dict :: {pyspark.sql.SchemaRDD: sqlalchemy.Table}
"""
__slots__ = 'context', 'query', 'mapping'
def __init__(self, context, query, mapping):
self.context = context
self.query = query
self.mapping = mapping
def make_query(rdd, primary_key='', name=None):
# SparkSQL
name = name or next(names)
context = rdd.sql_ctx
context.registerRDDAsTable(rdd, name)
# SQLAlchemy
schema = discover(rdd).subshape[0]
columns = dshape_to_alchemy(schema)
for column in columns:
if column.name == primary_key:
column.primary_key = True
metadata = sa.MetaData() # TODO: sync this between many tables
query = sa.Table(name, metadata, *columns)
mapping = {rdd: query}
return SparkSQLQuery(context, query, mapping)
@dispatch(Symbol, SchemaRDD)
def compute_up(ts, rdd, **kwargs):
return make_query(rdd)
@dispatch((var, Label, std, Sort, count, nunique, Selection, mean,
Head, ReLabel, Distinct, ElemWise, By, any, all, sum, max,
min, Reduction, Projection, Field), SchemaRDD)
def compute_up(e, rdd, **kwargs):
return compute_up(e, make_query(rdd), **kwargs)
@dispatch((BinOp, Join),
(SparkSQLQuery, SchemaRDD),
(SparkSQLQuery, SchemaRDD))
def compute_up(e, a, b, **kwargs):
if not isinstance(a, SparkSQLQuery):
a = make_query(a)
if not isinstance(b, SparkSQLQuery):
b = make_query(b)
return compute_up(e, a, b, **kwargs)
@dispatch((UnaryOp, Expr), SparkSQLQuery)
def compute_up(expr, q, **kwargs):
scope = kwargs.pop('scope', dict())
scope = dict((t, q.mapping.get(data, data)) for t, data in scope.items())
q2 = compute_up(expr, q.query, scope=scope, **kwargs)
return SparkSQLQuery(q.context, q2, q.mapping)
@dispatch((BinOp, Join, Expr), SparkSQLQuery, SparkSQLQuery)
def compute_up(expr, a, b, **kwargs):
assert a.context == b.context
mapping = toolz.merge(a.mapping, b.mapping)
scope = kwargs.pop('scope', dict())
scope = dict((t, mapping.get(data, data)) for t, data in scope.items())
c = compute_up(expr, a.query, b.query, scope=scope, **kwargs)
return SparkSQLQuery(a.context, c, mapping)
from .sql import select
def sql_string(query):
return pipe(query, select, literalquery, str)
@dispatch(Expr, SparkSQLQuery, dict)
def post_compute(expr, query, d):
result = query.context.sql(sql_string(query.query))
if iscollection(expr.dshape) and isscalar(expr.dshape.measure):
result = result.map(lambda x: x[0])
return result
@dispatch(Head, SparkSQLQuery, dict)
def post_compute(expr, query, d):
result = query.context.sql(sql_string(query.query))
if iscollection(expr.dshape) and isscalar(expr.dshape.measure):
result = result.map(lambda x: x[0])
return result.collect()
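# --- Editor's note: hedged sketch, not part of the original module. It traces
# the intended flow above: make_query registers the SchemaRDD as a temp table
# and pairs it with a SQLAlchemy Table, compute_up builds a select against that
# table, and post_compute ships the rendered SQL string back to the SQLContext.
# The SQLContext/Row/inferSchema calls are Spark 1.x-era assumptions; a live
# SparkContext ``sc`` and a full blaze install are required.
def _sparksql_demo(sc):  # pragma: no cover
    from pyspark.sql import SQLContext, Row
    from blaze import symbol, compute
    sqlctx = SQLContext(sc)
    rows = sc.parallelize([Row(name='Alice', amount=100),
                           Row(name='Bob', amount=200)])
    srdd = sqlctx.inferSchema(rows)          # SchemaRDD (pre-DataFrame API, assumed)
    t = symbol('t', 'var * {amount: int64, name: string}')
    result = compute(t.amount.sum(), srdd)   # SQL text generated via sql_string above
    print(result)                            # backend-dependent result wrapper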
| {
"repo_name": "vitan/blaze",
"path": "blaze/compute/sparksql.py",
"copies": "1",
"size": "3846",
"license": "bsd-3-clause",
"hash": 546851966662616000,
"line_mean": 27.4888888889,
"line_max": 77,
"alpha_frac": 0.6703068123,
"autogenerated": false,
"ratio": 3.464864864864865,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4635171677164865,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import toolz
import datashape
import functools
from toolz import concat, memoize, partial
import re
from datashape import dshape, DataShape, Record, Var, Mono
from datashape.predicates import isscalar, iscollection, isboolean, isrecord
from ..compatibility import _strtypes, builtins
from .core import *
from .method_dispatch import select_functions
from ..dispatch import dispatch
__all__ = ['Expr', 'ElemWise', 'Field', 'Symbol', 'discover', 'Projection',
'projection', 'Selection', 'selection', 'Label', 'label', 'Map',
'ReLabel', 'relabel', 'Apply', 'Slice', 'shape', 'ndim']
def isvalid_identifier(s):
"""
>>> isvalid_identifier('Hello')
True
>>> isvalid_identifier('Hello world')
False
>>> isvalid_identifier('Helloworld!')
False
"""
    return bool(re.match(r'^\w+$', s))
class Expr(Node):
"""
Symbolic expression of a computation
All Blaze expressions (Join, By, Sort, ...) descend from this class. It
contains shared logic and syntax. It in turn inherits from ``Node`` which
holds all tree traversal logic
"""
def _get_field(self, fieldname):
if not isinstance(self.dshape.measure, Record):
if fieldname == self._name:
return self
raise ValueError("Can not get field '%s' of non-record expression %s"
% (fieldname, self))
return Field(self, fieldname)
def __getitem__(self, key):
if isinstance(key, _strtypes) and key in self.fields:
return self._get_field(key)
elif isinstance(key, Expr) and iscollection(key.dshape):
return selection(self, key)
elif (isinstance(key, list)
and builtins.all(isinstance(k, _strtypes) for k in key)):
if set(key).issubset(self.fields):
return self._project(key)
else:
raise ValueError('Names %s not consistent with known names %s'
% (key, self.fields))
elif (isinstance(key, tuple)
and all(isinstance(k, (int, slice)) for k in key)):
return Slice(self, key)
elif isinstance(key, (slice, int)):
return Slice(self, (key,))
raise ValueError("Not understood %s[%s]" % (self, key))
def map(self, func, schema=None, name=None):
return Map(self, func, schema, name)
def _project(self, key):
return projection(self, key)
@property
def schema(self):
return datashape.dshape(self.dshape.measure)
@property
def fields(self):
if isinstance(self.dshape.measure, Record):
return self.dshape.measure.names
if hasattr(self, '_name'):
return [self._name]
def _len(self):
try:
return int(self.dshape[0])
except TypeError:
raise ValueError('Can not determine length of table with the '
'following datashape: %s' % self.dshape)
def __len__(self): # pragma: no cover
return self._len()
def __dir__(self):
result = dir(type(self))
if isrecord(self.dshape.measure) and self.fields:
result.extend(list(self.fields))
d = toolz.merge(schema_methods(self.dshape.measure),
dshape_methods(self.dshape))
result.extend(list(d))
return sorted(set(filter(isvalid_identifier, result)))
def __getattr__(self, key):
try:
return object.__getattribute__(self, key)
except AttributeError:
if self.fields and key in self.fields:
if isscalar(self.dshape.measure): # t.foo.foo is t.foo
return self
else:
return self[key]
d = toolz.merge(schema_methods(self.dshape.measure),
dshape_methods(self.dshape))
if key in d:
func = d[key]
if func in method_properties:
return func(self)
else:
return functools.update_wrapper(partial(func, self), func)
else:
raise
@property
def _name(self):
pass
class Symbol(Expr):
"""
Symbolic data. The leaf of a Blaze expression
Example
-------
>>> points = Symbol('points', '5 * 3 * {x: int, y: int}')
"""
__slots__ = '_name', 'dshape'
__inputs__ = ()
def __init__(self, name, dshape):
self._name = name
if isinstance(dshape, _strtypes):
dshape = datashape.dshape(dshape)
if isinstance(dshape, Mono) and not isinstance(dshape, DataShape):
dshape = DataShape(dshape)
self.dshape = dshape
def __str__(self):
return self._name
def _resources(self):
return dict()
class ElemWise(Expr):
"""
Elementwise operation.
The shape of this expression matches the shape of the child.
"""
@property
def dshape(self):
return datashape.DataShape(*(self._child.dshape.shape
+ tuple(self.schema)))
class Field(ElemWise):
"""
A single field from an expression
Get a single field from an expression with record-type schema. Collapses
that record. We store the name of the field in the ``_name`` attribute.
SELECT a
FROM table
>>> points = Symbol('points', '5 * 3 * {x: int32, y: int32}')
>>> points.x.dshape
dshape("5 * 3 * int32")
"""
__slots__ = '_child', '_name'
def __str__(self):
        if re.match(r'^\w+$', self._name):
return '%s.%s' % (self._child, self._name)
else:
return "%s['%s']" % (self._child, self._name)
@property
def _expr(self):
return Symbol(self._name, datashape.DataShape(self.dshape.measure))
@property
def dshape(self):
shape = self._child.dshape.shape
schema = self._child.dshape.measure.dict[self._name]
shape = shape + schema.shape
schema = (schema.measure,)
return DataShape(*(shape + schema))
class Projection(ElemWise):
""" Select fields from data
SELECT a, b, c
FROM table
Examples
--------
>>> accounts = Symbol('accounts',
... 'var * {name: string, amount: int, id: int}')
>>> accounts[['name', 'amount']].schema
dshape("{ name : string, amount : int32 }")
See Also
--------
blaze.expr.expressions.Field
"""
__slots__ = '_child', '_fields'
@property
def fields(self):
return list(self._fields)
@property
def schema(self):
d = self._child.schema[0].dict
return DataShape(Record([(name, d[name]) for name in self.fields]))
def __str__(self):
return '%s[[%s]]' % (self._child,
', '.join(["'%s'" % name for name in self.fields]))
def _project(self, key):
if isinstance(key, list) and set(key).issubset(set(self.fields)):
return self._child[key]
raise ValueError("Column Mismatch: %s" % key)
def _get_field(self, fieldname):
if fieldname in self.fields:
return Field(self._child, fieldname)
raise ValueError("Field %s not found in columns %s" % (fieldname,
self.fields))
def projection(expr, names):
if not isinstance(names, (tuple, list)):
raise TypeError("Wanted list of strings, got %s" % names)
if not set(names).issubset(expr.fields):
raise ValueError("Mismatched names. Asking for names %s "
"where expression has names %s" % (names, expr.fields))
return Projection(expr, tuple(names))
projection.__doc__ = Projection.__doc__
from .utils import hashable_index, replace_slices
class Slice(Expr):
__slots__ = '_child', '_index'
def __init__(self, child, index):
self._child = child
self._index = hashable_index(index)
hash(self)
@property
def dshape(self):
return self._child.dshape.subshape[self.index]
@property
def index(self):
return replace_slices(self._index)
def __str__(self):
if isinstance(self.index, tuple):
return '%s[%s]' % (self._child, ', '.join(map(str, self._index)))
else:
return '%s[%s]' % (self._child, self._index)
class Selection(Expr):
""" Filter elements of expression based on predicate
Examples
--------
>>> accounts = Symbol('accounts',
... 'var * {name: string, amount: int, id: int}')
>>> deadbeats = accounts[accounts.amount < 0]
"""
__slots__ = '_child', 'predicate'
def __str__(self):
return "%s[%s]" % (self._child, self.predicate)
@property
def dshape(self):
shape = list(self._child.dshape.shape)
shape[0] = Var()
return DataShape(*(shape + [self._child.dshape.measure]))
def selection(table, predicate):
subexpr = common_subexpression(table, predicate)
if not builtins.all(isinstance(node, (ElemWise, Symbol))
or node.isidentical(subexpr)
for node in concat([path(predicate, subexpr),
path(table, subexpr)])):
raise ValueError("Selection not properly matched with table:\n"
"child: %s\n"
"apply: %s\n"
"predicate: %s" % (subexpr, table, predicate))
if not isboolean(predicate.dshape):
raise TypeError("Must select over a boolean predicate. Got:\n"
"%s[%s]" % (table, predicate))
return table._subs({subexpr: Selection(subexpr, predicate)})
selection.__doc__ = Selection.__doc__
class Label(ElemWise):
""" A Labeled expresion
Examples
--------
>>> accounts = Symbol('accounts', 'var * {name: string, amount: int}')
>>> (accounts.amount * 100)._name
'amount'
>>> (accounts.amount * 100).label('new_amount')._name
'new_amount'
See Also
--------
blaze.expr.expressions.ReLabel
"""
__slots__ = '_child', 'label'
@property
def schema(self):
return self._child.schema
@property
def _name(self):
return self.label
def _get_field(self, key):
if key[0] == self.fields[0]:
return self
else:
raise ValueError("Column Mismatch: %s" % key)
def label(expr, lab):
if expr._name == lab:
return expr
return Label(expr, lab)
label.__doc__ = Label.__doc__
class ReLabel(ElemWise):
"""
Table with same content but with new labels
Examples
--------
>>> accounts = Symbol('accounts', 'var * {name: string, amount: int}')
>>> accounts.schema
dshape("{ name : string, amount : int32 }")
>>> accounts.relabel(amount='balance').schema
dshape("{ name : string, balance : int32 }")
See Also
--------
blaze.expr.expressions.Label
"""
__slots__ = '_child', 'labels'
@property
def schema(self):
subs = dict(self.labels)
d = self._child.dshape.measure.dict
return DataShape(Record([[subs.get(name, name), dtype]
for name, dtype in self._child.dshape.measure.parameters[0]]))
def __str__(self):
return '%s.relabel(%s)' % (self._child, ', '.join('%s="%s"' % l for l
in self.labels))
def relabel(child, labels=None, **kwargs):
labels = labels or dict()
labels = toolz.merge(labels, kwargs)
if isinstance(labels, dict): # Turn dict into tuples
labels = tuple(sorted(labels.items()))
if isscalar(child.dshape.measure):
if child._name == labels[0][0]:
return child.label(labels[0][1])
else:
return child
return ReLabel(child, labels)
relabel.__doc__ = ReLabel.__doc__
class Map(ElemWise):
""" Map an arbitrary Python function across elements in a collection
Examples
--------
>>> from datetime import datetime
>>> t = Symbol('t', 'var * {price: real, time: int64}') # times as integers
>>> datetimes = t.time.map(datetime.utcfromtimestamp)
Optionally provide extra schema information
>>> datetimes = t.time.map(datetime.utcfromtimestamp,
... schema='{time: datetime}')
See Also
--------
    blaze.expr.expressions.Apply
"""
__slots__ = '_child', 'func', '_schema', '_name0'
@property
def schema(self):
if self._schema:
return dshape(self._schema)
else:
raise NotImplementedError("Schema of mapped column not known.\n"
"Please specify datashape keyword in .map method.\n"
"Example: t.columnname.map(function, 'int64')")
def label(self, name):
assert isscalar(self.dshape.measure)
return Map(self._child,
self.func,
self.schema,
name)
@property
def shape(self):
return self._child.shape
@property
def ndim(self):
return self._child.ndim
@property
def _name(self):
if self._name0:
return self._name0
else:
return self._child._name
class Apply(Expr):
""" Apply an arbitrary Python function onto an expression
Examples
--------
>>> t = Symbol('t', 'var * {name: string, amount: int}')
>>> h = Apply(t, hash) # Hash value of resultant table
Optionally provide extra datashape information
>>> h = Apply(t, hash, dshape='real')
Apply brings a function within the expression tree.
The following transformation is often valid
Before ``compute(Apply(expr, f), ...)``
After ``f(compute(expr, ...)``
See Also
--------
blaze.expr.expressions.Map
"""
__slots__ = '_child', 'func', '_dshape'
def __init__(self, child, func, dshape=None):
self._child = child
self.func = func
self._dshape = dshape
@property
def schema(self):
if iscollection(self.dshape):
return self.dshape.subshape[0]
else:
raise TypeError("Non-tabular datashape, %s" % self.dshape)
@property
def dshape(self):
if self._dshape:
return dshape(self._dshape)
else:
raise NotImplementedError("Datashape of arbitrary Apply not defined")
dshape_method_list = list()
schema_method_list = list()
method_properties = set()
dshape_methods = memoize(partial(select_functions, dshape_method_list))
schema_methods = memoize(partial(select_functions, schema_method_list))
def shape(expr):
""" Shape of expression
>>> Symbol('s', '3 * 5 * int32').shape
(3, 5)
Works on anything discoverable
>>> shape([[1, 2], [3, 4]])
(2, 2)
"""
s = list(discover(expr).shape)
for i, elem in enumerate(s):
try:
s[i] = int(elem)
except TypeError:
pass
return tuple(s)
def ndim(expr):
""" Number of dimensions of expression
>>> Symbol('s', '3 * var * int32').ndim
2
"""
return len(shape(expr))
dshape_method_list.extend([
(iscollection, set([shape, ndim])),
])
schema_method_list.extend([
(isscalar, set([label, relabel])),
(isrecord, set([relabel])),
])
method_properties.update([shape, ndim])
@dispatch(Expr)
def discover(expr):
return expr.dshape
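# --- Editor's note: hedged sketch, not in the original module. It walks the
# accessor machinery defined above (Field, Projection, Selection, Label) on a
# small symbolic table; the comparison operator in the predicate is assumed to
# come from the rest of blaze.expr, exactly as in the Selection doctest.
def _expressions_demo():  # pragma: no cover
    accounts = Symbol('accounts', 'var * {name: string, amount: int, id: int}')
    print(accounts.amount.dshape)                  # Field collapses the record: var * int32
    print(accounts[['name', 'amount']])            # Projection of a subset of fields
    print(accounts[accounts.amount < 0])           # Selection over a boolean predicate
    print(accounts.amount.label('balance')._name)  # Label renames the column to 'balance'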
| {
"repo_name": "vitan/blaze",
"path": "blaze/expr/expressions.py",
"copies": "1",
"size": "15494",
"license": "bsd-3-clause",
"hash": 2752361088076144600,
"line_mean": 25.9460869565,
"line_max": 81,
"alpha_frac": 0.5669291339,
"autogenerated": false,
"ratio": 3.911638475132542,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4978567609032542,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import toolz
import datashape
import functools
import keyword
import numpy as np
from toolz import concat, memoize, partial
from toolz.curried import map, filter
import re
from datashape import dshape, DataShape, Record, Var, Mono, Fixed
from datashape.predicates import isscalar, iscollection, isboolean, isrecord
from ..compatibility import _strtypes, builtins
from .core import Node, subs, common_subexpression, path
from .method_dispatch import select_functions
from ..dispatch import dispatch
__all__ = ['Expr', 'ElemWise', 'Field', 'Symbol', 'discover', 'Projection',
'projection', 'Selection', 'selection', 'Label', 'label', 'Map',
'ReLabel', 'relabel', 'Apply', 'Slice', 'shape', 'ndim', 'label',
'symbol']
_attr_cache = dict()
def isvalid_identifier(s, regex=re.compile('^[_a-zA-Z][_a-zA-Z0-9]*$')):
"""Check whether a string is a valid Python identifier
Examples
--------
>>> isvalid_identifier('Hello')
True
>>> isvalid_identifier('Hello world')
False
>>> isvalid_identifier('Helloworld!')
False
>>> isvalid_identifier('1a')
False
>>> isvalid_identifier('a1')
True
>>> isvalid_identifier('for')
False
>>> isvalid_identifier(None)
False
"""
return not not s and not keyword.iskeyword(s) and regex.match(s) is not None
def valid_identifier(s):
"""Rewrite a string to be a valid identifier if it contains
>>> valid_identifier('hello')
'hello'
>>> valid_identifier('hello world')
'hello_world'
>>> valid_identifier('hello.world')
'hello_world'
>>> valid_identifier('hello-world')
'hello_world'
>>> valid_identifier(None)
>>> valid_identifier('1a')
"""
if isinstance(s, _strtypes):
if s[0].isdigit():
return
return s.replace(' ', '_').replace('.', '_').replace('-', '_')
return s
class Expr(Node):
"""
Symbolic expression of a computation
All Blaze expressions (Join, By, Sort, ...) descend from this class. It
contains shared logic and syntax. It in turn inherits from ``Node`` which
holds all tree traversal logic
"""
def _get_field(self, fieldname):
if not isinstance(self.dshape.measure, Record):
if fieldname == self._name:
return self
raise ValueError(
"Can not get field '%s' of non-record expression %s" %
(fieldname, self))
return Field(self, fieldname)
def __getitem__(self, key):
if isinstance(key, _strtypes) and key in self.fields:
return self._get_field(key)
elif isinstance(key, Expr) and iscollection(key.dshape):
return selection(self, key)
elif (isinstance(key, list)
and builtins.all(isinstance(k, _strtypes) for k in key)):
if set(key).issubset(self.fields):
return self._project(key)
else:
raise ValueError('Names %s not consistent with known names %s'
% (key, self.fields))
elif (isinstance(key, tuple) and
all(isinstance(k, (int, slice, type(None), list, np.ndarray))
for k in key)):
return sliceit(self, key)
elif isinstance(key, (slice, int, type(None), list, np.ndarray)):
return sliceit(self, (key,))
raise ValueError("Not understood %s[%s]" % (self, key))
def map(self, func, schema=None, name=None):
return Map(self, func, schema, name)
def _project(self, key):
return projection(self, key)
@property
def schema(self):
return datashape.dshape(self.dshape.measure)
@property
def fields(self):
if isinstance(self.dshape.measure, Record):
return self.dshape.measure.names
name = getattr(self, '_name', None)
if name is not None:
return [self._name]
return []
def _len(self):
try:
return int(self.dshape[0])
except TypeError:
raise ValueError('Can not determine length of table with the '
'following datashape: %s' % self.dshape)
def __len__(self): # pragma: no cover
return self._len()
def __iter__(self):
raise NotImplementedError(
'Iteration over expressions is not supported.\n'
'Iterate over computed result instead, e.g. \n'
"\titer(expr) # don't do this\n"
"\titer(compute(expr)) # do this instead")
def __dir__(self):
result = dir(type(self))
if isrecord(self.dshape.measure) and self.fields:
result.extend(list(map(valid_identifier, self.fields)))
d = toolz.merge(schema_methods(self.dshape.measure),
dshape_methods(self.dshape))
result.extend(list(d))
return sorted(set(filter(isvalid_identifier, result)))
def __getattr__(self, key):
if key == '_hash':
raise AttributeError()
try:
return _attr_cache[(self, key)]
except:
pass
try:
result = object.__getattribute__(self, key)
except AttributeError:
fields = dict(zip(map(valid_identifier, self.fields),
self.fields))
if self.fields and key in fields:
if isscalar(self.dshape.measure): # t.foo.foo is t.foo
result = self
else:
result = self[fields[key]]
else:
d = toolz.merge(schema_methods(self.dshape.measure),
dshape_methods(self.dshape))
if key in d:
func = d[key]
if func in method_properties:
result = func(self)
else:
result = functools.update_wrapper(partial(func, self),
func)
else:
raise
_attr_cache[(self, key)] = result
return result
@property
def _name(self):
if (isscalar(self.dshape.measure) and
len(self._inputs) == 1 and
isscalar(self._child.dshape.measure)):
return self._child._name
def __enter__(self):
""" Enter context """
return self
def __exit__(self, *args):
""" Exit context
Close any open resource if we are called in context
"""
for value in self._resources().values():
try:
value.close()
except AttributeError:
pass
return True
_symbol_cache = dict()
def _symbol_key(args, kwargs):
if len(args) == 1:
name, = args
ds = None
token = None
if len(args) == 2:
name, ds = args
token = None
elif len(args) == 3:
name, ds, token = args
ds = kwargs.get('dshape', ds)
token = kwargs.get('token', token)
ds = dshape(ds)
return (name, ds, token)
@memoize(cache=_symbol_cache, key=_symbol_key)
def symbol(name, dshape, token=None):
return Symbol(name, dshape, token=token)
class Symbol(Expr):
"""
Symbolic data. The leaf of a Blaze expression
Example
-------
>>> points = symbol('points', '5 * 3 * {x: int, y: int}')
>>> points
points
>>> points.dshape
dshape("5 * 3 * {x: int32, y: int32}")
"""
__slots__ = '_hash', '_name', 'dshape', '_token'
__inputs__ = ()
def __init__(self, name, dshape, token=None):
self._name = name
if isinstance(dshape, _strtypes):
dshape = datashape.dshape(dshape)
if isinstance(dshape, Mono) and not isinstance(dshape, DataShape):
dshape = DataShape(dshape)
self.dshape = dshape
self._token = token
def __str__(self):
return self._name or ''
def _resources(self):
return dict()
@dispatch(Symbol, dict)
def _subs(o, d):
""" Subs symbols using symbol function
Supports caching"""
newargs = [subs(arg, d) for arg in o._args]
return symbol(*newargs)
class ElemWise(Expr):
"""
Elementwise operation.
The shape of this expression matches the shape of the child.
"""
@property
def dshape(self):
return datashape.DataShape(*(self._child.dshape.shape
+ tuple(self.schema)))
class Field(ElemWise):
"""
A single field from an expression
Get a single field from an expression with record-type schema. Collapses
that record. We store the name of the field in the ``_name`` attribute.
SELECT a
FROM table
>>> points = symbol('points', '5 * 3 * {x: int32, y: int32}')
>>> points.x.dshape
dshape("5 * 3 * int32")
"""
__slots__ = '_hash', '_child', '_name'
def __str__(self):
        if re.match(r'^\w+$', self._name):
return '%s.%s' % (self._child, self._name)
else:
return "%s['%s']" % (self._child, self._name)
@property
def _expr(self):
return symbol(self._name, datashape.DataShape(self.dshape.measure))
@property
def dshape(self):
shape = self._child.dshape.shape
schema = self._child.dshape.measure.dict[self._name]
shape = shape + schema.shape
schema = (schema.measure,)
return DataShape(*(shape + schema))
class Projection(ElemWise):
""" Select fields from data
SELECT a, b, c
FROM table
Examples
--------
>>> accounts = symbol('accounts',
... 'var * {name: string, amount: int, id: int}')
>>> accounts[['name', 'amount']].schema
dshape("{name: string, amount: int32}")
>>> accounts[['name', 'amount']]
accounts[['name', 'amount']]
See Also
--------
blaze.expr.expressions.Field
"""
__slots__ = '_hash', '_child', '_fields'
@property
def fields(self):
return list(self._fields)
@property
def schema(self):
d = self._child.schema[0].dict
return DataShape(Record([(name, d[name]) for name in self.fields]))
def __str__(self):
return '%s[%s]' % (self._child, self.fields)
def _project(self, key):
if isinstance(key, list) and set(key).issubset(set(self.fields)):
return self._child[key]
raise ValueError("Column Mismatch: %s" % key)
def _get_field(self, fieldname):
if fieldname in self.fields:
return Field(self._child, fieldname)
raise ValueError("Field %s not found in columns %s" % (fieldname,
self.fields))
def projection(expr, names):
if not names:
raise ValueError("Projection with no names")
if not isinstance(names, (tuple, list)):
raise TypeError("Wanted list of strings, got %s" % names)
if not set(names).issubset(expr.fields):
raise ValueError("Mismatched names. Asking for names %s "
"where expression has names %s" %
(names, expr.fields))
return Projection(expr, tuple(names))
projection.__doc__ = Projection.__doc__
from .utils import hashable_index, replace_slices
def sanitize_index_lists(ind):
""" Handle lists/arrays of integers/bools as indexes
>>> sanitize_index_lists([2, 3, 5])
[2, 3, 5]
>>> sanitize_index_lists([True, False, True, False])
[0, 2]
>>> sanitize_index_lists(np.array([1, 2, 3]))
[1, 2, 3]
>>> sanitize_index_lists(np.array([False, True, True]))
[1, 2]
"""
if not isinstance(ind, (list, np.ndarray)):
return ind
if isinstance(ind, np.ndarray):
ind = ind.tolist()
if isinstance(ind, list) and ind and isinstance(ind[0], bool):
ind = [a for a, b in enumerate(ind) if b]
return ind
def sliceit(child, index):
index2 = tuple(map(sanitize_index_lists, index))
index3 = hashable_index(index2)
s = Slice(child, index3)
hash(s)
return s
class Slice(Expr):
__slots__ = '_hash', '_child', '_index'
@property
def dshape(self):
return self._child.dshape.subshape[self.index]
@property
def index(self):
return replace_slices(self._index)
def __str__(self):
if type(self.index) == tuple:
return '%s[%s]' % (self._child, ', '.join(map(str, self._index)))
else:
return '%s[%s]' % (self._child, self._index)
class Selection(Expr):
""" Filter elements of expression based on predicate
Examples
--------
>>> accounts = symbol('accounts',
... 'var * {name: string, amount: int, id: int}')
>>> deadbeats = accounts[accounts.amount < 0]
"""
__slots__ = '_hash', '_child', 'predicate'
def __str__(self):
return "%s[%s]" % (self._child, self.predicate)
@property
def dshape(self):
shape = list(self._child.dshape.shape)
shape[0] = Var()
return DataShape(*(shape + [self._child.dshape.measure]))
def selection(table, predicate):
subexpr = common_subexpression(table, predicate)
if not builtins.all(isinstance(node, (ElemWise, Symbol))
or node.isidentical(subexpr)
for node in concat([path(predicate, subexpr),
path(table, subexpr)])):
raise ValueError("Selection not properly matched with table:\n"
"child: %s\n"
"apply: %s\n"
"predicate: %s" % (subexpr, table, predicate))
if not isboolean(predicate.dshape):
raise TypeError("Must select over a boolean predicate. Got:\n"
"%s[%s]" % (table, predicate))
return table._subs({subexpr: Selection(subexpr, predicate)})
selection.__doc__ = Selection.__doc__
class Label(ElemWise):
"""A Labeled expression
Examples
--------
>>> accounts = symbol('accounts', 'var * {name: string, amount: int}')
>>> expr = accounts.amount * 100
>>> expr._name
'amount'
>>> expr.label('new_amount')._name
'new_amount'
See Also
--------
blaze.expr.expressions.ReLabel
"""
__slots__ = '_hash', '_child', 'label'
@property
def schema(self):
return self._child.schema
@property
def _name(self):
return self.label
def _get_field(self, key):
if key[0] == self.fields[0]:
return self
else:
raise ValueError("Column Mismatch: %s" % key)
def __str__(self):
return "label(%s, %r)" % (self._child, self.label)
def label(expr, lab):
if expr._name == lab:
return expr
return Label(expr, lab)
label.__doc__ = Label.__doc__
class ReLabel(ElemWise):
"""
Table with same content but with new labels
Examples
--------
>>> accounts = symbol('accounts', 'var * {name: string, amount: int}')
>>> accounts.schema
dshape("{name: string, amount: int32}")
>>> accounts.relabel(amount='balance').schema
dshape("{name: string, balance: int32}")
>>> accounts.relabel(not_a_column='definitely_not_a_column')
Traceback (most recent call last):
...
ValueError: Cannot relabel non-existent child fields: {'not_a_column'}
See Also
--------
blaze.expr.expressions.Label
"""
__slots__ = '_hash', '_child', 'labels'
@property
def schema(self):
subs = dict(self.labels)
param = self._child.dshape.measure.parameters[0]
return DataShape(Record([[subs.get(name, name), dtype]
for name, dtype in param]))
def __str__(self):
return ('%s.relabel(%s)' %
(self._child, ', '.join('%s=%r' % l for l in self.labels)))
def relabel(child, labels=None, **kwargs):
labels = labels or dict()
labels = toolz.merge(labels, kwargs)
labels = dict((k, v) for k, v in labels.items() if k != v)
label_keys = set(labels)
fields = child.fields
if not label_keys.issubset(fields):
non_existent_fields = label_keys.difference(fields)
raise ValueError("Cannot relabel non-existent child fields: {%s}" %
', '.join(map(repr, non_existent_fields)))
if not labels:
return child
if isinstance(labels, dict): # Turn dict into tuples
labels = tuple(sorted(labels.items()))
if isscalar(child.dshape.measure):
if child._name == labels[0][0]:
return child.label(labels[0][1])
else:
return child
return ReLabel(child, labels)
relabel.__doc__ = ReLabel.__doc__
class Map(ElemWise):
""" Map an arbitrary Python function across elements in a collection
Examples
--------
>>> from datetime import datetime
>>> t = symbol('t', 'var * {price: real, time: int64}') # times as integers
>>> datetimes = t.time.map(datetime.utcfromtimestamp)
Optionally provide extra schema information
>>> datetimes = t.time.map(datetime.utcfromtimestamp,
... schema='{time: datetime}')
See Also
--------
    blaze.expr.expressions.Apply
"""
__slots__ = '_hash', '_child', 'func', '_schema', '_name0'
@property
def schema(self):
if self._schema:
return dshape(self._schema)
else:
raise NotImplementedError("Schema of mapped column not known.\n"
"Please specify datashape keyword in "
".map method.\nExample: "
"t.columnname.map(function, 'int64')")
def label(self, name):
assert isscalar(self.dshape.measure)
return Map(self._child,
self.func,
self.schema,
name)
@property
def shape(self):
return self._child.shape
@property
def ndim(self):
return self._child.ndim
@property
def _name(self):
if self._name0:
return self._name0
else:
return self._child._name
class Apply(Expr):
""" Apply an arbitrary Python function onto an expression
Examples
--------
>>> t = symbol('t', 'var * {name: string, amount: int}')
>>> h = t.apply(hash, dshape='int64') # Hash value of resultant dataset
You must provide the datashape of the result with the ``dshape=`` keyword.
For datashape examples see
http://datashape.pydata.org/grammar.html#some-simple-examples
If using a chunking backend and your operation may be safely split and
concatenated then add the ``splittable=True`` keyword argument
>>> t.apply(f, dshape='...', splittable=True) # doctest: +SKIP
See Also
--------
blaze.expr.expressions.Map
"""
__slots__ = '_hash', '_child', 'func', '_dshape', '_splittable'
@property
def schema(self):
if iscollection(self.dshape):
return self.dshape.subshape[0]
else:
raise TypeError("Non-tabular datashape, %s" % self.dshape)
@property
def dshape(self):
return dshape(self._dshape)
def apply(expr, func, dshape, splittable=False):
return Apply(expr, func, datashape.dshape(dshape), splittable)
apply.__doc__ = Apply.__doc__
dshape_method_list = list()
schema_method_list = list()
method_properties = set()
dshape_methods = memoize(partial(select_functions, dshape_method_list))
schema_methods = memoize(partial(select_functions, schema_method_list))
@dispatch(DataShape)
def shape(ds):
s = ds.shape
s = tuple(int(d) if isinstance(d, Fixed) else d for d in s)
return s
@dispatch(object)
def shape(expr):
""" Shape of expression
>>> symbol('s', '3 * 5 * int32').shape
(3, 5)
Works on anything discoverable
>>> shape([[1, 2], [3, 4]])
(2, 2)
"""
s = list(discover(expr).shape)
for i, elem in enumerate(s):
try:
s[i] = int(elem)
except TypeError:
pass
return tuple(s)
def ndim(expr):
""" Number of dimensions of expression
>>> symbol('s', '3 * var * int32').ndim
2
"""
return len(shape(expr))
dshape_method_list.extend([
(lambda ds: True, set([apply])),
(iscollection, set([shape, ndim])),
])
schema_method_list.extend([
(isscalar, set([label, relabel])),
(isrecord, set([relabel])),
])
method_properties.update([shape, ndim])
@dispatch(Expr)
def discover(expr):
return expr.dshape
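# --- Editor's note: hedged sketch, not in the original module. It illustrates
# the memoized ``symbol`` constructor and the attribute cache defined above.
def _symbol_cache_demo():  # pragma: no cover
    a = symbol('points', '5 * 3 * {x: int32, y: int32}')
    b = symbol('points', '5 * 3 * {x: int32, y: int32}')
    print(a is b)         # expected True: same (name, dshape, token) hits _symbol_cache
    c = symbol('points', '5 * 3 * {x: int32, y: int32}', token=1)
    print(c is a)         # expected False: a distinct token makes a distinct leaf
    print(a.x.dshape)     # field access resolves through __getattr__ and _attr_cache
    print(a[0:2].dshape)  # slicing goes through sliceit above: "2 * 3 * {x: int32, y: int32}"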
| {
"repo_name": "mrocklin/blaze",
"path": "blaze/expr/expressions.py",
"copies": "1",
"size": "20763",
"license": "bsd-3-clause",
"hash": 4959353198440828000,
"line_mean": 27.1341463415,
"line_max": 80,
"alpha_frac": 0.5606607908,
"autogenerated": false,
"ratio": 3.8933058316144757,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9951922598532676,
"avg_score": 0.00040880477635978643,
"num_lines": 738
} |
from __future__ import absolute_import, division, print_function
import toolz
import datashape
import re
from keyword import iskeyword
import numpy as np
from toolz import concat, memoize, partial, first
from toolz.curried import map, filter
from datashape import dshape, DataShape, Record, Var, Mono, Fixed
from datashape.predicates import isscalar, iscollection, isboolean, isrecord
from ..compatibility import _strtypes, builtins, boundmethod
from .core import Node, subs, common_subexpression, path
from .method_dispatch import select_functions
from ..dispatch import dispatch
from .utils import hashable_index, replace_slices
__all__ = ['Expr', 'ElemWise', 'Field', 'Symbol', 'discover', 'Projection',
'projection', 'Selection', 'selection', 'Label', 'label', 'Map',
'ReLabel', 'relabel', 'Apply', 'Slice', 'shape', 'ndim', 'label',
'symbol', 'Coerce']
_attr_cache = dict()
def isvalid_identifier(s):
"""Check whether a string is a valid Python identifier
Examples
--------
>>> isvalid_identifier('Hello')
True
>>> isvalid_identifier('Hello world')
False
>>> isvalid_identifier('Helloworld!')
False
>>> isvalid_identifier('1a')
False
>>> isvalid_identifier('a1')
True
>>> isvalid_identifier('for')
False
>>> isvalid_identifier(None)
False
"""
# the re module compiles and caches regexs so no need to compile it
return (s is not None and not iskeyword(s) and
re.match(r'^[_a-zA-Z][_a-zA-Z0-9]*$', s) is not None)
def valid_identifier(s):
"""Rewrite a string to be a valid identifier if it contains
>>> valid_identifier('hello')
'hello'
>>> valid_identifier('hello world')
'hello_world'
>>> valid_identifier('hello.world')
'hello_world'
>>> valid_identifier('hello-world')
'hello_world'
>>> valid_identifier(None)
>>> valid_identifier('1a')
"""
if isinstance(s, _strtypes):
if s[0].isdigit():
return
return s.replace(' ', '_').replace('.', '_').replace('-', '_')
return s
class Expr(Node):
"""
Symbolic expression of a computation
All Blaze expressions (Join, By, Sort, ...) descend from this class. It
contains shared logic and syntax. It in turn inherits from ``Node`` which
holds all tree traversal logic
"""
def _get_field(self, fieldname):
if not isinstance(self.dshape.measure, Record):
if fieldname == self._name:
return self
raise ValueError(
"Can not get field '%s' of non-record expression %s" %
(fieldname, self))
return Field(self, fieldname)
def __getitem__(self, key):
if isinstance(key, _strtypes) and key in self.fields:
return self._get_field(key)
elif isinstance(key, Expr) and iscollection(key.dshape):
return selection(self, key)
elif (isinstance(key, list)
and builtins.all(isinstance(k, _strtypes) for k in key)):
if set(key).issubset(self.fields):
return self._project(key)
else:
raise ValueError('Names %s not consistent with known names %s'
% (key, self.fields))
elif (isinstance(key, tuple) and
all(isinstance(k, (int, slice, type(None), list, np.ndarray))
for k in key)):
return sliceit(self, key)
elif isinstance(key, (slice, int, type(None), list, np.ndarray)):
return sliceit(self, (key,))
raise ValueError("Not understood %s[%s]" % (self, key))
def map(self, func, schema=None, name=None):
return Map(self, func, schema, name)
def _project(self, key):
return projection(self, key)
@property
def schema(self):
return datashape.dshape(self.dshape.measure)
@property
def fields(self):
if isinstance(self.dshape.measure, Record):
return self.dshape.measure.names
name = getattr(self, '_name', None)
if name is not None:
return [self._name]
return []
def _len(self):
try:
return int(self.dshape[0])
except TypeError:
raise ValueError('Can not determine length of table with the '
'following datashape: %s' % self.dshape)
def __len__(self): # pragma: no cover
return self._len()
def __iter__(self):
raise NotImplementedError(
'Iteration over expressions is not supported.\n'
'Iterate over computed result instead, e.g. \n'
"\titer(expr) # don't do this\n"
"\titer(compute(expr)) # do this instead")
def __dir__(self):
result = dir(type(self))
if isrecord(self.dshape.measure) and self.fields:
result.extend(list(map(valid_identifier, self.fields)))
d = toolz.merge(schema_methods(self.dshape.measure),
dshape_methods(self.dshape))
result.extend(list(d))
return sorted(set(filter(isvalid_identifier, result)))
def __getattr__(self, key):
if key == '_hash':
raise AttributeError()
try:
return _attr_cache[(self, key)]
except:
pass
try:
result = object.__getattribute__(self, key)
except AttributeError:
fields = dict(zip(map(valid_identifier, self.fields),
self.fields))
if self.fields and key in fields:
if isscalar(self.dshape.measure): # t.foo.foo is t.foo
result = self
else:
result = self[fields[key]]
else:
d = toolz.merge(schema_methods(self.dshape.measure),
dshape_methods(self.dshape))
if key in d:
func = d[key]
if func in method_properties:
result = func(self)
else:
result = boundmethod(func, self)
else:
raise
_attr_cache[(self, key)] = result
return result
@property
def _name(self):
if (isscalar(self.dshape.measure) and
len(self._inputs) == 1 and
isscalar(self._child.dshape.measure)):
return self._child._name
def __enter__(self):
""" Enter context """
return self
def __exit__(self, *args):
""" Exit context
Close any open resource if we are called in context
"""
for value in self._resources().values():
try:
value.close()
except AttributeError:
pass
return True
_symbol_cache = dict()
def _symbol_key(args, kwargs):
if len(args) == 1:
name, = args
ds = None
token = None
if len(args) == 2:
name, ds = args
token = None
elif len(args) == 3:
name, ds, token = args
ds = kwargs.get('dshape', ds)
token = kwargs.get('token', token)
ds = dshape(ds)
return (name, ds, token)
@memoize(cache=_symbol_cache, key=_symbol_key)
def symbol(name, dshape, token=None):
return Symbol(name, dshape, token=token)
class Symbol(Expr):
"""
Symbolic data. The leaf of a Blaze expression
Examples
--------
>>> points = symbol('points', '5 * 3 * {x: int, y: int}')
>>> points
points
>>> points.dshape
dshape("5 * 3 * {x: int32, y: int32}")
"""
__slots__ = '_hash', '_name', 'dshape', '_token'
__inputs__ = ()
def __init__(self, name, dshape, token=None):
self._name = name
if isinstance(dshape, _strtypes):
dshape = datashape.dshape(dshape)
if isinstance(dshape, Mono) and not isinstance(dshape, DataShape):
dshape = DataShape(dshape)
self.dshape = dshape
self._token = token
def __str__(self):
return self._name or ''
def _resources(self):
return dict()
@dispatch(Symbol, dict)
def _subs(o, d):
""" Subs symbols using symbol function
Supports caching"""
newargs = [subs(arg, d) for arg in o._args]
return symbol(*newargs)
class ElemWise(Expr):
"""
Elementwise operation.
The shape of this expression matches the shape of the child.
"""
@property
def dshape(self):
return datashape.DataShape(*(self._child.dshape.shape
+ tuple(self.schema)))
class Field(ElemWise):
"""
A single field from an expression.
Get a single field from an expression with record-type schema.
We store the name of the field in the ``_name`` attribute.
Examples
--------
>>> points = symbol('points', '5 * 3 * {x: int32, y: int32}')
>>> points.x.dshape
dshape("5 * 3 * int32")
For fields that aren't valid Python identifiers, use ``[]`` syntax:
>>> points = symbol('points', '5 * 3 * {"space station": float64}')
>>> points['space station'].dshape
dshape("5 * 3 * float64")
"""
__slots__ = '_hash', '_child', '_name'
def __str__(self):
fmt = '%s.%s' if isvalid_identifier(self._name) else '%s[%r]'
return fmt % (self._child, self._name)
@property
def _expr(self):
return symbol(self._name, datashape.DataShape(self.dshape.measure))
@property
def dshape(self):
shape = self._child.dshape.shape
schema = self._child.dshape.measure.dict[self._name]
shape = shape + schema.shape
schema = (schema.measure,)
return DataShape(*(shape + schema))
class Projection(ElemWise):
"""Select a subset of fields from data.
Examples
--------
>>> accounts = symbol('accounts',
... 'var * {name: string, amount: int, id: int}')
>>> accounts[['name', 'amount']].schema
dshape("{name: string, amount: int32}")
>>> accounts[['name', 'amount']]
accounts[['name', 'amount']]
See Also
--------
blaze.expr.expressions.Field
"""
__slots__ = '_hash', '_child', '_fields'
@property
def fields(self):
return list(self._fields)
@property
def schema(self):
d = self._child.schema[0].dict
return DataShape(Record([(name, d[name]) for name in self.fields]))
def __str__(self):
return '%s[%s]' % (self._child, self.fields)
def _project(self, key):
if isinstance(key, list) and set(key).issubset(set(self.fields)):
return self._child[key]
raise ValueError("Column Mismatch: %s" % key)
def _get_field(self, fieldname):
if fieldname in self.fields:
return Field(self._child, fieldname)
raise ValueError("Field %s not found in columns %s" % (fieldname,
self.fields))
def projection(expr, names):
if not names:
raise ValueError("Projection with no names")
if not isinstance(names, (tuple, list)):
raise TypeError("Wanted list of strings, got %s" % names)
if not set(names).issubset(expr.fields):
raise ValueError("Mismatched names. Asking for names %s "
"where expression has names %s" %
(names, expr.fields))
return Projection(expr, tuple(names))
projection.__doc__ = Projection.__doc__
def sanitize_index_lists(ind):
""" Handle lists/arrays of integers/bools as indexes
>>> sanitize_index_lists([2, 3, 5])
[2, 3, 5]
>>> sanitize_index_lists([True, False, True, False])
[0, 2]
>>> sanitize_index_lists(np.array([1, 2, 3]))
[1, 2, 3]
>>> sanitize_index_lists(np.array([False, True, True]))
[1, 2]
"""
if not isinstance(ind, (list, np.ndarray)):
return ind
if isinstance(ind, np.ndarray):
ind = ind.tolist()
if isinstance(ind, list) and ind and isinstance(ind[0], bool):
ind = [a for a, b in enumerate(ind) if b]
return ind
def sliceit(child, index):
index2 = tuple(map(sanitize_index_lists, index))
index3 = hashable_index(index2)
s = Slice(child, index3)
hash(s)
return s
class Slice(Expr):
"""Elements `start` until `stop`. On many backends, a `step` parameter
is also allowed.
Examples
--------
>>> from blaze import symbol
>>> accounts = symbol('accounts', 'var * {name: string, amount: int}')
>>> accounts[2:7].dshape
dshape("5 * {name: string, amount: int32}")
>>> accounts[2:7:2].dshape
dshape("3 * {name: string, amount: int32}")
"""
__slots__ = '_hash', '_child', '_index'
@property
def dshape(self):
return self._child.dshape.subshape[self.index]
@property
def index(self):
return replace_slices(self._index)
def __str__(self):
if isinstance(self.index, tuple):
index = ', '.join(map(str, self._index))
else:
index = str(self._index)
return '%s[%s]' % (self._child, index)
class Selection(Expr):
""" Filter elements of expression based on predicate
Examples
--------
>>> accounts = symbol('accounts',
... 'var * {name: string, amount: int, id: int}')
>>> deadbeats = accounts[accounts.amount < 0]
"""
__slots__ = '_hash', '_child', 'predicate'
def __str__(self):
return "%s[%s]" % (self._child, self.predicate)
@property
def dshape(self):
shape = list(self._child.dshape.shape)
shape[0] = Var()
return DataShape(*(shape + [self._child.dshape.measure]))
def selection(table, predicate):
subexpr = common_subexpression(table, predicate)
if not builtins.all(isinstance(node, (ElemWise, Symbol))
or node.isidentical(subexpr)
for node in concat([path(predicate, subexpr),
path(table, subexpr)])):
raise ValueError("Selection not properly matched with table:\n"
"child: %s\n"
"apply: %s\n"
"predicate: %s" % (subexpr, table, predicate))
if not isboolean(predicate.dshape):
raise TypeError("Must select over a boolean predicate. Got:\n"
"%s[%s]" % (table, predicate))
return table._subs({subexpr: Selection(subexpr, predicate)})
selection.__doc__ = Selection.__doc__
class Label(ElemWise):
"""An expression with a name.
Examples
--------
>>> accounts = symbol('accounts', 'var * {name: string, amount: int}')
>>> expr = accounts.amount * 100
>>> expr._name
'amount'
>>> expr.label('new_amount')._name
'new_amount'
See Also
--------
blaze.expr.expressions.ReLabel
"""
__slots__ = '_hash', '_child', 'label'
@property
def schema(self):
return self._child.schema
@property
def _name(self):
return self.label
def _get_field(self, key):
if key[0] == self.fields[0]:
return self
raise ValueError("Column Mismatch: %s" % key)
def __str__(self):
return 'label(%s, %r)' % (self._child, self.label)
def label(expr, lab):
if expr._name == lab:
return expr
return Label(expr, lab)
label.__doc__ = Label.__doc__
class ReLabel(ElemWise):
"""
Table with same content but with new labels
Examples
--------
>>> accounts = symbol('accounts', 'var * {name: string, amount: int}')
>>> accounts.schema
dshape("{name: string, amount: int32}")
>>> accounts.relabel(amount='balance').schema
dshape("{name: string, balance: int32}")
>>> accounts.relabel(not_a_column='definitely_not_a_column')
Traceback (most recent call last):
...
ValueError: Cannot relabel non-existent child fields: {'not_a_column'}
>>> s = symbol('s', 'var * {"0": int64}')
>>> s.relabel({'0': 'foo'})
s.relabel({'0': 'foo'})
>>> s.relabel(0='foo') # doctest: +SKIP
Traceback (most recent call last):
...
SyntaxError: keyword can't be an expression
Notes
-----
    When names are not valid Python names, such as integers or strings with
    spaces, you must pass a dictionary to ``relabel``. For example
.. code-block:: python
>>> s = symbol('s', 'var * {"0": int64}')
>>> s.relabel({'0': 'foo'})
s.relabel({'0': 'foo'})
>>> t = symbol('t', 'var * {"whoo hoo": ?float32}')
>>> t.relabel({"whoo hoo": 'foo'})
t.relabel({'whoo hoo': 'foo'})
See Also
--------
blaze.expr.expressions.Label
"""
__slots__ = '_hash', '_child', 'labels'
@property
def schema(self):
subs = dict(self.labels)
param = self._child.dshape.measure.parameters[0]
return DataShape(Record([[subs.get(name, name), dtype]
for name, dtype in param]))
def __str__(self):
labels = self.labels
if all(map(isvalid_identifier, map(first, labels))):
rest = ', '.join('%s=%r' % l for l in labels)
else:
rest = '{%s}' % ', '.join('%r: %r' % l for l in labels)
return '%s.relabel(%s)' % (self._child, rest)
def relabel(child, labels=None, **kwargs):
labels = labels or dict()
labels = toolz.merge(labels, kwargs)
labels = dict((k, v) for k, v in labels.items() if k != v)
label_keys = set(labels)
fields = child.fields
if not label_keys.issubset(fields):
non_existent_fields = label_keys.difference(fields)
raise ValueError("Cannot relabel non-existent child fields: {%s}" %
', '.join(map(repr, non_existent_fields)))
if not labels:
return child
if isinstance(labels, dict): # Turn dict into tuples
labels = tuple(sorted(labels.items()))
if isscalar(child.dshape.measure):
if child._name == labels[0][0]:
return child.label(labels[0][1])
else:
return child
return ReLabel(child, labels)
relabel.__doc__ = ReLabel.__doc__
class Map(ElemWise):
""" Map an arbitrary Python function across elements in a collection
Examples
--------
>>> from datetime import datetime
>>> t = symbol('t', 'var * {price: real, time: int64}') # times as integers
>>> datetimes = t.time.map(datetime.utcfromtimestamp)
Optionally provide extra schema information
>>> datetimes = t.time.map(datetime.utcfromtimestamp,
... schema='{time: datetime}')
See Also
--------
    blaze.expr.expressions.Apply
"""
__slots__ = '_hash', '_child', 'func', '_schema', '_name0'
@property
def schema(self):
if self._schema:
return dshape(self._schema)
else:
raise NotImplementedError("Schema of mapped column not known.\n"
"Please specify datashape keyword in "
".map method.\nExample: "
"t.columnname.map(function, 'int64')")
def label(self, name):
assert isscalar(self.dshape.measure)
return Map(self._child,
self.func,
self.schema,
name)
@property
def shape(self):
return self._child.shape
@property
def ndim(self):
return self._child.ndim
@property
def _name(self):
if self._name0:
return self._name0
else:
return self._child._name
class Apply(Expr):
""" Apply an arbitrary Python function onto an expression
Examples
--------
>>> t = symbol('t', 'var * {name: string, amount: int}')
>>> h = t.apply(hash, dshape='int64') # Hash value of resultant dataset
You must provide the datashape of the result with the ``dshape=`` keyword.
For datashape examples see
http://datashape.pydata.org/grammar.html#some-simple-examples
If using a chunking backend and your operation may be safely split and
concatenated then add the ``splittable=True`` keyword argument
>>> t.apply(f, dshape='...', splittable=True) # doctest: +SKIP
See Also
--------
blaze.expr.expressions.Map
"""
__slots__ = '_hash', '_child', 'func', '_dshape', '_splittable'
@property
def schema(self):
if iscollection(self.dshape):
return self.dshape.subshape[0]
else:
raise TypeError("Non-tabular datashape, %s" % self.dshape)
@property
def dshape(self):
return dshape(self._dshape)
class Coerce(Expr):
"""Coerce an expression to a different type.
Examples
--------
>>> t = symbol('t', '100 * float64')
>>> t.coerce(to='int64')
t.coerce(to='int64')
>>> t.coerce('float32')
t.coerce(to='float32')
>>> t.coerce('int8').dshape
dshape("100 * int8")
"""
__slots__ = '_hash', '_child', 'to'
@property
def schema(self):
return self.to
@property
def dshape(self):
return DataShape(*(self._child.shape + (self.schema,)))
def __str__(self):
return '%s.coerce(to=%r)' % (self._child, str(self.schema))
def apply(expr, func, dshape, splittable=False):
return Apply(expr, func, datashape.dshape(dshape), splittable)
apply.__doc__ = Apply.__doc__
dshape_method_list = list()
schema_method_list = list()
method_properties = set()
dshape_methods = memoize(partial(select_functions, dshape_method_list))
schema_methods = memoize(partial(select_functions, schema_method_list))
@dispatch(DataShape)
def shape(ds):
s = ds.shape
s = tuple(int(d) if isinstance(d, Fixed) else d for d in s)
return s
@dispatch(object)
def shape(expr):
""" Shape of expression
>>> symbol('s', '3 * 5 * int32').shape
(3, 5)
Works on anything discoverable
>>> shape([[1, 2], [3, 4]])
(2, 2)
"""
s = list(discover(expr).shape)
for i, elem in enumerate(s):
try:
s[i] = int(elem)
except TypeError:
pass
return tuple(s)
def ndim(expr):
""" Number of dimensions of expression
>>> symbol('s', '3 * var * int32').ndim
2
"""
return len(shape(expr))
def coerce(expr, to):
return Coerce(expr, dshape(to) if isinstance(to, _strtypes) else to)
coerce.__doc__ = Coerce.__doc__
dshape_method_list.extend([
(lambda ds: True, set([apply])),
(iscollection, set([shape, ndim])),
(lambda ds: iscollection(ds) and isscalar(ds.measure), set([coerce]))
])
schema_method_list.extend([
(isscalar, set([label, relabel, coerce])),
(isrecord, set([relabel])),
])
method_properties.update([shape, ndim])
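# Registration sketch (an assumption about usage, not from the original source):
# third-party expression methods are exposed by appending a
# (predicate, set_of_functions) pair to one of the lists above. The ``head``
# helper below is hypothetical; once appended it would appear as a method on
# every collection expression.
#
#     def head(expr, n=10):
#         return expr[:n]
#     dshape_method_list.append((iscollection, set([head])))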
@dispatch(Expr)
def discover(expr):
return expr.dshape
| {
"repo_name": "dwillmer/blaze",
"path": "blaze/expr/expressions.py",
"copies": "1",
"size": "22953",
"license": "bsd-3-clause",
"hash": -3723844283422460000,
"line_mean": 27.3720642769,
"line_max": 80,
"alpha_frac": 0.5615387967,
"autogenerated": false,
"ratio": 3.830607476635514,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48921462733355137,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import toolz
__all__ = ['Expr', 'Scalar']
def _str(s):
""" Wrap single quotes around strings """
if isinstance(s, str):
return "'%s'" % s
else:
return str(s)
class Expr(object):
@property
def args(self):
return tuple(getattr(self, slot) for slot in self.__slots__)
def isidentical(self, other):
return type(self) == type(other) and self.args == other.args
__eq__ = isidentical
def __hash__(self):
return hash((type(self), self.args))
def __str__(self):
return "%s(%s)" % (type(self).__name__, ', '.join(map(_str, self.args)))
def __repr__(self):
return str(self)
def traverse(self):
""" Traverse over tree, yielding all subtrees and leaves """
yield self
traversals = (arg.traverse() if isinstance(arg, Expr) else [arg]
for arg in self.args)
for trav in traversals:
for item in trav:
yield item
def subs(self, d):
""" Substitute terms in the tree
>>> from blaze.expr.table import TableSymbol
>>> t = TableSymbol('t', '{name: string, amount: int, id: int}')
>>> expr = t['amount'] + 3
>>> expr.subs({3: 4, 'amount': 'id'}).isidentical(t['id'] + 4)
True
"""
return subs(self, d)
def resources(self):
return toolz.merge([arg.resources() for arg in self.args
if isinstance(arg, Expr)])
def subs(o, d):
if o in d:
d = d.copy()
other = d.pop(o)
return subs(other, d)
if isinstance(o, (tuple, list)):
return type(o)([subs(arg, d) for arg in o])
if hasattr(o, 'args'):
newargs = [subs(arg, d) for arg in o.args]
return type(o)(*newargs)
return o
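# Example (illustrative): ``subs`` rewrites nested containers and expression
# arguments alike.
#
#     subs((3, 'amount'), {3: 4})    # -> (4, 'amount')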
class Scalar(Expr):
pass
| {
"repo_name": "aterrel/blaze",
"path": "blaze/expr/core.py",
"copies": "1",
"size": "1926",
"license": "bsd-3-clause",
"hash": 6890287834447016000,
"line_mean": 24.012987013,
"line_max": 80,
"alpha_frac": 0.5301142264,
"autogenerated": false,
"ratio": 3.661596958174905,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46917111845749054,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import traceback
from functools import wraps
from glue.core.session import Session
from glue.core.edit_subset_mode import EditSubsetMode
from glue.core.hub import HubListener
from glue.core import Data, Subset
from glue.core import command
from glue.core.data_factories import load_data
from glue.core.data_collection import DataCollection
from glue.config import settings
from glue.utils import as_list, PropertySetMixin
__all__ = ['Application', 'ViewerBase']
def catch_error(msg):
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as e:
m = "%s\n%s" % (msg, str(e))
detail = str(traceback.format_exc())
self = args[0]
self.report_error(m, detail)
return wrapper
return decorator
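# Usage sketch (illustrative; the method and message are hypothetical): decorate
# an Application method so any exception is routed to ``report_error`` instead
# of propagating, exactly as the ``load_data``/``add_data`` methods below do.
#
#     @catch_error("Could not frobnicate")
#     def frobnicate(self, path):
#         ...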
def as_flat_data_list(data):
datasets = []
if isinstance(data, Data):
datasets.append(data)
else:
for d in data:
datasets.extend(as_flat_data_list(d))
return datasets
class Application(HubListener):
def __init__(self, data_collection=None, session=None):
if session is not None:
self._session = session
session.application = self
self._data = session.data_collection
else:
self._data = data_collection or DataCollection()
self._session = Session(data_collection=self._data,
application=self)
EditSubsetMode().data_collection = self._data
self._hub = self._session.hub
self._cmds = self._session.command_stack
self._cmds.add_callback(lambda x: self._update_undo_redo_enabled())
self._settings = {}
for key, value, validator in settings:
self._settings[key] = [value, validator]
@property
def session(self):
return self._session
@property
def data_collection(self):
return self.session.data_collection
def new_data_viewer(self, viewer_class, data=None):
"""
Create a new data viewer, add it to the UI,
and populate with data
"""
if viewer_class is None:
return
c = viewer_class(self._session)
c.register_to_hub(self._session.hub)
if data and not c.add_data(data):
c.close(warn=False)
return
self.add_widget(c)
c.show()
return c
@catch_error("Failed to save session")
def save_session(self, path, include_data=False):
""" Save the data collection and hub to file.
Can be restored via restore_session
Note: Saving of client is not currently supported. Thus,
restoring this session will lose all current viz windows
"""
from glue.core.state import GlueSerializer
gs = GlueSerializer(self, include_data=include_data)
state = gs.dumps(indent=2)
with open(path, 'w') as out:
out.write(state)
@staticmethod
def restore_session(path):
"""
Reload a previously-saved session
Parameters
----------
path : str
Path to the file to load
Returns
-------
app : :class:`Application`
The loaded application
"""
from glue.core.state import GlueUnSerializer
with open(path) as infile:
state = GlueUnSerializer.load(infile)
return state.object('__main__')
def new_tab(self):
raise NotImplementedError()
def add_widget(self, widget, label=None, tab=None):
raise NotImplementedError()
def close_tab(self):
raise NotImplementedError()
def get_setting(self, key):
"""
Fetch the value of an application setting
"""
return self._settings[key][0]
def set_setting(self, key, value):
"""
Set the value of an application setting
Raises a KeyError if the setting does not exist
Raises a ValueError if the value is invalid
"""
validator = self._settings[key][1]
self._settings[key][0] = validator(value)
@property
def settings(self):
"""Iterate over settings"""
for key, (value, _) in self._settings.items():
yield key, value
@catch_error("Could not load data")
def load_data(self, path):
d = load_data(path)
self.add_datasets(self.data_collection, d)
@catch_error("Could not add data")
def add_data(self, *args, **kwargs):
"""
Add data to the session.
Positional arguments are interpreted using the data factories, while
keyword arguments are interpreted using the same infrastructure as the
`qglue` command.
"""
datasets = []
for path in args:
datasets.append(load_data(path))
links = kwargs.pop('links', None)
from glue.qglue import parse_data, parse_links
for label, data in kwargs.items():
datasets.extend(parse_data(data, label))
self.add_datasets(self.data_collection, datasets)
if links is not None:
self.data_collection.add_link(parse_links(self.data_collection, links))
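    # Usage sketch (illustrative; file name and keyword are hypothetical):
    #
    #     app.add_data('catalog.fits')          # loaded via the data factories
    #     app.add_data(my_table=dataframe)      # parsed like qglue keyword data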
def report_error(self, message, detail):
""" Report an error message to the user.
Must be implemented in a subclass
Parameters
----------
message : str
The message to display
detail : str
Longer context about the error
"""
raise NotImplementedError()
def do(self, command):
self._cmds.do(command)
def undo(self):
try:
self._cmds.undo()
except RuntimeError:
pass
def redo(self):
try:
self._cmds.redo()
except RuntimeError:
pass
def _update_undo_redo_enabled(self):
raise NotImplementedError()
@classmethod
def add_datasets(cls, data_collection, datasets):
""" Utility method to interactively add datasets to a
data_collection
Parameters
----------
data_collection : :class:`~glue.core.data_collection.DataCollection`
datasets : :class:`~glue.core.data.Data` or list of Data
One or more :class:`~glue.core.data.Data` instances
Adds datasets to the collection
"""
datasets = as_flat_data_list(datasets)
data_collection.extend(datasets)
# We now check whether any of the datasets can be merged. We need to
# make sure that datasets are only ever shown once, as we don't want
# to repeat the menu multiple times.
suggested = []
for data in datasets:
# If the data was already suggested, we skip over it
if data in suggested:
continue
shp = data.shape
other = [d for d in data_collection
if d.shape == shp and d is not data]
# If no other datasets have the same shape, we go to the next one
if not other:
continue
merges, label = cls._choose_merge(data, other)
if merges:
data_collection.merge(*merges, label=label)
suggested.append(data)
suggested.extend(other)
@staticmethod
def _choose_merge(data, other):
"""
Present an interface to the user for approving or rejecting
a proposed data merger. Returns a list of datasets from other
that the user has approved to merge with data
"""
raise NotImplementedError
@property
def viewers(self):
"""Return a tuple of tuples of viewers currently open
        The i'th tuple stores the viewers in the i'th tab
"""
return []
def set_data_color(self, color, alpha):
"""
Reset all the data colors to that specified.
"""
for data in self.data_collection:
data.style.color = color
data.style.alpha = alpha
def __gluestate__(self, context):
viewers = [list(map(context.id, tab)) for tab in self.viewers]
data = self.session.data_collection
from glue.main import _loaded_plugins
return dict(session=context.id(self.session), viewers=viewers,
data=context.id(data), plugins=_loaded_plugins)
@classmethod
def __setgluestate__(cls, rec, context):
self = cls(data_collection=context.object(rec['data']))
# manually register the newly-created session, which
# the viewers need
context.register_object(rec['session'], self.session)
for i, tab in enumerate(rec['viewers']):
if self.tab(i) is None:
self.new_tab()
for v in tab:
viewer = context.object(v)
self.add_widget(viewer, tab=i, hold_position=True)
return self
class ViewerBase(HubListener, PropertySetMixin):
""" Base class for data viewers in an application """
# the glue.core.layer_artist.LayerArtistContainer
# class/subclass to use
_layer_artist_container_cls = None
def __init__(self, session):
HubListener.__init__(self)
PropertySetMixin.__init__(self)
self._session = session
self._data = session.data_collection
self._hub = None
self._layer_artist_container = self._layer_artist_container_cls()
def register_to_hub(self, hub):
self._hub = hub
def unregister(self, hub):
""" Abstract method to unsubscribe from messages """
raise NotImplementedError
def request_add_layer(self, layer):
""" Issue a command to add a layer """
cmd = command.AddLayer(layer=layer, viewer=self)
self._session.command_stack.do(cmd)
def add_layer(self, layer):
if isinstance(layer, Data):
self.add_data(layer)
elif isinstance(layer, Subset):
self.add_subset(layer)
# else: SubsetGroup
def add_data(self, data):
""" Add a data instance to the viewer
This must be overridden by a subclass
Parameters
----------
data : :class:`~glue.core.data.Data`
Data object to add.
"""
raise NotImplementedError
def add_subset(self, subset):
""" Add a subset to the viewer
This must be overridden by a subclass
Parameters
----------
subset : :class:`~glue.core.subset.Subset`
Subset instance to add.
"""
raise NotImplementedError
def apply_roi(self, roi):
""" Apply an ROI to the client
Parameters
----------
roi : :class:`~glue.core.roi.Roi`
The ROI to apply.
"""
cmd = command.ApplyROI(client=self.client, roi=roi)
self._session.command_stack.do(cmd)
@property
def session(self):
return self._session
@property
def axes(self):
return self.client.axes
def layer_view(self):
raise NotImplementedError()
def options_widget(self):
raise NotImplementedError()
def move(self, x=None, y=None):
""" Reposition a viewer within the application.
        Parameters
        ----------
        x : int, optional
Offset of viewer's left edge from the left edge of the parent
window.
y : int, optional
Offset of the viewer's top edge from the top edge of the parent
window.
"""
raise NotImplementedError()
@property
def position(self):
"""
Return the location of the viewer as a tuple of ``(x, y)``
"""
raise NotImplementedError()
@property
def viewer_size(self):
"""
Return the size of the viewer as a tuple of ``(width, height)``
"""
raise NotImplementedError()
@viewer_size.setter
def viewer_size(self, value):
""" Resize the width and/or height of the viewer
Parameters
----------
        value : tuple of int
            The new ``(width, height)`` of the viewer.
"""
raise NotImplementedError()
def restore_layers(self, rec, context):
"""
Given a list of glue-serialized layers, restore them
to the viewer
"""
# if this viewer manages a client, rely on it to restore layers
if hasattr(self, 'client'):
return self.client.restore_layers(rec, context)
raise NotImplementedError()
@property
def layers(self):
"""Return a tuple of layers in this viewer.
A layer is a visual representation of a dataset or subset within
the viewer"""
return tuple(self._layer_artist_container)
def __gluestate__(self, context):
return dict(session=context.id(self._session),
size=self.viewer_size,
pos=self.position,
properties=dict((k, context.id(v))
for k, v in self.properties.items()),
layers=list(map(context.do, self.layers))
)
@classmethod
def __setgluestate__(cls, rec, context):
session = context.object(rec['session'])
result = cls(session)
result.register_to_hub(session.hub)
result.viewer_size = rec['size']
x, y = rec['pos']
result.move(x=x, y=y)
prop = dict((k, context.object(v)) for
k, v in rec['properties'].items())
result.restore_layers(rec['layers'], context)
result.properties = prop
return result
| {
"repo_name": "saimn/glue",
"path": "glue/core/application_base.py",
"copies": "1",
"size": "13952",
"license": "bsd-3-clause",
"hash": 947870857902747400,
"line_mean": 27.9460580913,
"line_max": 83,
"alpha_frac": 0.5788417431,
"autogenerated": false,
"ratio": 4.41100221308884,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.000024698676150958306,
"num_lines": 482
} |
from __future__ import absolute_import, division, print_function
import traceback
import warnings
from .dataarray import DataArray
from .dataset import Dataset
from .pycompat import PY2
class AccessorRegistrationWarning(Warning):
"""Warning for conflicts in accessor registration."""
class _CachedAccessor(object):
"""Custom property-like object (descriptor) for caching accessors."""
def __init__(self, name, accessor):
self._name = name
self._accessor = accessor
def __get__(self, obj, cls):
if obj is None:
# we're accessing the attribute of the class, i.e., Dataset.geo
return self._accessor
try:
accessor_obj = self._accessor(obj)
except AttributeError:
# __getattr__ on data object will swallow any AttributeErrors
# raised when initializing the accessor, so we need to raise as
# something else (GH933):
msg = 'error initializing %r accessor.' % self._name
if PY2:
msg += ' Full traceback:\n' + traceback.format_exc()
raise RuntimeError(msg)
# Replace the property with the accessor object. Inspired by:
# http://www.pydanny.com/cached-property.html
# We need to use object.__setattr__ because we overwrite __setattr__ on
# AttrAccessMixin.
object.__setattr__(obj, self._name, accessor_obj)
return accessor_obj
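# Illustrative note (not from the original source): after the first attribute
# access the accessor instance is stored on the object, so repeated lookups
# return the same cached object.
#
#     ds.geo is ds.geo    # -> True once a 'geo' accessor has been registered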
def _register_accessor(name, cls):
def decorator(accessor):
if hasattr(cls, name):
warnings.warn(
'registration of accessor %r under name %r for type %r is '
'overriding a preexisting attribute with the same name.'
% (accessor, name, cls),
AccessorRegistrationWarning,
stacklevel=2)
setattr(cls, name, _CachedAccessor(name, accessor))
return accessor
return decorator
def register_dataarray_accessor(name):
"""Register a custom accessor on xarray.DataArray objects.
Parameters
----------
name : str
Name under which the accessor should be registered. A warning is issued
if this name conflicts with a preexisting attribute.
See also
--------
register_dataset_accessor
"""
return _register_accessor(name, DataArray)
def register_dataset_accessor(name):
"""Register a custom property on xarray.Dataset objects.
Parameters
----------
name : str
Name under which the accessor should be registered. A warning is issued
if this name conflicts with a preexisting attribute.
Examples
--------
In your library code::
import xarray as xr
@xr.register_dataset_accessor('geo')
class GeoAccessor(object):
def __init__(self, xarray_obj):
self._obj = xarray_obj
@property
def center(self):
# return the geographic center point of this dataset
lon = self._obj.latitude
lat = self._obj.longitude
return (float(lon.mean()), float(lat.mean()))
def plot(self):
# plot this array's data on a map, e.g., using Cartopy
pass
Back in an interactive IPython session:
>>> ds = xarray.Dataset({'longitude': np.linspace(0, 10),
... 'latitude': np.linspace(0, 20)})
>>> ds.geo.center
(5.0, 10.0)
>>> ds.geo.plot()
# plots data on a map
See also
--------
register_dataarray_accessor
"""
return _register_accessor(name, Dataset)
| {
"repo_name": "jcmgray/xarray",
"path": "xarray/core/extensions.py",
"copies": "1",
"size": "3666",
"license": "apache-2.0",
"hash": 2109780267357222700,
"line_mean": 29.8067226891,
"line_max": 79,
"alpha_frac": 0.5927441353,
"autogenerated": false,
"ratio": 4.599749058971142,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5692493194271142,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import traceback
__all__ = ['die_on_error', 'avoid_circular']
def die_on_error(msg):
"""
Non-GUI version of the decorator in glue.utils.qt.decorators.
In this case we just let the Python exception terminate the execution.
"""
def decorator(func):
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as e:
print('=' * 72)
print(msg + ' (traceback below)')
print('-' * 72)
traceback.print_exc()
print('=' * 72)
return wrapper
return decorator
def avoid_circular(meth):
def wrapper(self, *args, **kwargs):
if not hasattr(self, '_in_avoid_circular') or not self._in_avoid_circular:
self._in_avoid_circular = True
try:
return meth(self, *args, **kwargs)
finally:
self._in_avoid_circular = False
return wrapper
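# Usage sketch (illustrative, hypothetical class): guard a callback that could
# otherwise re-trigger itself through a partner object.
#
#     class SyncedSlider(object):
#         @avoid_circular
#         def on_change(self, value):
#             self.partner.set_value(value)   # would normally call back into us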
| {
"repo_name": "stscieisenhamer/glue",
"path": "glue/utils/decorators.py",
"copies": "3",
"size": "1050",
"license": "bsd-3-clause",
"hash": -6189258101026877000,
"line_mean": 28.1666666667,
"line_max": 82,
"alpha_frac": 0.5466666667,
"autogenerated": false,
"ratio": 4.251012145748988,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6297678812448988,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import types
import logging
from functools import wraps
import numpy as np
# We avoid importing matplotlib up here otherwise Matplotlib and therefore Qt
# get imported as soon as glue.utils is imported.
from glue.external.axescache import AxesCache
from glue.utils.misc import DeferredMethod
__all__ = ['renderless_figure', 'all_artists', 'new_artists', 'remove_artists',
'get_extent', 'view_cascade', 'fast_limits', 'defer_draw',
'color2rgb', 'point_contour', 'cache_axes', 'DeferDrawMeta']
def renderless_figure():
# Matplotlib figure that skips the render step, for test speed
from mock import MagicMock
import matplotlib.pyplot as plt
fig = plt.figure()
fig.canvas.draw = MagicMock()
plt.close('all')
return fig
def all_artists(fig):
"""
Build a set of all Matplotlib artists in a Figure
"""
return set(item
for axes in fig.axes
for container in [axes.collections, axes.patches, axes.lines,
axes.texts, axes.artists, axes.images]
for item in container)
def new_artists(fig, old_artists):
"""
Find the newly-added artists in a figure
:param fig: Matplotlib figure
:param old_artists: Return value from :func:all_artists
:returns: All artists added since all_artists was called
"""
return all_artists(fig) - old_artists
def remove_artists(artists):
"""
Remove a collection of matplotlib artists from a scene
:param artists: Container of artists
"""
for a in artists:
try:
a.remove()
except ValueError: # already removed
pass
def get_extent(view, transpose=False):
sy, sx = [s for s in view if isinstance(s, slice)]
if transpose:
return (sy.start, sy.stop, sx.start, sx.stop)
return (sx.start, sx.stop, sy.start, sy.stop)
def view_cascade(data, view):
"""
Return a set of views progressively zoomed out of input at roughly constant
pixel count
Parameters
----------
data : array-like
The array to view
view :
The original view into the data
"""
shp = data.shape
v2 = list(view)
logging.debug("image shape: %s, view: %s", shp, view)
# choose stride length that roughly samples entire image
# at roughly the same pixel count
step = max(shp[i - 1] * v.step // max(v.stop - v.start, 1)
for i, v in enumerate(view) if isinstance(v, slice))
step = max(step, 1)
for i, v in enumerate(v2):
        if not isinstance(v, slice):
continue
v2[i] = slice(0, shp[i - 1], step)
return tuple(v2), view
def _scoreatpercentile(values, percentile, limit=None):
# Avoid using the scipy version since it is available in Numpy
if limit is not None:
values = values[(values >= limit[0]) & (values <= limit[1])]
return np.percentile(values, percentile)
def fast_limits(data, plo, phi):
"""
Quickly estimate percentiles in an array, using a downsampled version
Parameters
----------
data : `numpy.ndarray`
The array to estimate the percentiles for
plo, phi : float
The percentile values
Returns
-------
lo, hi : float
The percentile values
"""
shp = data.shape
view = tuple([slice(None, None, np.intp(max(s / 50, 1))) for s in shp])
values = np.asarray(data)[view]
    if not np.isfinite(values).any():
return (0.0, 1.0)
limits = (-np.inf, np.inf)
lo = _scoreatpercentile(values.flat, plo, limit=limits)
hi = _scoreatpercentile(values.flat, phi, limit=limits)
return lo, hi
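# Example (illustrative): estimate robust display limits for a large image by
# sampling it on a coarse grid (roughly 50 samples per axis).
#
#     lo, hi = fast_limits(image, 1., 99.)   # ``image`` is any 2D numpy array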
def defer_draw(func):
"""
Decorator that globally defers all Agg canvas draws until
function exit.
If a Canvas instance's draw method is invoked multiple times,
it will only be called once after the wrapped function returns.
"""
@wraps(func)
def wrapper(*args, **kwargs):
from matplotlib.backends.backend_agg import FigureCanvasAgg
# don't recursively defer draws
if isinstance(FigureCanvasAgg.draw, DeferredMethod):
return func(*args, **kwargs)
try:
FigureCanvasAgg.draw = DeferredMethod(FigureCanvasAgg.draw)
result = func(*args, **kwargs)
finally:
# We need to use another try...finally block here in case the
# executed deferred draw calls fail for any reason
try:
FigureCanvasAgg.draw.execute_deferred_calls()
finally:
FigureCanvasAgg.draw = FigureCanvasAgg.draw.original_method
return result
wrapper._is_deferred = True
return wrapper
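# Usage sketch (illustrative, hypothetical method): all canvas draws requested
# inside the wrapped call collapse into a single draw on exit.
#
#     @defer_draw
#     def update_all_layers(self):
#         for artist in self.artists:   # hypothetical attribute
#             artist.update()           # each update may request a redraw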
class DeferDrawMeta(type):
"""
Metaclass that decorates all methods on a class with @defer_draw
"""
def __new__(cls, name, bases, attrs):
for attr_name, attr_value in attrs.items():
if isinstance(attr_value, types.FunctionType):
attrs[attr_name] = defer_draw(attr_value)
return type.__new__(cls, name, bases, attrs)
def color2rgb(color):
from matplotlib.colors import ColorConverter
result = ColorConverter().to_rgb(color)
return result
def point_contour(x, y, data):
"""Calculate the contour that passes through (x,y) in data
:param x: x location
:param y: y location
:param data: 2D image
:type data: :class:`numpy.ndarray`
Returns:
    * A (nrow, 2) numpy array. The two columns give the x and
      y locations of the contour vertices
"""
try:
from scipy import ndimage
except ImportError:
raise ImportError("Image processing in Glue requires SciPy")
inten = data[y, x]
labeled, nr_objects = ndimage.label(data >= inten)
z = data * (labeled == labeled[y, x])
y, x = np.mgrid[0:data.shape[0], 0:data.shape[1]]
from matplotlib import _cntr
cnt = _cntr.Cntr(x, y, z)
xy = cnt.trace(inten)
if not xy:
return None
xy = xy[0]
return xy
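# Example (illustrative): trace the iso-intensity contour passing through
# pixel (x=10, y=20) of an image.
#
#     verts = point_contour(10, 20, image)   # (n, 2) array of x, y vertices, or None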
class AxesResizer(object):
def __init__(self, ax, margins):
self.ax = ax
self.margins = margins
@property
def margins(self):
return self._margins
@margins.setter
def margins(self, margins):
self._margins = margins
def on_resize(self, event):
fig_width = self.ax.figure.get_figwidth()
fig_height = self.ax.figure.get_figheight()
x0 = self.margins[0] / fig_width
x1 = 1 - self.margins[1] / fig_width
y0 = self.margins[2] / fig_height
y1 = 1 - self.margins[3] / fig_height
dx = max(0.01, x1 - x0)
dy = max(0.01, y1 - y0)
self.ax.set_position([x0, y0, dx, dy])
self.ax.figure.canvas.draw()
def freeze_margins(axes, margins=[1, 1, 1, 1]):
"""
Make sure margins of axes stay fixed.
Parameters
----------
    axes : matplotlib.axes.Axes
        The axes for which to fix the margins
margins : iterable
The margins, in inches. The order of the margins is
``[left, right, bottom, top]``
Notes
-----
The object that controls the resizing is stored as the resizer attribute of
the Axes. This can be used to then change the margins:
>> ax.resizer.margins = [0.5, 0.5, 0.5, 0.5]
"""
axes.resizer = AxesResizer(axes, margins)
axes.figure.canvas.mpl_connect('resize_event', axes.resizer.on_resize)
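# Usage sketch (illustrative): fix one-inch margins on an axes and adjust them
# later through the stored resizer.
#
#     freeze_margins(ax, margins=[1, 1, 1, 1])
#     ax.resizer.margins = [0.5, 0.5, 0.5, 0.5]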
def cache_axes(axes, toolbar):
"""
Set up caching for an axes object.
After this, cached renders will be used to quickly re-render an axes during
window resizing or interactive pan/zooming.
This function returns an AxesCache instance.
Parameters
----------
axes : `~matplotlib.axes.Axes`
The axes to cache
toolbar : `~glue.viewers.common.qt.toolbar.GlueToolbar`
The toolbar managing the axes' canvas
"""
canvas = axes.figure.canvas
cache = AxesCache(axes)
canvas.resize_begin.connect(cache.enable)
canvas.resize_end.connect(cache.disable)
toolbar.pan_begin.connect(cache.enable)
toolbar.pan_end.connect(cache.disable)
return cache
| {
"repo_name": "stscieisenhamer/glue",
"path": "glue/utils/matplotlib.py",
"copies": "2",
"size": "8258",
"license": "bsd-3-clause",
"hash": -5841126655630998000,
"line_mean": 26.4352159468,
"line_max": 79,
"alpha_frac": 0.6248486316,
"autogenerated": false,
"ratio": 3.726534296028881,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5351382927628882,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import types
import re
from datashape.typesets import TypeSet, matches_typeset
from datashape import (from_numba_str, to_numba, broadcastable)
from blaze.py2help import _strtypes, PY2
from .. import llvm_array as lla
from . import blaze_kernels
from .blaze_kernels import BlazeElementKernel, frompyfunc
def process_signature(ranksignature):
"""
Convert list of comma-separated strings into a list of integers showing
the rank of each argument and a list of sets of tuples. Each set
shows dimensions of arguments that must match by indicating a 2-tuple
where the first integer is the argument number (output is argument -1) and
the second integer is the dimension
Examples
--------
>>> from blaze.bkernel.blfuncs import process_signature
>>> process_signature(['M,L', 'L,K', 'K', ''])
([2, 2, 1, 0], [set([(2, 0), (1, 1)]), set([(0, 1), (1, 0)])])
"""
ranklist = [0 if not arg else len(arg.split(',')) for arg in ranksignature]
varmap = {}
for i, arg in enumerate(ranksignature):
if not arg:
continue
for k, var in enumerate(arg.split(',')):
varmap.setdefault(var, []).append((i, k))
connections = [set(val) for val in varmap.values() if len(val) > 1]
return ranklist, connections
def get_signature(typetuple):
sig = [','.join((str(x) for x in arg.shape)) for arg in typetuple]
return process_signature(sig)
def _convert_string(kind, source):
try:
func = getattr(blaze_kernels, 'from%s' % kind)
except AttributeError:
        raise ValueError("No conversion function for %s found." % kind)
return func(source)
# Parse numba-style calling string convention
# and construct dshapes
regex = re.compile('([^(]*)[(]([^)]*)[)]')
def to_dshapes(mystr, output=False):
ma = regex.match(mystr)
if ma is None:
raise ValueError("Cannot understand signature string % s" % mystr)
else:
ret, args = ma.groups()
result = tuple(from_numba_str(x) for x in args.split(','))
if output:
result += (from_numba_str(ret),)
return result
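# Example (illustrative, assuming numba's 'f8' shorthand for float64): the
# signature string is split into input data-shapes, with the return type
# appended when ``output=True``.
#
#     to_dshapes('f8(f8,f8)')                # -> (float64, float64)
#     to_dshapes('f8(f8,f8)', output=True)   # -> (float64, float64, float64)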
def convert_kernel(value, dshapes=None):
template = None
from llvm.core import FunctionType
if isinstance(value, tuple): # Ex: ('cpp', source) or ('f8(f8,f8)', _add)
if len(value) == 2 and isinstance(value[1], types.FunctionType) and \
isinstance(value[0], _strtypes):
if '*' in value[0]:
return None, (to_dshapes(value[0]), value[1])
krnl = frompyfunc(value[1], value[0]) # use Numba
elif len(value) == 2 and isinstance(value[1], _strtypes) and \
isinstance(value[0], _strtypes):
krnl = _convert_string(value[0], value[1]) # Use blaze_kernels.from<val0>
else:
raise TypeError("Cannot parse kernel specification %s" % value)
elif isinstance(value, types.FunctionType):
# Called when a function is used for value directly
# dshapes must be present as the mapping and as datashapes
istemplate = any(isinstance(ds, TypeSet) for ds in dshapes[:-1])
if istemplate:
krnl = None
template = (dshapes[:-1], value)
else:
args = ','.join(str(to_numba(ds)) for ds in dshapes[:-1])
signature = '{0}({1})'.format(str(to_numba(dshapes[-1])), args)
krnl = frompyfunc(value, signature, dshapes=dshapes)
elif isinstance(value, FunctionType):
# Called the LLVM Function is used in directly
krnl = BlazeElementKernel(value, dshapes=dshapes)
else:
raise TypeError("Cannot convert value = %s and dshapes = %s" % (value, dshapes))
return krnl, template
def process_typetable(typetable):
"""
Process type-table dictionary which maps a signature list with
(input-type1, input-type2, output_type) to a kernel into a
lookup-table dictionary which maps an input-only signature list
to a kernel matching those inputs. The output
    is placed with a tuple of the output-type plus the signature.
    So far it assumes the types all have the same rank
    and deduces the signature from the first kernel found.
    Also allows the typetable to have "templates" which don't resolve to
    kernels and are used if no matching kernel can be found.
    templates are a list of 2-tuples (input signature data-shape, template).
Numba will be used to jit the template at call-time to create a
BlazeElementKernel. The input signature is a tuple
of data-shape objects and TypeSets
"""
newtable = {}
templates = []
if isinstance(typetable, list):
for item in typetable:
krnl, template = convert_kernel(item)
if template is None:
in_shapes = krnl.dshapes[:-1]
newtable[in_shapes] = krnl
else:
templates.append(template)
else:
for key, value in typetable.items():
if not isinstance(value, BlazeElementKernel):
value, template = convert_kernel(value, dshapes=key)
if template is None:
in_shapes = value.dshapes[:-1]
newtable[in_shapes] = value
else:
templates.append(template)
# FIXME:
# Assumes the same ranklist and connections for all the keys
if len(newtable.values()) > 0:
key = next(iter(newtable.values())).dshapes
ranklist, connections = get_signature(key)
else: # Currently templates are all rank-0
ranklist = [0]*len(templates[0][0])
connections = []
return ranklist, connections, newtable, templates
# Define the Blaze Function
# * A Blaze Function is a callable that takes Concrete Arrays and returns
# Deferred Concrete Arrays
# * At the core of the Blaze Function is a kernel which is a type-resolved
# element-wise expression graph where elements can be any contiguous
# primitive type (right-most part of the data-shape)
# * Kernels have a type signature which we break up into the rank-signature
# and the primitive type signature because a BlazeFuncDeprecated will have one
# rank-signature but possibly multiple primitive type signatures.
# * Example BlazeFuncDeprecateds are sin, svd, eig, fft, sum, prod, inner1d, add, mul
# etc --- kernels all work on in-memory "elements"
class BlazeFuncDeprecated(object):
# DEPRECATION NOTE:
# This particular blaze func class is being deprecated in favour of
# a new implementation, using the pykit system. Functionality will
# be moved/copied from here as needed until this class can be removed.
def __init__(self, name, typetable=None, template=None, inouts=[]):
"""
Construct a Blaze Function from a rank-signature and keyword arguments.
The typetable is a dictionary with keys a tuple of types
and values as corresponding BlazeScalarKernel objects. The
tuple of types has Input types first with the Output Type last
Arguments
=========
name : string
Name of the Blaze Function.
typetable : dict
Dictionary mapping argument types to blaze kernels.
The kernels must all be BlazeElementKernel instances or
convertible to BlazeElementKernel via the following
mechanisms:
python-function: converted via numba
llvm-function: directly wrapped
ctypes-function: wrapped via an llvm function call
inouts : list of integers
A list of the parameter indices which may be written to
in addition to read from. (NotImplemented)
"""
self.name = name
if typetable is None:
self.ranks = None
self.rankconnect = []
self.dispatch = {}
self.templates = []
else:
res = process_typetable(typetable)
self.ranks, self.rankconnect, self.dispatch, self.templates = res
self.inouts = inouts
self._add_template(template)
def _add_template(self, template):
if template is None:
return
if lla.isiterable(template):
for temp in template:
self._add_template_sub(temp)
else:
self._add_template_sub(template)
def _add_template_sub(self, template):
if isinstance(template, tuple):
self.add_template(template[0], signature=template[1])
else:
self.add_template(template)
@property
def nin(self):
return len(self.ranks)-1
def compatible(self, args):
# check for broadcastability
# TODO: figure out correct types as well
dshapes = [arg._data.dshape for arg in args]
return broadcastable(dshapes, self.ranks,
rankconnect=self.rankconnect)
# FIXME: This just does a dumb look-up
# assumes input kernels all have the same rank
def find_best_kernel(self, types):
mtypes = [ds.sigform() for ds in types]
ranks = [len(ds)-1 for ds in types]
test_rank = min(ranks)
mtypes = tuple(ds.subarray(rank-test_rank)
for ds, rank in zip(mtypes, ranks))
krnl = None
while test_rank >= 0:
krnl = self.dispatch.get(mtypes, None)
if krnl is not None or test_rank==0:
break
test_rank -= 1
mtypes = tuple(ds.subarray(1) for ds in mtypes)
# Templates Only works for "measures"
if krnl is None:
measures = tuple(ds.measure for ds in types)
for sig, template in self.templates:
if sig == measures or matches_typeset(measures, sig):
krnl = frompyfunc(template, [to_numba(x) for x in measures])
self.dispatch[measures] = krnl
break
if krnl is None:
raise ValueError("Did not find matching kernel for " + str(mtypes))
return krnl
def add_funcs(self, value):
res = process_typetable(value)
ranklist, connections, newtable, templates = res
if self.ranks is None:
self.ranks = ranklist
self.rankconnect = connections
self.dispatch.update(newtable)
self.templates.extend(templates)
def add_template(self, func, signature=None):
if signature is None:
fc = func.func_code if PY2 else func.__code__
signature = '*(%s)' % (','.join(['*']*fc.co_argcount))
keysig = to_dshapes(signature)
self.templates.append((keysig, func))
# All templates are 0-rank
if self.ranks is None:
self.ranks = [0]*len(keysig)
| {
"repo_name": "zeeshanali/blaze",
"path": "blaze/compute/bkernel/blaze_func.py",
"copies": "2",
"size": "10904",
"license": "bsd-3-clause",
"hash": 6671404794311050000,
"line_mean": 36.9930313589,
"line_max": 88,
"alpha_frac": 0.6161958914,
"autogenerated": false,
"ratio": 4,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.56161958914,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import types
import datashape
from datashape import dshape, var, datetime_, float32, int64, bool_
from datashape.util.testing import assert_dshape_equal
import pandas as pd
import pytest
from blaze.compatibility import pickle
from blaze.expr import (
Expr,
Field,
Node,
coalesce,
label,
symbol,
transform,
)
def test_arguments():
assert Expr._arguments == ('_child',)
assert Node._arguments == ('_child',)
def test_Symbol():
e = symbol('e', '3 * 5 * {name: string, amount: int}')
assert e.dshape == dshape('3 * 5 * {name: string, amount: int}')
assert e.shape == (3, 5)
assert str(e) == 'e'
def test_symbol_caches():
assert symbol('e', 'int') is symbol('e', 'int')
def test_Symbol_tokens():
assert symbol('x', 'int').isidentical(symbol('x', 'int'))
assert not symbol('x', 'int').isidentical(symbol('x', 'int', 1))
def test_Field():
e = symbol('e', '3 * 5 * {name: string, amount: int}')
assert 'name' in dir(e)
assert e.name.dshape == dshape('3 * 5 * string')
assert e.name.schema == dshape('string')
assert e.amount._name == 'amount'
def test_nested_fields():
e = symbol(
'e', '3 * {name: string, payments: var * {amount: int, when: datetime}}')
assert e.payments.dshape == dshape(
'3 * var * {amount: int, when: datetime}')
assert e.payments.schema == dshape('{amount: int, when: datetime}')
assert 'amount' in dir(e.payments)
assert e.payments.amount.dshape == dshape('3 * var * int')
@pytest.mark.xfail
def test_partialed_methods_have_docstrings():
e = symbol('e', '3 * 5 * {name: string, amount: int}')
assert 'string comparison' in e.name.str.like.__doc__
def test_relabel():
e = symbol('e', '{name: string, amount: int}')
assert e.relabel(amount='balance').fields == ['name', 'balance']
def test_meaningless_relabel_doesnt_change_input():
e = symbol('e', '{name: string, amount: int}')
assert e.relabel(amount='amount').isidentical(e)
def test_relabel_with_invalid_identifiers_reprs_as_dict():
s = symbol('s', '{"0": int64}')
assert str(s.relabel({'0': 'foo'})) == "s.relabel({'0': 'foo'})"
def test_dir():
e = symbol('e', '3 * 5 * {name: string, amount: int, x: real}')
assert 'name' in dir(e)
assert 'name' not in dir(e.name)
assert 'isnan' in dir(e.x)
assert 'isnan' not in dir(e.amount)
def test_label():
e = symbol('e', '3 * int')
assert e._name == 'e'
assert label(e, 'foo')._name == 'foo'
assert label(e, 'e').isidentical(e)
def test_fields_with_spaces():
e = symbol('e', '{x: int, "a b": int}')
assert isinstance(e['a b'], Field)
assert 'a b' not in dir(e)
assert 'a_b' in dir(e)
assert e.a_b.isidentical(e['a b'])
def test_selection_name_matches_child():
t = symbol('t', 'var * {x: int, "a.b": int}')
assert t.x[t.x > 0]._name == t.x._name
assert t.x[t.x > 0].fields == t.x.fields
def test_symbol_subs():
assert symbol('e', '{x: int}').isidentical(symbol('e', '{x: int}', None))
assert symbol('e', '{x: int}').isidentical(symbol('e', dshape('{x: int}')))
e = symbol('e', '{x: int, y: int}')
f = symbol('f', '{x: int, y: int}')
d = {'e': 'f'}
assert e._subs(d).isidentical(f)
def test_multiple_renames_on_series_fails():
t = symbol('s', 'var * {timestamp: datetime}')
with pytest.raises(ValueError):
t.timestamp.relabel(timestamp='date', hello='world')
def test_map_with_rename():
t = symbol('s', 'var * {timestamp: datetime}')
result = t.timestamp.map(lambda x: x.date(), schema='{date: datetime}')
with pytest.raises(ValueError):
result.relabel(timestamp='date')
assert result.fields == ['date']
def test_non_option_does_not_have_notnull():
s = symbol('s', '5 * int32')
assert not hasattr(s, 'notnull')
def test_notnull_dshape():
assert symbol('s', '5 * ?int32').notnull().dshape == 5 * bool_
def test_hash_to_different_values():
s = symbol('s', var * datetime_)
expr = s >= pd.Timestamp('20121001')
expr2 = s >= '20121001'
assert expr2 & expr is not None
assert hash(expr) == hash(expr2)
def test_hash():
e = symbol('e', 'int')
h = hash(e)
assert isinstance(h, int)
assert h == hash(e)
assert hash(symbol('e', 'int')) == hash(symbol('e', 'int'))
f = symbol('f', 'int')
assert hash(e) != hash(f)
assert hash(e._subs({'e': 'f'})) != hash(e)
assert hash(e._subs({'e': 'f'})) == hash(f)
@pytest.mark.parametrize('dshape', [var * float32,
dshape('var * float32'),
'var * float32'])
def test_coerce(dshape):
s = symbol('s', dshape)
expr = s.coerce('int64')
assert str(expr) == "s.coerce(to='int64')"
assert expr.dshape == var * int64
assert expr.schema == datashape.dshape('int64')
assert expr.schema == expr.to
@pytest.mark.xfail(raises=AttributeError, reason='Should this be valid?')
def test_coerce_record():
s = symbol('s', 'var * {a: int64, b: float64}')
expr = s.coerce('{a: float64, b: float32}')
assert str(expr) == "s.coerce(to='{a: float64, b: float32}')"
def test_method_before_name():
t = symbol('t', 'var * {isin: int64, max: float64, count: int64}')
assert isinstance(t['isin'], Field)
assert isinstance(t['max'], Field)
assert isinstance(t.max, Field)
assert isinstance(t.isin, Field)
assert isinstance(t['isin'].isin, types.MethodType)
assert isinstance(t['max'].max, types.MethodType)
assert isinstance(t.max.max, types.MethodType)
assert isinstance(t.isin.isin, types.MethodType)
with pytest.raises(AttributeError):
t.count.max()
def test_pickle_roundtrip():
t = symbol('t', 'var * int64')
expr = (t + 1).mean() # some expression with more than one node.
assert expr.isidentical(pickle.loads(
pickle.dumps(expr, protocol=pickle.HIGHEST_PROTOCOL),
))
def test_coalesce():
# check case where lhs is not optional
s = symbol('s', 'int32')
t = symbol('t', 'int32')
expr = coalesce(s, t)
assert expr.isidentical(s)
s_expr = s + s
t_expr = t * 3
expr = coalesce(s_expr, t_expr)
assert expr.isidentical(s_expr)
a = symbol('a', 'string')
b = symbol('b', 'string')
expr = coalesce(a, b)
assert expr.isidentical(a)
a_expr = a + a
b_expr = b * 3
expr = coalesce(a_expr, b_expr)
assert expr.isidentical(a_expr)
c = symbol('c', 'var * {a: int32, b: int32}')
d = symbol('d', 'var * {a: int32, b: int32}')
expr = coalesce(c, d)
assert expr.isidentical(c)
c_expr = transform(c, a=c.a + 1)
d_expr = transform(d, a=d.a * 3)
expr = coalesce(c_expr, d_expr)
assert expr.isidentical(c_expr)
# check case where lhs is null dshape
u = symbol('u', 'null')
expr = coalesce(u, s)
assert expr.isidentical(s)
expr = coalesce(u, a)
assert expr.isidentical(a)
expr = coalesce(u, c)
assert expr.isidentical(c)
# check optional lhs non-optional rhs
v = symbol('v', '?int32')
expr = coalesce(v, s)
# rhs is not optional so the expression cannot be null
assert_dshape_equal(expr.dshape, dshape('int32'))
assert expr.lhs.isidentical(v)
assert expr.rhs.isidentical(s)
e = symbol('e', '?string')
expr = coalesce(e, a)
assert_dshape_equal(expr.dshape, dshape('string'))
assert expr.lhs.isidentical(e)
assert expr.rhs.isidentical(a)
f = symbol('f', '?{a: int32, b: int32}')
expr = coalesce(f, c)
assert_dshape_equal(expr.dshape, dshape('{a: int32, b: int32}'))
assert expr.lhs.isidentical(f)
assert expr.rhs.isidentical(c)
# check optional lhs non-optional rhs with promotion
w = symbol('w', 'int64')
expr = coalesce(v, w)
# rhs is not optional so the expression cannot be null
# there are no either types in datashape so we are a type large enough
# to hold either result
assert_dshape_equal(expr.dshape, dshape('int64'))
assert expr.lhs.isidentical(v)
assert expr.rhs.isidentical(w)
# check optional lhs and rhs
x = symbol('x', '?int32')
expr = coalesce(v, x)
# rhs and lhs are optional so this might be null
assert_dshape_equal(expr.dshape, dshape('?int32'))
assert expr.lhs.isidentical(v)
assert expr.rhs.isidentical(x)
# check optional lhs and rhs with promotion
y = symbol('y', '?int64')
expr = coalesce(v, y)
# rhs and lhs are optional so this might be null
# there are no either types in datashape so we are a type large enough
# to hold either result
assert_dshape_equal(expr.dshape, dshape('?int64'))
assert expr.lhs.isidentical(v)
assert expr.rhs.isidentical(y)
@pytest.mark.xfail(raises=TypeError, reason='currently invalid type promotion')
@pytest.mark.parametrize('lhs,rhs,expected', (
('?{a: int32}', '{a: int64}', '{a: int64}'),
('?{a: int32}', '?{a: int64}', '?{a: int64}'),
))
def test_coalesce_invalid_promotion(lhs, rhs, expected):
# Joe 2016-03-16: imho promote(record, record) should check that the keys
# are the same and then create a new record from:
# zip(keys, map(promote, lhs, rhs))
f = symbol('e', lhs)
g = symbol('g', rhs)
expr = coalesce(f, g)
assert_dshape_equal(expr.dshape, dshape(expected))
assert expr.lhs.isidentical(f)
assert expr.rhs.isidentical(g)
def test_cast():
s = symbol('s', 'int32')
assert_dshape_equal(s.cast('int64').dshape, dshape('int64'))
assert_dshape_equal(s.cast(dshape('int64')).dshape, dshape('int64'))
assert_dshape_equal(s.cast('var * int32').dshape, dshape('var * int32'))
assert_dshape_equal(
s.cast(dshape('var * int64')).dshape,
dshape('var * int64'),
)
assert_dshape_equal(s.cast('var * int64').dshape, dshape('var * int64'))
assert_dshape_equal(
s.cast(dshape('var * int64')).dshape,
dshape('var * int64'),
)
def test_drop_field():
s = symbol('s', 'var * {a: int32, b: int64, c: float32, d: float64}')
# dropping one field preserves order
assert s.drop_field('a').isidentical(s[['b', 'c', 'd']])
assert s.drop_field('b').isidentical(s[['a', 'c', 'd']])
assert s.drop_field('c').isidentical(s[['a', 'b', 'd']])
assert s.drop_field('d').isidentical(s[['a', 'b', 'c']])
# dropping two fields preserves order
assert s.drop_field('a', 'b').isidentical(s[['c', 'd']])
assert s.drop_field('a', 'c').isidentical(s[['b', 'd']])
assert s.drop_field('b', 'c').isidentical(s[['a', 'd']])
assert s.drop_field('b', 'd').isidentical(s[['a', 'c']])
# test dropping a field that is not actually a field of ``expr``.
with pytest.raises(ValueError) as e:
s.drop_field('fake')
assert (
str(e.value) ==
"fields ['fake'] were not in the fields of expr (['a', 'b', 'c', 'd'])"
)
# test dropping a multiple fields that is not actually a field of ``expr``.
with pytest.raises(ValueError) as e:
s.drop_field('e', 'f')
assert (
str(e.value) ==
"fields ['e', 'f'] were not in the fields of expr"
" (['a', 'b', 'c', 'd'])"
)
# test dropping a mix of correct and missing fields
with pytest.raises(ValueError) as e:
s.drop_field('a', 'fake')
assert (
str(e.value) ==
"fields ['fake'] were not in the fields of expr (['a', 'b', 'c', 'd'])"
)
| {
"repo_name": "ContinuumIO/blaze",
"path": "blaze/expr/tests/test_expr.py",
"copies": "3",
"size": "11527",
"license": "bsd-3-clause",
"hash": 5820691553834571000,
"line_mean": 29.820855615,
"line_max": 81,
"alpha_frac": 0.600242908,
"autogenerated": false,
"ratio": 3.067323044172432,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0000326072779444372,
"num_lines": 374
} |
from __future__ import absolute_import, division, print_function
import types
import datashape
from datashape import dshape, var, datetime_, float32, int64, bool_
import pandas as pd
import pytest
from blaze.compatibility import pickle
from blaze.expr import symbol, label, Field, Expr, Node
def test_slots():
assert Expr.__slots__ == ('_hash', '__weakref__', '__dict__')
assert Node.__slots__ == ()
def test_Symbol():
e = symbol('e', '3 * 5 * {name: string, amount: int}')
assert e.dshape == dshape('3 * 5 * {name: string, amount: int}')
assert e.shape == (3, 5)
assert str(e) == 'e'
def test_symbol_caches():
assert symbol('e', 'int') is symbol('e', 'int')
def test_Symbol_tokens():
assert symbol('x', 'int').isidentical(symbol('x', 'int'))
assert not symbol('x', 'int').isidentical(symbol('x', 'int', 1))
def test_Field():
e = symbol('e', '3 * 5 * {name: string, amount: int}')
assert 'name' in dir(e)
assert e.name.dshape == dshape('3 * 5 * string')
assert e.name.schema == dshape('string')
assert e.amount._name == 'amount'
def test_nested_fields():
e = symbol(
'e', '3 * {name: string, payments: var * {amount: int, when: datetime}}')
assert e.payments.dshape == dshape(
'3 * var * {amount: int, when: datetime}')
assert e.payments.schema == dshape('{amount: int, when: datetime}')
assert 'amount' in dir(e.payments)
assert e.payments.amount.dshape == dshape('3 * var * int')
def test_partialed_methods_have_docstrings():
e = symbol('e', '3 * 5 * {name: string, amount: int}')
assert 'string comparison' in e.name.like.__doc__
def test_relabel():
e = symbol('e', '{name: string, amount: int}')
assert e.relabel(amount='balance').fields == ['name', 'balance']
def test_meaningless_relabel_doesnt_change_input():
e = symbol('e', '{name: string, amount: int}')
assert e.relabel(amount='amount').isidentical(e)
def test_relabel_with_invalid_identifiers_reprs_as_dict():
s = symbol('s', '{"0": int64}')
assert repr(s.relabel({'0': 'foo'})) == "s.relabel({'0': 'foo'})"
def test_dir():
e = symbol('e', '3 * 5 * {name: string, amount: int, x: real}')
assert 'name' in dir(e)
assert 'name' not in dir(e.name)
assert 'isnan' in dir(e.x)
assert 'isnan' not in dir(e.amount)
def test_label():
e = symbol('e', '3 * int')
assert e._name == 'e'
assert label(e, 'foo')._name == 'foo'
assert label(e, 'e').isidentical(e)
def test_fields_with_spaces():
e = symbol('e', '{x: int, "a b": int}')
assert isinstance(e['a b'], Field)
assert 'a b' not in dir(e)
assert 'a_b' in dir(e)
assert e.a_b.isidentical(e['a b'])
def test_fields_with_dots():
e = symbol('e', '{x: int, "a.b": int}')
assert isinstance(e['a.b'], Field)
assert 'a.b' not in dir(e)
assert 'a_b' in dir(e)
assert e.a_b.isidentical(e['a.b'])
def test_selection_name_matches_child():
t = symbol('t', 'var * {x: int, "a.b": int}')
assert t.x[t.x > 0]._name == t.x._name
assert t.x[t.x > 0].fields == t.x.fields
def test_symbol_subs():
assert symbol('e', '{x: int}') is symbol('e', '{x: int}', None)
assert symbol('e', '{x: int}') is symbol('e', dshape('{x: int}'))
e = symbol('e', '{x: int, y: int}')
f = symbol('f', '{x: int, y: int}')
d = {'e': 'f'}
assert e._subs(d) is f
def test_multiple_renames_on_series_fails():
t = symbol('s', 'var * {timestamp: datetime}')
with pytest.raises(ValueError):
t.timestamp.relabel(timestamp='date', hello='world')
def test_map_with_rename():
t = symbol('s', 'var * {timestamp: datetime}')
result = t.timestamp.map(lambda x: x.date(), schema='{date: datetime}')
with pytest.raises(ValueError):
result.relabel(timestamp='date')
assert result.fields == ['date']
def test_non_option_does_not_have_notnull():
s = symbol('s', '5 * int32')
assert not hasattr(s, 'notnull')
def test_notnull_dshape():
assert symbol('s', '5 * ?int32').notnull().dshape == 5 * bool_
def test_hash_to_different_values():
s = symbol('s', var * datetime_)
expr = s >= pd.Timestamp('20121001')
expr2 = s >= '20121001'
assert expr2 & expr is not None
assert hash(expr) == hash(expr2)
@pytest.mark.parametrize('dshape', [var * float32,
dshape('var * float32'),
'var * float32'])
def test_coerce(dshape):
s = symbol('s', dshape)
expr = s.coerce('int64')
assert str(expr) == "s.coerce(to='int64')"
assert expr.dshape == var * int64
assert expr.schema == datashape.dshape('int64')
assert expr.schema == expr.to
@pytest.mark.xfail(raises=AttributeError, reason='Should this be valid?')
def test_coerce_record():
s = symbol('s', 'var * {a: int64, b: float64}')
expr = s.coerce('{a: float64, b: float32}')
assert str(expr) == "s.coerce(to='{a: float64, b: float32}')"
def test_method_before_name():
t = symbol('t', 'var * {isin: int64, max: float64, count: int64}')
assert isinstance(t['isin'], Field)
assert isinstance(t['max'], Field)
assert isinstance(t.max, Field)
assert isinstance(t.isin, Field)
assert isinstance(t['isin'].isin, types.MethodType)
assert isinstance(t['max'].max, types.MethodType)
assert isinstance(t.max.max, types.MethodType)
assert isinstance(t.isin.isin, types.MethodType)
with pytest.raises(AttributeError):
t.count.max()
def test_pickle_roundtrip():
t = symbol('t', 'var * int64')
expr = (t + 1).mean() # some expression with more than one node.
assert expr.isidentical(pickle.loads(pickle.dumps(expr)))
| {
"repo_name": "cpcloud/blaze",
"path": "blaze/expr/tests/test_expr.py",
"copies": "2",
"size": "5714",
"license": "bsd-3-clause",
"hash": -9188664547619333000,
"line_mean": 29.2328042328,
"line_max": 81,
"alpha_frac": 0.5994049702,
"autogenerated": false,
"ratio": 3.1071234366503533,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9706205784576512,
"avg_score": 0.00006452445476835722,
"num_lines": 189
} |
from __future__ import absolute_import, division, print_function
import unicodedata
import numpy as np
from .. import Variable, coding
from ..core.pycompat import OrderedDict, basestring, unicode_type
# Special characters that are permitted in netCDF names except in the
# 0th position of the string
_specialchars = '_.@+- !"#$%&\\()*,:;<=>?[]^`{|}~'
# The following are reserved names in CDL and may not be used as names of
# variables, dimension, attributes
_reserved_names = set(['byte', 'char', 'short', 'ushort', 'int', 'uint',
                       'int64', 'uint64', 'float', 'real', 'double', 'bool',
'string'])
# These data-types aren't supported by netCDF3, so they are automatically
# coerced instead as indicated by the "coerce_nc3_dtype" function
_nc3_dtype_coercions = {'int64': 'int32', 'bool': 'int8'}
# encode all strings as UTF-8
STRING_ENCODING = 'utf-8'
def coerce_nc3_dtype(arr):
"""Coerce an array to a data type that can be stored in a netCDF-3 file
This function performs the following dtype conversions:
int64 -> int32
bool -> int8
    After the cast, the data is checked for exact equality with the original
    values; a ValueError is raised if any value cannot be represented exactly.
"""
dtype = str(arr.dtype)
if dtype in _nc3_dtype_coercions:
new_dtype = _nc3_dtype_coercions[dtype]
# TODO: raise a warning whenever casting the data-type instead?
cast_arr = arr.astype(new_dtype)
if not (cast_arr == arr).all():
raise ValueError('could not safely cast array from dtype %s to %s'
% (dtype, new_dtype))
arr = cast_arr
return arr
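# Added for exposition, not part of the original module: a sketch of the
# coercions documented above and of the failure mode for values that do not
# survive the int64 -> int32 cast.
def _coerce_nc3_dtype_example():
    assert coerce_nc3_dtype(np.array([1, 2, 3], dtype='int64')).dtype == np.dtype('int32')
    assert coerce_nc3_dtype(np.array([True, False])).dtype == np.dtype('int8')
    try:
        coerce_nc3_dtype(np.array([2 ** 40], dtype='int64'))
    except ValueError:
        pass  # expected: 2**40 does not fit in int32, so the cast is rejected
    else:
        raise AssertionError('expected ValueError for an unsafe cast')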
def encode_nc3_attr_value(value):
if isinstance(value, bytes):
pass
elif isinstance(value, unicode_type):
value = value.encode(STRING_ENCODING)
else:
value = coerce_nc3_dtype(np.atleast_1d(value))
if value.ndim > 1:
raise ValueError("netCDF attributes must be 1-dimensional")
return value
def encode_nc3_attrs(attrs):
return OrderedDict([(k, encode_nc3_attr_value(v))
for k, v in attrs.items()])
def encode_nc3_variable(var):
for coder in [coding.strings.EncodedStringCoder(allows_unicode=False),
coding.strings.CharacterArrayCoder()]:
var = coder.encode(var)
data = coerce_nc3_dtype(var.data)
attrs = encode_nc3_attrs(var.attrs)
return Variable(var.dims, data, attrs, var.encoding)
def _isalnumMUTF8(c):
"""Return True if the given UTF-8 encoded character is alphanumeric
or multibyte.
Input is not checked!
"""
return c.isalnum() or (len(c.encode('utf-8')) > 1)
def is_valid_nc3_name(s):
"""Test whether an object can be validly converted to a netCDF-3
dimension, variable or attribute name
Earlier versions of the netCDF C-library reference implementation
enforced a more restricted set of characters in creating new names,
but permitted reading names containing arbitrary bytes. This
specification extends the permitted characters in names to include
multi-byte UTF-8 encoded Unicode and additional printing characters
from the US-ASCII alphabet. The first character of a name must be
alphanumeric, a multi-byte UTF-8 character, or '_' (reserved for
special names with meaning to implementations, such as the
"_FillValue" attribute). Subsequent characters may also include
printing special characters, except for '/' which is not allowed in
names. Names that have trailing space characters are also not
permitted.
"""
if not isinstance(s, basestring):
return False
if not isinstance(s, unicode_type):
s = s.decode('utf-8')
num_bytes = len(s.encode('utf-8'))
return ((unicodedata.normalize('NFC', s) == s) and
(s not in _reserved_names) and
(num_bytes >= 0) and
('/' not in s) and
(s[-1] != ' ') and
(_isalnumMUTF8(s[0]) or (s[0] == '_')) and
all((_isalnumMUTF8(c) or c in _specialchars for c in s)))
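# Added for exposition, not part of the original module: expected behaviour of
# is_valid_nc3_name under the rules documented above.
def _is_valid_nc3_name_example():
    assert is_valid_nc3_name(u'temperature')    # plain alphanumeric name
    assert is_valid_nc3_name(u'_FillValue')     # a leading '_' is allowed
    assert not is_valid_nc3_name(u'byte')       # CDL reserved word
    assert not is_valid_nc3_name(u'bad/name')   # '/' is never allowed
    assert not is_valid_nc3_name(u'trailing ')  # trailing space is rejected
    assert not is_valid_nc3_name(123)           # non-string input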
| {
"repo_name": "jcmgray/xarray",
"path": "xarray/backends/netcdf3.py",
"copies": "1",
"size": "4111",
"license": "apache-2.0",
"hash": -7908629870655553000,
"line_mean": 35.3805309735,
"line_max": 78,
"alpha_frac": 0.6489905133,
"autogenerated": false,
"ratio": 3.7924354243542435,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9940540981902031,
"avg_score": 0.00017699115044247788,
"num_lines": 113
} |
from __future__ import absolute_import, division, print_function
import unittest
from datetime import date, time, datetime
import blaze
from datashape import dshape
from blaze.datadescriptor import ddesc_as_py
class TestDate(unittest.TestCase):
def test_create(self):
a = blaze.array(date(2000, 1, 1))
self.assertEqual(a.dshape, dshape('date'))
self.assertEqual(ddesc_as_py(a.ddesc), date(2000, 1, 1))
a = blaze.array([date(1490, 3, 12), date(2020, 7, 15)])
self.assertEqual(a.dshape, dshape('2 * date'))
self.assertEqual(list(a), [date(1490, 3, 12), date(2020, 7, 15)])
a = blaze.array(['1490-03-12', '2020-07-15'], dshape='date')
self.assertEqual(a.dshape, dshape('2 * date'))
self.assertEqual(list(a), [date(1490, 3, 12), date(2020, 7, 15)])
def test_properties(self):
a = blaze.array(['1490-03-12', '2020-07-15'], dshape='date')
self.assertEqual(list(a.year), [1490, 2020])
self.assertEqual(list(a.month), [3, 7])
self.assertEqual(list(a.day), [12, 15])
class TestTime(unittest.TestCase):
def test_create(self):
a = blaze.array(time(14, 30))
self.assertEqual(a.dshape, dshape('time'))
self.assertEqual(ddesc_as_py(a.ddesc), time(14, 30))
a = blaze.array([time(14, 30), time(12, 25, 39, 123456)])
self.assertEqual(a.dshape, dshape('2 * time'))
self.assertEqual(list(a), [time(14, 30), time(12, 25, 39, 123456)])
a = blaze.array(['2:30 pm', '12:25:39.123456'], dshape='time')
self.assertEqual(a.dshape, dshape('2 * time'))
self.assertEqual(list(a), [time(14, 30), time(12, 25, 39, 123456)])
def test_properties(self):
a = blaze.array([time(14, 30), time(12, 25, 39, 123456)], dshape='time')
self.assertEqual(list(a.hour), [14, 12])
self.assertEqual(list(a.minute), [30, 25])
self.assertEqual(list(a.second), [0, 39])
self.assertEqual(list(a.microsecond), [0, 123456])
class TestDateTime(unittest.TestCase):
def test_create(self):
a = blaze.array(datetime(1490, 3, 12, 14, 30))
self.assertEqual(a.dshape, dshape('datetime'))
self.assertEqual(ddesc_as_py(a.ddesc), datetime(1490, 3, 12, 14, 30))
a = blaze.array([datetime(1490, 3, 12, 14, 30),
datetime(2020, 7, 15, 12, 25, 39, 123456)])
self.assertEqual(a.dshape, dshape('2 * datetime'))
self.assertEqual(list(a), [datetime(1490, 3, 12, 14, 30),
datetime(2020, 7, 15, 12, 25, 39, 123456)])
a = blaze.array(['1490-mar-12 2:30 pm', '2020-07-15T12:25:39.123456'],
dshape='datetime')
self.assertEqual(a.dshape, dshape('2 * datetime'))
self.assertEqual(list(a), [datetime(1490, 3, 12, 14, 30),
datetime(2020, 7, 15, 12, 25, 39, 123456)])
def test_properties(self):
a = blaze.array([datetime(1490, 3, 12, 14, 30),
datetime(2020, 7, 15, 12, 25, 39, 123456)],
dshape='datetime')
self.assertEqual(list(a.date), [date(1490, 3, 12), date(2020, 7, 15)])
self.assertEqual(list(a.time), [time(14, 30), time(12, 25, 39, 123456)])
self.assertEqual(list(a.year), [1490, 2020])
self.assertEqual(list(a.month), [3, 7])
self.assertEqual(list(a.day), [12, 15])
self.assertEqual(list(a.hour), [14, 12])
self.assertEqual(list(a.minute), [30, 25])
self.assertEqual(list(a.second), [0, 39])
self.assertEqual(list(a.microsecond), [0, 123456])
| {
"repo_name": "sethkontny/blaze",
"path": "blaze/tests/test_datetime.py",
"copies": "2",
"size": "3637",
"license": "bsd-3-clause",
"hash": 6781223384999112000,
"line_mean": 46.8552631579,
"line_max": 80,
"alpha_frac": 0.5812482816,
"autogenerated": false,
"ratio": 3.0717905405405403,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46530388221405405,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import unittest
import ctypes
from datashape import dshape
from blaze.datadescriptor import data_descriptor_from_ctypes, dd_as_py
class TestCTypesMemBufDataDescriptor(unittest.TestCase):
def test_scalar(self):
a = ctypes.c_int(3)
dd = data_descriptor_from_ctypes(a, writable=True)
self.assertEqual(dd.dshape, dshape('int32'))
self.assertEqual(dd_as_py(dd), 3)
self.assertTrue(isinstance(dd_as_py(dd), int))
a = ctypes.c_float(3.25)
dd = data_descriptor_from_ctypes(a, writable=True)
self.assertEqual(dd.dshape, dshape('float32'))
self.assertEqual(dd_as_py(dd), 3.25)
self.assertTrue(isinstance(dd_as_py(dd), float))
def test_1d_array(self):
a = (ctypes.c_short * 32)()
for i in range(32):
a[i] = 2*i
dd = data_descriptor_from_ctypes(a, writable=True)
self.assertEqual(dd.dshape, dshape('32, int16'))
self.assertEqual(dd_as_py(dd), [2*i for i in range(32)])
a = (ctypes.c_double * 32)()
for i in range(32):
a[i] = 1.5*i
dd = data_descriptor_from_ctypes(a, writable=True)
self.assertEqual(dd.dshape, dshape('32, float64'))
self.assertEqual(dd_as_py(dd), [1.5*i for i in range(32)])
def test_2d_array(self):
a = (ctypes.c_double * 35 * 32)()
vals = [[2**i + j for i in range(35)] for j in range(32)]
for i in range(32):
for j in range(35):
a[i][j] = vals[i][j]
dd = data_descriptor_from_ctypes(a, writable=True)
self.assertEqual(dd.dshape, dshape('32, 35, float64'))
self.assertEqual(dd_as_py(dd), vals)
a = (ctypes.c_uint8 * 35 * 32)()
vals = [[i + j*2 for i in range(35)] for j in range(32)]
for i in range(32):
for j in range(35):
a[i][j] = vals[i][j]
dd = data_descriptor_from_ctypes(a, writable=True)
self.assertEqual(dd.dshape, dshape('32, 35, uint8'))
self.assertEqual(dd_as_py(dd), vals)
def test_3d_array(self):
# Simple 3D array
a = (ctypes.c_uint32 * 10 * 12 * 14)()
vals = [[[(i + 2*j + 3*k)
for i in range(10)]
for j in range(12)]
for k in range(14)]
for i in range(14):
for j in range(12):
for k in range(10):
a[i][j][k] = vals[i][j][k]
dd = data_descriptor_from_ctypes(a, writable=True)
self.assertEqual(dd.dshape, dshape('14, 12, 10, uint32'))
self.assertEqual(dd_as_py(dd), vals)
if __name__ == '__main__':
unittest.main()
| {
"repo_name": "aaronmartin0303/blaze",
"path": "blaze/datadescriptor/tests/test_ctypes_membuf_data_descriptor.py",
"copies": "1",
"size": "2747",
"license": "bsd-3-clause",
"hash": -2510047899395444000,
"line_mean": 35.1447368421,
"line_max": 70,
"alpha_frac": 0.552238806,
"autogenerated": false,
"ratio": 3.2016317016317015,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4253870507631701,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import unittest
import ctypes
import blaze
from blaze.datadescriptor import data_descriptor_from_ctypes
class TestArrayStr(unittest.TestCase):
def test_scalar(self):
self.assertEqual(str(blaze.array(100)), '100')
self.assertEqual(str(blaze.array(-3.25)), '-3.25')
self.assertEqual(str(blaze.array(True)), 'True')
self.assertEqual(str(blaze.array(False)), 'False')
def test_deferred_scalar(self):
a = blaze.array(3) + blaze.array(5)
self.assertEqual(str(a), '8')
def test_ctypes_scalar(self):
dd = data_descriptor_from_ctypes(ctypes.c_int32(1022), writable=True)
a = blaze.array(dd)
self.assertEqual(str(a), '1022')
def test_1d_array(self):
self.assertEqual(str(blaze.array([1,2,3])), '[1 2 3]')
def test_ctypes_1d_array(self):
cdat = (ctypes.c_int64 * 3)()
cdat[0] = 3
cdat[1] = 6
cdat[2] = 10
dd = data_descriptor_from_ctypes(cdat, writable=True)
a = blaze.array(dd)
self.assertEqual(str(a), '[ 3 6 10]')
def test_ragged_array(self):
a = blaze.array([[1,2,3],[4,5]])
self.assertEqual(str(a),
'[[ 1 2 3]\n [ 4 5]]')
def test_empty_array(self):
a = blaze.array([[], []])
self.assertEqual(str(a), '[[]\n []]')
a = blaze.array([[], [1, 2]])
self.assertEqual(str(a), '[[]\n [ 1 2]]')
def test_str_array(self):
# Basically check that it doesn't raise an exception to
# get the string
a = blaze.array(['this', 'is', 'a', 'test'])
self.assertTrue(str(a) != '')
self.assertTrue(repr(a) != '')
def test_struct_array(self):
# Basically check that it doesn't raise an exception to
# get the string
a = blaze.array([(1, 2), (3, 4), (5, 6)],
dshape='{x: int32; y: float64}')
self.assertTrue(str(a) != '')
self.assertTrue(repr(a) != '')
if __name__ == '__main__':
unittest.main(verbosity=2)
| {
"repo_name": "aaronmartin0303/blaze",
"path": "blaze/tests/test_array_str.py",
"copies": "1",
"size": "2135",
"license": "bsd-3-clause",
"hash": -5126686716650982000,
"line_mean": 31.8461538462,
"line_max": 77,
"alpha_frac": 0.5489461358,
"autogenerated": false,
"ratio": 3.1913303437967113,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4240276479596711,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import unittest
import math
import blaze
from blaze.datadescriptor import ddesc_as_py
class TestBasic(unittest.TestCase):
def test_add(self):
types = ['int8', 'int16', 'int32', 'int64']
for type_ in types:
a = blaze.array(range(3), dshape=type_)
c = blaze.eval(a+a)
self.assertEqual(ddesc_as_py(c.ddesc), [0, 2, 4])
c = blaze.eval(((a+a)*a))
self.assertEqual(ddesc_as_py(c.ddesc), [0, 2, 8])
def test_add_with_pyobj(self):
a = blaze.array(3) + 3
self.assertEqual(ddesc_as_py(a.ddesc), 6)
a = 3 + blaze.array(4)
self.assertEqual(ddesc_as_py(a.ddesc), 7)
a = blaze.array([1, 2]) + 4
self.assertEqual(ddesc_as_py(a.ddesc), [5, 6])
a = [1, 2] + blaze.array(5)
self.assertEqual(ddesc_as_py(a.ddesc), [6, 7])
#FIXME: Need to convert uint8 from dshape to ctypes
# in _get_ctypes of blaze_kernel.py
def test_mixed(self):
types1 = ['int8', 'int16', 'int32', 'int64']
types2 = ['int16', 'int32', 'float32', 'float64']
for ty1, ty2 in zip(types1, types2):
a = blaze.array(range(1,6), dshape=ty1)
b = blaze.array(range(5), dshape=ty2)
c = (a+b)*(a-b)
c = blaze.eval(c)
result = [a*a - b*b for (a,b) in zip(range(1,6),range(5))]
self.assertEqual(ddesc_as_py(c.ddesc), result)
def test_ragged(self):
a = blaze.array([[1], [2, 3], [4, 5, 6]])
b = blaze.array([[1, 2, 3], [4, 5], [6]])
c = blaze.eval(a + b)
self.assertEqual(ddesc_as_py(c.ddesc),
[[2, 3, 4], [6, 8], [10, 11, 12]])
c = blaze.eval(2 * a - b)
self.assertEqual(ddesc_as_py(c.ddesc),
[[1, 0, -1], [0, 1], [2, 4, 6]])
class TestReduction(unittest.TestCase):
def test_min_zerosize(self):
# Empty min operations should raise, because it has no
# reduction identity
self.assertRaises(ValueError, blaze.eval, blaze.min([]))
self.assertRaises(ValueError, blaze.eval, blaze.min([], keepdims=True))
self.assertRaises(ValueError, blaze.eval, blaze.min([[], []]))
self.assertRaises(ValueError, blaze.eval, blaze.min([[], []],
keepdims=True))
self.assertRaises(ValueError, blaze.eval, blaze.min([[], []], axis=-1))
self.assertRaises(ValueError, blaze.eval, blaze.min([[], []],
axis=-1,
keepdims=True))
# However, if we're only reducing on a non-empty dimension, it's ok
self.assertEqual(ddesc_as_py(blaze.eval(blaze.min([[], []],
axis=0)).ddesc),
[])
self.assertEqual(ddesc_as_py(blaze.eval(blaze.min([[], []],
axis=0,
keepdims=True)).ddesc),
[[]])
def test_min(self):
# Min element of scalar case is the element itself
self.assertEqual(ddesc_as_py(blaze.eval(blaze.min(10)).ddesc), 10)
self.assertEqual(ddesc_as_py(blaze.eval(blaze.min(-5.0)).ddesc), -5.0)
# One-dimensional size one
self.assertEqual(ddesc_as_py(blaze.eval(blaze.min([10])).ddesc), 10)
self.assertEqual(ddesc_as_py(blaze.eval(blaze.min([-5.0])).ddesc), -5.0)
self.assertEqual(ddesc_as_py(blaze.eval(blaze.min([-5.0],
axis=0)).ddesc), -5.0)
self.assertEqual(ddesc_as_py(blaze.eval(blaze.min([10],
keepdims=True)).ddesc),
[10])
# One dimensional
self.assertEqual(ddesc_as_py(blaze.eval(blaze.min([1, 2])).ddesc), 1)
self.assertEqual(ddesc_as_py(blaze.eval(blaze.min([2, 1])).ddesc), 1)
self.assertEqual(ddesc_as_py(blaze.eval(blaze.min([0, 1, 0])).ddesc), 0)
self.assertEqual(ddesc_as_py(blaze.eval(blaze.min([0, 1, 0])).ddesc), 0)
self.assertEqual(ddesc_as_py(blaze.eval(blaze.min([1, 0, 2])).ddesc), 0)
self.assertEqual(ddesc_as_py(blaze.eval(blaze.min([2, 1, 0])).ddesc), 0)
# Two dimensional, test with minimum at all possible positions
self.assertEqual(ddesc_as_py(blaze.eval(blaze.min([[1, 2, 3],
[4, 5, 6]])).ddesc), 1)
self.assertEqual(ddesc_as_py(blaze.eval(blaze.min([[2, 1, 3],
[4, 5, 6]])).ddesc), 1)
self.assertEqual(ddesc_as_py(blaze.eval(blaze.min([[3, 2, 1],
[4, 5, 6]])).ddesc), 1)
self.assertEqual(ddesc_as_py(blaze.eval(blaze.min([[3, 2, 5],
[4, 1, 6]])).ddesc), 1)
self.assertEqual(ddesc_as_py(blaze.eval(blaze.min([[3, 2, 5],
[4, 6, 1]])).ddesc), 1)
self.assertEqual(ddesc_as_py(blaze.eval(blaze.min([[3, 2, 5],
[1, 6, 4]])).ddesc), 1)
# Two dimensional, with axis= argument both positive and negative
self.assertEqual(ddesc_as_py(blaze.eval(blaze.min([[1, 5, 3],
[4, 2, 6]],
axis=0)).ddesc),
[1, 2, 3])
self.assertEqual(ddesc_as_py(blaze.eval(blaze.min([[1, 5, 3],
[4, 2, 6]],
axis=-2)).ddesc),
[1, 2, 3])
self.assertEqual(ddesc_as_py(blaze.eval(blaze.min([[1, 2, 3],
[4, 5, 6]],
axis=1)).ddesc),
[1, 4])
self.assertEqual(ddesc_as_py(blaze.eval(blaze.min([[1, 2, 3],
[4, 5, 6]],
axis=-1)).ddesc),
[1, 4])
# Two dimensional, with keepdims=True
self.assertEqual(ddesc_as_py(blaze.eval(blaze.min([[1, 2, 3],
[4, 5, 6]],
keepdims=True)).ddesc),
[[1]])
self.assertEqual(ddesc_as_py(blaze.eval(blaze.min([[1, 2, 3],
[5, 4, 6]],
axis=0,
keepdims=True)).ddesc),
[[1, 2, 3]])
self.assertEqual(ddesc_as_py(blaze.eval(blaze.min([[1, 5, 3],
[4, 2, 6]],
axis=1,
keepdims=True)).ddesc),
[[1], [2]])
def test_sum_zerosize(self):
# Empty sum operations should produce 0, the reduction identity
self.assertEqual(ddesc_as_py(blaze.eval(blaze.sum([])).ddesc), 0)
self.assertEqual(ddesc_as_py(blaze.eval(blaze.sum([],
keepdims=True)).ddesc),
[0])
self.assertEqual(ddesc_as_py(blaze.eval(blaze.sum([[], []])).ddesc), 0)
self.assertEqual(ddesc_as_py(blaze.eval(blaze.sum([[], []],
keepdims=True)).ddesc),
[[0]])
self.assertEqual(ddesc_as_py(blaze.eval(blaze.sum([[], []],
axis=-1)).ddesc),
[0, 0])
self.assertEqual(ddesc_as_py(blaze.eval(blaze.sum([[], []],
axis=-1,
keepdims=True)).ddesc),
[[0], [0]])
# If we're only reducing on a non-empty dimension, we might still
# end up with zero-sized outputs
self.assertEqual(ddesc_as_py(blaze.eval(blaze.sum([[], []],
axis=0)).ddesc),
[])
self.assertEqual(ddesc_as_py(blaze.eval(blaze.sum([[], []],
axis=0,
keepdims=True)).ddesc),
[[]])
def test_sum(self):
# Sum of scalar case is the element itself
self.assertEqual(ddesc_as_py(blaze.eval(blaze.sum(10)).ddesc), 10)
self.assertEqual(ddesc_as_py(blaze.eval(blaze.sum(-5.0)).ddesc), -5.0)
# One-dimensional size one
self.assertEqual(ddesc_as_py(blaze.eval(blaze.sum([10])).ddesc), 10)
self.assertEqual(ddesc_as_py(blaze.eval(blaze.sum([-5.0])).ddesc), -5.0)
self.assertEqual(ddesc_as_py(blaze.eval(blaze.sum([-5.0],
axis=0)).ddesc), -5.0)
self.assertEqual(ddesc_as_py(blaze.eval(blaze.sum([10],
keepdims=True)).ddesc),
[10])
# One dimensional
self.assertEqual(ddesc_as_py(blaze.eval(blaze.sum([1, 2])).ddesc), 3)
self.assertEqual(ddesc_as_py(blaze.eval(blaze.sum([0, 1, 2])).ddesc), 3)
# Two dimensional
self.assertEqual(ddesc_as_py(blaze.eval(blaze.sum([[1, 2, 3],
[4, 5, 6]])).ddesc), 21)
# Two dimensional, with axis= argument both positive and negative
self.assertEqual(ddesc_as_py(blaze.eval(blaze.sum([[1, 5, 3],
[4, 2, 6]],
axis=0)).ddesc),
[5, 7, 9])
self.assertEqual(ddesc_as_py(blaze.eval(blaze.sum([[1, 5, 3],
[4, 2, 6]],
axis=-2)).ddesc),
[5, 7, 9])
self.assertEqual(ddesc_as_py(blaze.eval(blaze.sum([[1, 2, 3],
[4, 5, 6]],
axis=1)).ddesc),
[6, 15])
self.assertEqual(ddesc_as_py(blaze.eval(blaze.sum([[1, 2, 3],
[4, 5, 6]],
axis=-1)).ddesc),
[6, 15])
# Two dimensional, with keepdims=True
self.assertEqual(ddesc_as_py(blaze.eval(blaze.sum([[1, 2, 3],
[4, 5, 6]],
keepdims=True)).ddesc),
[[21]])
self.assertEqual(ddesc_as_py(blaze.eval(blaze.sum([[1, 2, 3],
[5, 4, 6]],
axis=0,
keepdims=True)).ddesc),
[[6, 6, 9]])
self.assertEqual(ddesc_as_py(blaze.eval(blaze.sum([[1, 5, 3],
[4, 2, 6]],
axis=1,
keepdims=True)).ddesc),
[[9], [12]])
def test_all(self):
# Sanity check of reduction op
self.assertEqual(ddesc_as_py(blaze.eval(blaze.all(True)).ddesc), True)
self.assertEqual(ddesc_as_py(blaze.eval(blaze.all(False)).ddesc), False)
self.assertEqual(ddesc_as_py(blaze.eval(blaze.all(blaze.array([], dshape='0 * bool'))).ddesc), True)
self.assertEqual(ddesc_as_py(blaze.eval(blaze.all([False, True])).ddesc),
False)
self.assertEqual(ddesc_as_py(blaze.eval(blaze.all([True, True])).ddesc),
True)
def test_any(self):
# Sanity check of reduction op
self.assertEqual(ddesc_as_py(blaze.eval(blaze.any(True)).ddesc), True)
self.assertEqual(ddesc_as_py(blaze.eval(blaze.any(False)).ddesc), False)
self.assertEqual(ddesc_as_py(blaze.eval(blaze.any(blaze.array([], dshape='0 * bool'))).ddesc), False)
self.assertEqual(ddesc_as_py(blaze.eval(blaze.any([False, True])).ddesc),
True)
self.assertEqual(ddesc_as_py(blaze.eval(blaze.any([False, False])).ddesc),
False)
def test_max(self):
# Sanity check of reduction op
self.assertEqual(ddesc_as_py(blaze.eval(blaze.max(5)).ddesc), 5)
self.assertRaises(ValueError, blaze.eval, blaze.max([]))
self.assertEqual(ddesc_as_py(blaze.eval(blaze.max([3, -2])).ddesc),
3)
self.assertEqual(ddesc_as_py(blaze.eval(blaze.max([1.5, 2.0])).ddesc),
2.0)
def test_product(self):
# Sanity check of reduction op
self.assertEqual(ddesc_as_py(blaze.eval(blaze.product(5)).ddesc), 5)
self.assertEqual(ddesc_as_py(blaze.eval(blaze.product([])).ddesc), 1)
self.assertEqual(ddesc_as_py(blaze.eval(blaze.product([3, -2])).ddesc),
-6)
self.assertEqual(ddesc_as_py(blaze.eval(blaze.product([1.5, 2.0])).ddesc),
3.0)
class TestRolling(unittest.TestCase):
def test_rolling_mean(self):
a = blaze.eval(blaze.rolling_mean([1., 3, 4, 2, 5], window=4))
self.assertTrue(all(math.isnan(x) for x in a[:3]))
self.assertEqual(list(a[3:]), [10./4, 14./4])
def test_diff(self):
a = blaze.eval(blaze.diff([1., 2, 4, 4, 2, 0]))
self.assertTrue(math.isnan(a[0]))
self.assertEqual(list(a[1:]), [1, 2, 0, -2, -2])
class TestTake(unittest.TestCase):
def test_masked_take(self):
a = blaze.take([1, 3, 5, 7], [True, False, True, False])
self.assertEqual(list(a), [1, 5])
x = blaze.array([(1, "test"), (2, "one"), (3, "two"), (4, "three")],
dshape="{x: int, y: string}")
a = blaze.take(x, [True, True, True, True])
self.assertEqual(list(a), list(x))
a = blaze.take(x, [True, True, False, False])
self.assertEqual(list(a), [x[0], x[1]])
a = blaze.take(x, [False, False, True, True])
self.assertEqual(list(a), [x[2], x[3]])
a = blaze.take(x, [True, False, True, False])
self.assertEqual(list(a), [x[0], x[2]])
def test_indexed_take(self):
a = blaze.take([1, 3, 5, 7], [-1, -2, -3, -4, 0, 1, 2, 3])
self.assertEqual(list(a), [7, 5, 3, 1, 1, 3, 5, 7])
x = blaze.array([(1, "test"), (2, "one"), (3, "two"), (4, "three")],
dshape="{x: int, y: string}")
a = blaze.take(x, [2, -3])
self.assertEqual(list(a), [x[2], x[-3]])
if __name__ == '__main__':
unittest.main()
| {
"repo_name": "sethkontny/blaze",
"path": "blaze/tests/test_calc.py",
"copies": "1",
"size": "15622",
"license": "bsd-3-clause",
"hash": -8707900961701460000,
"line_mean": 51.9559322034,
"line_max": 109,
"alpha_frac": 0.4357956728,
"autogenerated": false,
"ratio": 3.585494606380537,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9506679899669886,
"avg_score": 0.0029220759021301623,
"num_lines": 295
} |
from __future__ import absolute_import, division, print_function
import unittest
import os
import os.path
from astropy.io import fits
import numpy as np
import desispec.scripts.preproc
from desispec.preproc import preproc, _parse_sec_keyword, _clipped_std_bias
from desispec import io
def xy2hdr(xyslice):
'''
    Convert a 2D numpy slice into an IRAF-style '[x1:x2,y1:y2]' header value
    (1-indexed, inclusive), e.g. xy2hdr(np.s_[0:10, 5:20]) -> '[6:20,1:10]'
'''
yy, xx = xyslice
value = '[{}:{},{}:{}]'.format(xx.start+1, xx.stop, yy.start+1, yy.stop)
return value
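# Added for exposition, not part of the original test module: a runnable check of
# the convention above (1-indexed, inclusive, column-major FITS/IRAF ranges).
def _xy2hdr_example():
    assert xy2hdr(np.s_[0:10, 5:20]) == '[6:20,1:10]'
    assert xy2hdr(np.s_[0:500, 400:450]) == '[401:450,1:500]'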
class TestPreProc(unittest.TestCase):
def tearDown(self):
for filename in [self.calibfile, self.rawfile, self.pixfile]:
if os.path.exists(filename):
os.remove(filename)
def setUp(self):
self.calibfile = 'test-calib-askjapqwhezcpasehadfaqp.fits'
self.rawfile = 'test-raw-askjapqwhezcpasehadfaqp.fits'
self.pixfile = 'test-pix-askjapqwhezcpasehadfaqp.fits'
hdr = dict()
hdr['CAMERA'] = 'b0'
hdr['DATE-OBS'] = '2018-09-23T08:17:03.988'
#- [x,y] 1-indexed for FITS; in reality the amps will be symmetric
#- but the header definitions don't require that to make sure we are
#- getting dimensions correct
#- Dimensions per amp, not full 4-quad CCD
self.ny = ny = 500
self.nx = nx = 400
self.noverscan = nover = 50
#- BIASSEC = overscan region in raw image
#- DATASEC = data region in raw image
#- CCDSEC = where should this go in output
hdr['BIASSEC1'] = xy2hdr(np.s_[0:ny, nx:nx+nover])
hdr['DATASEC1'] = xy2hdr(np.s_[0:ny, 0:nx])
hdr['CCDSEC1'] = xy2hdr(np.s_[0:ny, 0:nx])
hdr['BIASSEC2'] = xy2hdr(np.s_[0:ny, nx+nover:nx+2*nover])
hdr['DATASEC2'] = xy2hdr(np.s_[0:ny, nx+2*nover:nx+2*nover+nx])
hdr['CCDSEC2'] = xy2hdr(np.s_[0:ny, nx:nx+nx])
hdr['BIASSEC3'] = xy2hdr(np.s_[ny:ny+ny, nx:nx+nover])
hdr['DATASEC3'] = xy2hdr(np.s_[ny:ny+ny, 0:nx])
hdr['CCDSEC3'] = xy2hdr(np.s_[ny:ny+ny, 0:nx])
hdr['BIASSEC4'] = xy2hdr(np.s_[ny:ny+ny, nx+nover:nx+2*nover])
hdr['DATASEC4'] = xy2hdr(np.s_[ny:ny+ny, nx+2*nover:nx+2*nover+nx])
hdr['CCDSEC4'] = xy2hdr(np.s_[ny:ny+ny, nx:nx+nx])
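        #- Worked example (added for exposition): with ny=500, nx=400, nover=50,
        #- amp '1' therefore gets
        #-   BIASSEC1 = '[401:450,1:500]'  (overscan columns to the right of the data)
        #-   DATASEC1 = '[1:400,1:500]'    (raw data region read from this amp)
        #-   CCDSEC1  = '[1:400,1:500]'    (destination in the assembled output image)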
hdr['NIGHT'] = '20150102'
hdr['EXPID'] = 1
self.header = hdr
self.rawimage = np.zeros((2*self.ny, 2*self.nx+2*self.noverscan))
self.offset = {'1':100.0, '2':100.5, '3':50.3, '4':200.4}
self.gain = {'1':1.0, '2':1.5, '3':0.8, '4':1.2}
self.rdnoise = {'1':2.0, '2':2.2, '3':2.4, '4':2.6}
self.quad = {
'1': np.s_[0:ny, 0:nx], '2': np.s_[0:ny, nx:nx+nx],
'3': np.s_[ny:ny+ny, 0:nx], '4': np.s_[ny:ny+ny, nx:nx+nx],
}
for amp in ('1', '2', '3', '4'):
self.header['GAIN'+amp] = self.gain[amp]
self.header['RDNOISE'+amp] = self.rdnoise[amp]
xy = _parse_sec_keyword(hdr['BIASSEC'+amp])
shape = [xy[0].stop-xy[0].start, xy[1].stop-xy[1].start]
self.rawimage[xy] += self.offset[amp]
self.rawimage[xy] += np.random.normal(scale=self.rdnoise[amp], size=shape)/self.gain[amp]
xy = _parse_sec_keyword(hdr['DATASEC'+amp])
shape = [xy[0].stop-xy[0].start, xy[1].stop-xy[1].start]
self.rawimage[xy] += self.offset[amp]
self.rawimage[xy] += np.random.normal(scale=self.rdnoise[amp], size=shape)/self.gain[amp]
#- raw data are integers, not floats
self.rawimage = self.rawimage.astype(np.int32)
#- Confirm that all regions were correctly offset
assert not np.any(self.rawimage == 0.0)
def test_preproc(self):
image = preproc(self.rawimage, self.header)
self.assertEqual(image.pix.shape, (2*self.ny, 2*self.nx))
self.assertTrue(np.all(image.ivar <= 1/image.readnoise**2))
for amp in ('1', '2', '3', '4'):
pix = image.pix[self.quad[amp]]
rdnoise = np.median(image.readnoise[self.quad[amp]])
npixover = self.ny * self.noverscan
self.assertAlmostEqual(np.mean(pix), 0.0, delta=3*rdnoise/np.sqrt(npixover))
self.assertAlmostEqual(np.std(pix), self.rdnoise[amp], delta=0.2)
self.assertAlmostEqual(rdnoise, self.rdnoise[amp], delta=0.2)
def test_bias(self):
image = preproc(self.rawimage, self.header, bias=False)
bias = np.zeros(self.rawimage.shape)
image = preproc(self.rawimage, self.header, bias=bias)
fits.writeto(self.calibfile, bias)
image = preproc(self.rawimage, self.header, bias=self.calibfile)
with self.assertRaises(ValueError):
image = preproc(self.rawimage, self.header, bias=bias[0:10, 0:10])
def test_pixflat(self):
image = preproc(self.rawimage, self.header, pixflat=False)
pixflat = np.ones_like(image.pix)
image = preproc(self.rawimage, self.header, pixflat=pixflat)
fits.writeto(self.calibfile, pixflat)
image = preproc(self.rawimage, self.header, pixflat=self.calibfile)
with self.assertRaises(ValueError):
image = preproc(self.rawimage, self.header, pixflat=pixflat[0:10, 0:10])
def test_mask(self):
image = preproc(self.rawimage, self.header, mask=False)
mask = np.random.randint(0, 2, size=image.pix.shape)
image = preproc(self.rawimage, self.header, mask=mask)
self.assertTrue(np.all(image.mask == mask))
fits.writeto(self.calibfile, mask)
image = preproc(self.rawimage, self.header, mask=self.calibfile)
self.assertTrue(np.all(image.mask == mask))
with self.assertRaises(ValueError):
image = preproc(self.rawimage, self.header, mask=mask[0:10, 0:10])
def test_pixflat_mask(self):
from desispec.maskbits import ccdmask
pixflat = np.ones((2*self.ny, 2*self.nx))
pixflat[0:10, 0:10] = 0.0
pixflat[10:20, 10:20] = 0.05
image = preproc(self.rawimage, self.header, pixflat=pixflat)
self.assertTrue(np.all(image.mask[0:10,0:10] & ccdmask.PIXFLATZERO))
self.assertTrue(np.all(image.mask[10:20,10:20] & ccdmask.PIXFLATLOW))
def test_io(self):
io.write_raw(self.rawfile, self.rawimage, self.header, camera='b0')
io.write_raw(self.rawfile, self.rawimage, self.header, camera='R1')
io.write_raw(self.rawfile, self.rawimage, self.header, camera='z9')
self.header['CAMERA'] = 'B1'
io.write_raw(self.rawfile, self.rawimage, self.header)
b0 = io.read_raw(self.rawfile, 'b0')
b1 = io.read_raw(self.rawfile, 'b1')
r1 = io.read_raw(self.rawfile, 'r1')
z9 = io.read_raw(self.rawfile, 'Z9')
self.assertEqual(b0.meta['CAMERA'], 'b0')
self.assertEqual(b1.meta['CAMERA'], 'b1')
self.assertEqual(r1.meta['CAMERA'], 'r1')
self.assertEqual(z9.meta['CAMERA'], 'z9')
def test_32_64(self):
'''
64-bit integers aren't supported for compressed HDUs;
make sure we handle that gracefully
'''
data64 = np.linspace(0, 2**60, 10, dtype=np.int64)
datasmall64 = np.linspace(0, 2**30, 10, dtype=np.int64)
data32 = np.linspace(0, 2**30, 10, dtype=np.int32)
data16 = np.linspace(0, 2**10, 10, dtype=np.int16)
#- Primary HDU should be blank
#- Should be written as vanilla ImageHDU
io.write_raw(self.rawfile, data64, self.header, camera='b0')
#- Should be written as vanilla ImageHDU
io.write_raw(self.rawfile, data64, self.header, camera='b1')
#- Should be converted to 32-bit CompImageHDU
io.write_raw(self.rawfile, datasmall64, self.header, camera='b2')
#- Should be 32-bit CompImageHDU
io.write_raw(self.rawfile, data32, self.header, camera='b3')
#- Should be 16-bit CompImageHDU
io.write_raw(self.rawfile, data16, self.header, camera='b4')
fx = fits.open(self.rawfile)
#- Blank PrimaryHDU should have been inserted
self.assertTrue(isinstance(fx[0], fits.PrimaryHDU))
        self.assertIsNone(fx[0].data)
#- 64-bit image written uncompressed after blank HDU
self.assertTrue(isinstance(fx[1], fits.ImageHDU))
self.assertEqual(fx[1].data.dtype, np.dtype('>i8'))
self.assertEqual(fx[1].header['EXTNAME'], 'B0')
#- 64-bit image written uncompressed
self.assertTrue(isinstance(fx[2], fits.ImageHDU))
self.assertEqual(fx[2].data.dtype, np.dtype('>i8'))
self.assertEqual(fx[2].header['EXTNAME'], 'B1')
#- 64-bit image with small numbers converted to 32-bit compressed
self.assertTrue(isinstance(fx[3], fits.CompImageHDU))
self.assertEqual(fx[3].data.dtype, np.int32)
self.assertEqual(fx[3].header['EXTNAME'], 'B2')
#- 32-bit image written compressed
self.assertTrue(isinstance(fx[4], fits.CompImageHDU))
self.assertEqual(fx[4].data.dtype, np.int32)
self.assertEqual(fx[4].header['EXTNAME'], 'B3')
#- 16-bit image written compressed
self.assertTrue(isinstance(fx[5], fits.CompImageHDU))
self.assertEqual(fx[5].data.dtype, np.int16)
self.assertEqual(fx[5].header['EXTNAME'], 'B4')
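    #- Added for exposition, not part of the original suite: a sketch of the
    #- int32 range check implied by the expectations above. Names are illustrative.
    def test_int64_range_check_sketch(self):
        i32 = np.iinfo(np.int32)
        def fits_in_int32(a):
            return bool(np.all((a >= i32.min) & (a <= i32.max)))
        data64 = np.linspace(0, 2**60, 10, dtype=np.int64)
        datasmall64 = np.linspace(0, 2**30, 10, dtype=np.int64)
        self.assertFalse(fits_in_int32(data64))      #- must stay uncompressed int64
        self.assertTrue(fits_in_int32(datasmall64))  #- safe to downcast and compress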
def test_keywords(self):
for keyword in self.header.keys():
#- Missing GAIN* and RDNOISE* are warnings but not errors
if keyword.startswith('GAIN') or keyword.startswith('RDNOISE'):
continue
#- DATE-OBS, NIGHT, and EXPID are also optional
#- (but maybe they should be required...)
if keyword in ('DATE-OBS', 'NIGHT', 'EXPID'):
continue
if os.path.exists(self.rawfile):
os.remove(self.rawfile)
value = self.header[keyword]
del self.header[keyword]
with self.assertRaises(KeyError):
io.write_raw(self.rawfile, self.rawimage, self.header)
self.header[keyword] = value
dateobs = self.header
#- striving for 100% coverage...
def test_pedantic(self):
with self.assertRaises(ValueError):
_parse_sec_keyword('blat')
#- should log a warning about large readnoise
rawimage = self.rawimage + np.random.normal(scale=2, size=self.rawimage.shape)
image = preproc(rawimage, self.header)
#- should log an error about huge readnoise
rawimage = self.rawimage + np.random.normal(scale=10, size=self.rawimage.shape)
image = preproc(rawimage, self.header)
#- should log a warning about small readnoise
        rdnoise = 0.7 * np.mean(list(self.rdnoise.values()))
rawimage = np.random.normal(scale=rdnoise, size=self.rawimage.shape)
image = preproc(rawimage, self.header)
#- should log a warning about tiny readnoise
        rdnoise = 0.01 * np.mean(list(self.rdnoise.values()))
rawimage = np.random.normal(scale=rdnoise, size=self.rawimage.shape)
image = preproc(rawimage, self.header)
#- Missing expected RDNOISE keywords shouldn't be fatal
hdr = self.header.copy()
del hdr['RDNOISE1']
del hdr['RDNOISE2']
del hdr['RDNOISE3']
del hdr['RDNOISE4']
image = preproc(self.rawimage, hdr)
#- Missing expected GAIN keywords should log error but not crash
hdr = self.header.copy()
del hdr['GAIN1']
del hdr['GAIN2']
del hdr['GAIN3']
del hdr['GAIN4']
image = preproc(self.rawimage, hdr)
def test_preproc_script(self):
io.write_raw(self.rawfile, self.rawimage, self.header, camera='b0')
io.write_raw(self.rawfile, self.rawimage, self.header, camera='b1')
args = ['--infile', self.rawfile, '--cameras', 'b1',
'--pixfile', self.pixfile]
if os.path.exists(self.pixfile):
os.remove(self.pixfile)
desispec.scripts.preproc.main(args)
img = io.read_image(self.pixfile)
self.assertEqual(img.pix.shape, (2*self.ny, 2*self.nx))
def test_clipped_std_bias(self):
'''Compare to www.wolframalpha.com integrals'''
self.assertAlmostEqual(_clipped_std_bias(1), 0.53956, places=5)
self.assertAlmostEqual(_clipped_std_bias(2), 0.879626, places=6)
self.assertAlmostEqual(_clipped_std_bias(3), 0.986578, places=6)
np.random.seed(1)
x = np.random.normal(size=1000000)
biased_std = np.std(x[np.abs(x)<3])
self.assertAlmostEqual(biased_std, _clipped_std_bias(3), places=3)
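    #- Added for exposition, not part of the original suite: the analytic form of
    #- the clipped-std bias for a unit normal truncated at +/- n sigma is
    #-   sqrt(1 - 2*n*phi(n) / (2*Phi(n) - 1))
    #- which reproduces the values asserted above. Assumes scipy is installed.
    def test_clipped_std_bias_analytic_sketch(self):
        try:
            from scipy.stats import norm
        except ImportError:
            raise unittest.SkipTest('scipy not available')
        for n, expected in [(1, 0.53956), (2, 0.879626), (3, 0.986578)]:
            analytic = np.sqrt(1.0 - 2.0*n*norm.pdf(n) / (2.0*norm.cdf(n) - 1.0))
            self.assertAlmostEqual(analytic, expected, places=5)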
#- Not implemented yet, but flag these as expectedFailures instead of
#- successful tests of raising NotImplementedError
@unittest.expectedFailure
def test_default_bias(self):
image = preproc(self.rawimage, self.header, bias=True)
@unittest.expectedFailure
def test_default_pixflat(self):
image = preproc(self.rawimage, self.header, pixflat=True)
@unittest.expectedFailure
def test_default_mask(self):
image = preproc(self.rawimage, self.header, mask=True)
if __name__ == '__main__':
unittest.main()
| {
"repo_name": "gdhungana/desispec",
"path": "py/desispec/test/test_preproc.py",
"copies": "2",
"size": "13446",
"license": "bsd-3-clause",
"hash": 912529586417514100,
"line_mean": 42.3741935484,
"line_max": 101,
"alpha_frac": 0.6002528633,
"autogenerated": false,
"ratio": 3.16153303550435,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47617858988043504,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import unittest
import os
import tempfile
from blaze.datadescriptor import (
JSONDataDescriptor, DyNDDataDescriptor, IDataDescriptor, dd_as_py)
# TODO: This isn't actually being used!
_json_buf = u"""{
"type": "ImageCollection",
"images": [
"Image": {
"Width": 800,
"Height": 600,
"Title": "View from 15th Floor",
"Thumbnail": {
"Url": "http://www.example.com/image/481989943",
"Height": 125,
"Width": "100"
},
"IDs": [116, 943, 234, 38793]
}
]
}
"""
# TODO: This isn't actually being used!
_json_schema = """{
type: string,
images: var * {
Width: int16,
Height: int16,
Title: string,
Thumbnail: {
Url: string,
Height: int16,
Width: int16,
},
IDs: var * int32,
};
}
"""
json_buf = u"[1, 2, 3, 4, 5]"
json_schema = "var * int8"
class TestJSONDataDescriptor(unittest.TestCase):
def setUp(self):
handle, self.json_file = tempfile.mkstemp(".json")
with os.fdopen(handle, "w") as f:
f.write(json_buf)
def tearDown(self):
os.remove(self.json_file)
def test_basic_object_type(self):
self.assertTrue(issubclass(JSONDataDescriptor, IDataDescriptor))
dd = JSONDataDescriptor(self.json_file, schema=json_schema)
self.assertTrue(isinstance(dd, IDataDescriptor))
self.assertEqual(dd_as_py(dd), [1, 2, 3, 4, 5])
def test_iter(self):
dd = JSONDataDescriptor(self.json_file, schema=json_schema)
# This equality does not work yet
# self.assertEqual(dd.dshape, datashape.dshape(
# 'Var, %s' % json_schema))
# Iteration should produce DyNDDataDescriptor instances
vals = []
for el in dd:
self.assertTrue(isinstance(el, DyNDDataDescriptor))
self.assertTrue(isinstance(el, IDataDescriptor))
vals.append(dd_as_py(el))
self.assertEqual(vals, [1, 2, 3, 4, 5])
def test_getitem(self):
dd = JSONDataDescriptor(self.json_file, schema=json_schema)
el = dd[1:3]
self.assertTrue(isinstance(el, DyNDDataDescriptor))
vals = dd_as_py(el)
self.assertEqual(vals, [2,3])
if __name__ == '__main__':
unittest.main()
| {
"repo_name": "mwiebe/blaze",
"path": "blaze/datadescriptor/tests/test_json_data_descriptor.py",
"copies": "2",
"size": "2444",
"license": "bsd-3-clause",
"hash": 937997391621558800,
"line_mean": 26.4606741573,
"line_max": 72,
"alpha_frac": 0.5683306056,
"autogenerated": false,
"ratio": 3.4914285714285715,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5059759177028572,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import unittest
import sys
if sys.version_info <= (3, 0):
from mock import patch, Mock
else:
from unittest.mock import patch, Mock
from panoptes_client.subject_set import SubjectSet
class TestSubjectSet(unittest.TestCase):
def test_create(self):
with patch('panoptes_client.panoptes.Panoptes') as pc:
pc.client().post = Mock(return_value=(
{
'subject_sets': [{
'id': 0,
'display_name': '',
}],
},
'',
))
subject_set = SubjectSet()
subject_set.links.project = 1234
subject_set.display_name = 'Name'
subject_set.save()
pc.client().post.assert_called_with(
'/subject_sets',
json={
'subject_sets': {
'display_name': 'Name',
'links': {
'project': 1234,
}
}
},
etag=None,
)
| {
"repo_name": "zooniverse/panoptes-python-client",
"path": "panoptes_client/tests/test_subject_set.py",
"copies": "1",
"size": "1195",
"license": "apache-2.0",
"hash": 6573846257469512000,
"line_mean": 27.4523809524,
"line_max": 64,
"alpha_frac": 0.4343096234,
"autogenerated": false,
"ratio": 4.742063492063492,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 42
} |
from __future__ import absolute_import, division, print_function
import unittest
import tempfile
import os
import csv
import sys
try:
    from collections.abc import Iterator
except ImportError:  # Python 2
    from collections import Iterator
import datashape
from datashape import dshape
from blaze.compatibility import skipIf
from blaze.data.core import DataDescriptor
from blaze.data import CSV
from blaze.data.csv import has_header, discover_dialect
from blaze.utils import filetext
from blaze.data.utils import tuplify
from dynd import nd
from nose.tools import assert_equal
def sanitize(lines):
return '\n'.join(line.strip() for line in lines.split('\n'))
class Test_Other(unittest.TestCase):
def test_schema_detection_modifiers(self):
text = "name amount date\nAlice 100 20120101\nBob 200 20120102"
with filetext(text) as fn:
self.assertEqual(CSV(fn).schema,
dshape('{name: string, amount: int64, date: int64}'))
self.assertEqual(CSV(fn, columns=['NAME', 'AMOUNT', 'DATE']).schema,
dshape('{NAME: string, AMOUNT: int64, DATE: int64}'))
self.assertEqual(
str(CSV(fn, types=['string', 'int32', 'date']).schema),
str(dshape('{name: string, amount: int32, date: date}')))
a = CSV(fn, typehints={'date': 'date'}).schema
b = dshape('{name: string, amount: int64, date: date}')
self.assertEqual(str(a), str(b))
def test_homogenous_schema(self):
text = "1,1\n2,2\n3,3"
with filetext(text) as fn:
self.assertEqual(CSV(fn, columns=['x', 'y']).schema,
dshape('{x: int64, y: int64}'))
def test_a_mode(self):
text = ("id, name, balance\n1, Alice, 100\n2, Bob, 200\n"
"3, Charlie, 300\n4, Denis, 400\n5, Edith, 500")
with filetext(text) as fn:
csv = CSV(fn, 'a')
csv.extend([(6, 'Frank', 600),
(7, 'Georgina', 700)])
assert 'Georgina' in set(csv.py[:, 'name'])
def test_sep_kwarg(self):
csv = CSV('foo', 'w', sep=';', schema='{x: int, y: int}')
self.assertEqual(csv.dialect['delimiter'], ';')
def test_columns(self):
# This is really testing the core interface
dd = CSV('foo', 'w', schema='{name: string, amount: int}')
assert list(dd.columns) == ['name', 'amount']
class Test_Indexing(unittest.TestCase):
buf = sanitize(
u"""Name Amount
Alice 100
Bob 200
Alice 50
""")
schema = "{ name: string, amount: int }"
def setUp(self):
self.csv_file = tempfile.mktemp(".csv")
with open(self.csv_file, "w") as f:
f.write(self.buf)
self.dd = CSV(self.csv_file, dialect='excel', schema=self.schema,
delimiter=' ', mode='r+')
assert self.dd.header
def tearDown(self):
if os.path.exists(self.csv_file):
os.remove(self.csv_file)
def test_row(self):
self.assertEqual(tuplify(self.dd.py[0]), ('Alice', 100))
self.assertEqual(tuplify(self.dd.py[1]), ('Bob', 200))
def test_dynd(self):
assert isinstance(self.dd.dynd[0], nd.array)
def test_rows(self):
self.assertEqual(tuplify(self.dd.py[[0, 1]]), (('Alice', 100), ('Bob', 200)))
def test_point(self):
self.assertEqual(self.dd.py[0, 0], 'Alice')
self.assertEqual(self.dd.py[1, 1], 200)
def test_nested(self):
self.assertEqual(tuplify(self.dd.py[[0, 1], 0]), ('Alice', 'Bob'))
self.assertEqual(tuplify(self.dd.py[[0, 1], 1]), (100, 200))
self.assertEqual(tuplify(self.dd.py[0, [0, 1]]), ('Alice', 100))
self.assertEqual(tuplify(self.dd.py[[1, 0], [0, 1]]),
(('Bob', 200), ('Alice', 100)))
def test_slices(self):
self.assertEqual(list(self.dd.py[:, 1]), [100, 200, 50])
self.assertEqual(list(self.dd.py[1:, 1]), [200, 50])
self.assertEqual(list(self.dd.py[0, :]), ['Alice', 100])
def test_names(self):
self.assertEqual(list(self.dd.py[:, 'name']), ['Alice', 'Bob', 'Alice'])
self.assertEqual(tuplify(self.dd.py[:, ['amount', 'name']]),
((100, 'Alice'), (200, 'Bob'), (50, 'Alice')))
def test_dynd_complex(self):
self.assertEqual(tuplify(self.dd.py[:, ['amount', 'name']]),
tuplify(nd.as_py(self.dd.dynd[:, ['amount', 'name']],
tuple=True)))
def test_laziness(self):
print(type(self.dd.py[:, 1]))
assert isinstance(self.dd.py[:, 1], Iterator)
class Test_Dialect(unittest.TestCase):
buf = sanitize(
u"""Name Amount
Alice 100
Bob 200
Alice 50
""")
schema = "{ f0: string, f1: int }"
def setUp(self):
self.csv_file = tempfile.mktemp(".csv")
with open(self.csv_file, "w") as f:
f.write(self.buf)
self.dd = CSV(self.csv_file, dialect='excel', schema=self.schema,
delimiter=' ', mode='r+')
def tearDown(self):
os.remove(self.csv_file)
def test_schema_detection(self):
dd = CSV(self.csv_file)
assert dd.schema == dshape('{Name: string, Amount: int64}')
dd = CSV(self.csv_file, columns=['foo', 'bar'])
assert dd.schema == dshape('{foo: string, bar: int64}')
@skipIf(sys.version_info[:2] < (2, 7), 'CSV module unable to parse')
def test_has_header(self):
assert has_header(self.buf)
def test_overwrite_delimiter(self):
self.assertEquals(self.dd.dialect['delimiter'], ' ')
def test_content(self):
s = str(list(self.dd))
assert 'Alice' in s and 'Bob' in s
def test_append(self):
self.dd.extend([('Alice', 100)])
with open(self.csv_file) as f:
self.assertEqual(f.readlines()[-1].strip(), 'Alice 100')
def test_append_dict(self):
self.dd.extend([{'f0': 'Alice', 'f1': 100}])
with open(self.csv_file) as f:
self.assertEqual(f.readlines()[-1].strip(), 'Alice 100')
def test_extend_structured(self):
with filetext('1,1.0\n2,2.0\n') as fn:
csv = CSV(fn, 'r+', schema='{x: int32, y: float32}',
delimiter=',')
csv.extend([(3, 3)])
assert tuplify(tuple(csv)) == ((1, 1.0), (2, 2.0), (3, 3.0))
def test_discover_dialect(self):
s = '1,1\r\n2,2'
self.assertEqual(discover_dialect(s),
{'escapechar': None,
'skipinitialspace': False,
'quoting': 0,
'delimiter': ',',
'lineterminator': '\r\n',
'quotechar': '"',
'doublequote': False})
class TestCSV_New_File(unittest.TestCase):
data = (('Alice', 100),
('Bob', 200),
('Alice', 50))
schema = "{ f0: string, f1: int32 }"
def setUp(self):
self.filename = tempfile.mktemp(".csv")
def tearDown(self):
if os.path.exists(self.filename):
os.remove(self.filename)
def test_errs_without_dshape(self):
self.assertRaises(ValueError, lambda: CSV(self.filename, 'w'))
def test_creation(self):
dd = CSV(self.filename, 'w', schema=self.schema, delimiter=' ')
def test_creation_rw(self):
dd = CSV(self.filename, 'w+', schema=self.schema, delimiter=' ')
def test_append(self):
dd = CSV(self.filename, 'w', schema=self.schema, delimiter=' ')
dd.extend([self.data[0]])
with open(self.filename) as f:
self.assertEqual(f.readlines()[0].strip(), 'Alice 100')
def test_extend(self):
dd = CSV(self.filename, 'w', schema=self.schema, delimiter=' ')
dd.extend(self.data)
with open(self.filename) as f:
lines = f.readlines()
self.assertEqual(lines[0].strip(), 'Alice 100')
self.assertEqual(lines[1].strip(), 'Bob 200')
self.assertEqual(lines[2].strip(), 'Alice 50')
expected_dshape = datashape.DataShape(datashape.Var(), self.schema)
# TODO: datashape comparison is broken
self.assertEqual(str(dd.dshape).replace(' ', ''),
str(expected_dshape).replace(' ', ''))
class TestTransfer(unittest.TestCase):
def test_re_dialect(self):
dialect1 = {'delimiter': ',', 'lineterminator': '\n'}
dialect2 = {'delimiter': ';', 'lineterminator': '--'}
text = '1,1\n2,2\n'
schema = '2 * int32'
with filetext(text) as source_fn:
with filetext('') as dest_fn:
src = CSV(source_fn, schema=schema, **dialect1)
dst = CSV(dest_fn, mode='w', schema=schema, **dialect2)
# Perform copy
dst.extend(src)
with open(dest_fn) as f:
self.assertEquals(f.read(), '1;1--2;2--')
def test_iter(self):
with filetext('1,1\n2,2\n') as fn:
dd = CSV(fn, schema='2 * int32')
self.assertEquals(tuplify(list(dd)), ((1, 1), (2, 2)))
def test_chunks(self):
with filetext('1,1\n2,2\n3,3\n4,4\n') as fn:
dd = CSV(fn, schema='2 * int32')
assert all(isinstance(chunk, nd.array) for chunk in dd.chunks())
self.assertEquals(len(list(dd.chunks(blen=2))), 2)
self.assertEquals(len(list(dd.chunks(blen=3))), 2)
def test_iter_structured(self):
with filetext('1,2\n3,4\n') as fn:
dd = CSV(fn, schema='{x: int, y: int}')
self.assertEquals(tuplify(list(dd)), ((1, 2), (3, 4)))
class TestCSV(unittest.TestCase):
# A CSV toy example
buf = sanitize(
u"""k1,v1,1,False
k2,v2,2,True
k3,v3,3,False
""")
data = (('k1', 'v1', 1, False),
('k2', 'v2', 2, True),
('k3', 'v3', 3, False))
schema = "{ f0: string, f1: string, f2: int16, f3: bool }"
def setUp(self):
self.csv_file = tempfile.mktemp(".csv")
with open(self.csv_file, "w") as f:
f.write(self.buf)
def tearDown(self):
os.remove(self.csv_file)
def test_compute(self):
dd = CSV(self.csv_file, schema=self.schema)
from blaze.expr.table import TableSymbol
from blaze.compute.python import compute
t = TableSymbol('t', self.schema)
self.assertEqual(compute(t['f2'].sum(), dd), 1 + 2 + 3)
def test_has_header(self):
assert not has_header(self.buf)
def test_basic_object_type(self):
dd = CSV(self.csv_file, schema=self.schema)
self.assertTrue(isinstance(dd, DataDescriptor))
self.assertTrue(isinstance(dd.dshape.shape[0], datashape.Var))
def test_iter(self):
dd = CSV(self.csv_file, schema=self.schema)
self.assertEqual(tuplify(tuple(dd)), self.data)
def test_as_py(self):
dd = CSV(self.csv_file, schema=self.schema)
self.assertEqual(tuplify(dd.as_py()), self.data)
def test_getitem_start(self):
dd = CSV(self.csv_file, schema=self.schema)
self.assertEqual(tuplify(dd.py[0]), self.data[0])
def test_getitem_stop(self):
dd = CSV(self.csv_file, schema=self.schema)
self.assertEqual(tuplify(dd.py[:1]), self.data[:1])
def test_getitem_step(self):
dd = CSV(self.csv_file, schema=self.schema)
self.assertEqual(tuplify(dd.py[::2]), self.data[::2])
def test_getitem_start_step(self):
dd = CSV(self.csv_file, schema=self.schema)
self.assertEqual(tuplify(dd.py[1::2]), self.data[1::2])
if __name__ == '__main__':
unittest.main()
| {
"repo_name": "aterrel/blaze",
"path": "blaze/data/tests/test_csv.py",
"copies": "1",
"size": "11700",
"license": "bsd-3-clause",
"hash": 5125669127557384000,
"line_mean": 31.4099722992,
"line_max": 85,
"alpha_frac": 0.5537606838,
"autogenerated": false,
"ratio": 3.3883579496090355,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9433410556585051,
"avg_score": 0.0017416153647969355,
"num_lines": 361
} |
from __future__ import absolute_import, division, print_function
import unittest
import tempfile
import os
import csv
import datashape
from blaze.data.core import DataDescriptor
from blaze.data import CSV
from blaze.data.csv import has_header
from blaze.utils import filetext
from dynd import nd
def sanitize(lines):
return '\n'.join(line.strip() for line in lines.split('\n'))
class Test_Dialect(unittest.TestCase):
buf = sanitize(
u"""Name Amount
Alice 100
Bob 200
Alice 50
""")
schema = "{ f0: string, f1: int }"
def setUp(self):
self.csv_file = tempfile.mktemp(".csv")
with open(self.csv_file, "w") as f:
f.write(self.buf)
self.dd = CSV(self.csv_file, dialect='excel', schema=self.schema,
delimiter=' ', mode='r+')
def tearDown(self):
os.remove(self.csv_file)
def test_has_header(self):
assert has_header(self.buf)
def test_overwrite_delimiter(self):
self.assertEquals(self.dd.dialect['delimiter'], ' ')
def test_content(self):
s = str(list(self.dd))
assert 'Alice' in s and 'Bob' in s
def test_append(self):
self.dd.extend([('Alice', 100)])
with open(self.csv_file) as f:
self.assertEqual(f.readlines()[-1].strip(), 'Alice 100')
def test_append_dict(self):
self.dd.extend([{'f0': 'Alice', 'f1': 100}])
with open(self.csv_file) as f:
self.assertEqual(f.readlines()[-1].strip(), 'Alice 100')
def test_extend_structured(self):
with filetext('1,1.0\n2,2.0\n') as fn:
csv = CSV(fn, 'r+', schema='{x: int32, y: float32}',
delimiter=',')
csv.extend([(3, 3)])
assert (list(csv) == [[1, 1.0], [2, 2.0], [3, 3.0]]
or list(csv) == [{'x': 1, 'y': 1.0},
{'x': 2, 'y': 2.0},
{'x': 3, 'y': 3.0}])
class TestCSV_New_File(unittest.TestCase):
data = [('Alice', 100),
('Bob', 200),
('Alice', 50)]
schema = "{ f0: string, f1: int32 }"
def setUp(self):
self.filename = tempfile.mktemp(".csv")
def tearDown(self):
if os.path.exists(self.filename):
os.remove(self.filename)
def test_errs_without_dshape(self):
self.assertRaises(ValueError, lambda: CSV(self.filename, 'w'))
def test_creation(self):
dd = CSV(self.filename, 'w', schema=self.schema, delimiter=' ')
def test_creation_rw(self):
dd = CSV(self.filename, 'w+', schema=self.schema, delimiter=' ')
def test_append(self):
dd = CSV(self.filename, 'w', schema=self.schema, delimiter=' ')
dd.extend([self.data[0]])
with open(self.filename) as f:
self.assertEqual(f.readlines()[0].strip(), 'Alice 100')
def test_extend(self):
dd = CSV(self.filename, 'w', schema=self.schema, delimiter=' ')
dd.extend(self.data)
with open(self.filename) as f:
lines = f.readlines()
self.assertEqual(lines[0].strip(), 'Alice 100')
self.assertEqual(lines[1].strip(), 'Bob 200')
self.assertEqual(lines[2].strip(), 'Alice 50')
expected_dshape = datashape.DataShape(datashape.Var(), self.schema)
# TODO: datashape comparison is broken
self.assertEqual(str(dd.dshape).replace(' ', ''),
str(expected_dshape).replace(' ', ''))
class TestTransfer(unittest.TestCase):
def test_re_dialect(self):
dialect1 = {'delimiter': ',', 'lineterminator': '\n'}
dialect2 = {'delimiter': ';', 'lineterminator': '--'}
text = '1,1\n2,2\n'
schema = '2 * int32'
with filetext(text) as source_fn:
with filetext('') as dest_fn:
src = CSV(source_fn, schema=schema, **dialect1)
dst = CSV(dest_fn, mode='w', schema=schema, **dialect2)
# Perform copy
dst.extend(src)
with open(dest_fn) as f:
self.assertEquals(f.read(), '1;1--2;2--')
def test_iter(self):
with filetext('1,1\n2,2\n') as fn:
dd = CSV(fn, schema='2 * int32')
self.assertEquals(list(dd), [[1, 1], [2, 2]])
def test_chunks(self):
with filetext('1,1\n2,2\n3,3\n4,4\n') as fn:
dd = CSV(fn, schema='2 * int32')
assert all(isinstance(chunk, nd.array) for chunk in dd.chunks())
self.assertEquals(len(list(dd.chunks(blen=2))), 2)
self.assertEquals(len(list(dd.chunks(blen=3))), 2)
def test_iter_structured(self):
with filetext('1,2\n3,4\n') as fn:
dd = CSV(fn, schema='{x: int, y: int}')
self.assertEquals(list(dd), [{'x': 1, 'y': 2}, {'x': 3, 'y': 4}])
class TestCSV(unittest.TestCase):
# A CSV toy example
buf = sanitize(
u"""k1,v1,1,False
k2,v2,2,True
k3,v3,3,False
""")
schema = "{ f0: string, f1: string, f2: int16, f3: bool }"
def setUp(self):
self.csv_file = tempfile.mktemp(".csv")
with open(self.csv_file, "w") as f:
f.write(self.buf)
def tearDown(self):
os.remove(self.csv_file)
def test_has_header(self):
assert not has_header(self.buf)
def test_basic_object_type(self):
dd = CSV(self.csv_file, schema=self.schema)
self.assertTrue(isinstance(dd, DataDescriptor))
self.assertTrue(isinstance(dd.dshape.shape[0], datashape.Var))
self.assertEqual(list(dd), [
{u'f0': u'k1', u'f1': u'v1', u'f2': 1, u'f3': False},
{u'f0': u'k2', u'f1': u'v2', u'f2': 2, u'f3': True},
{u'f0': u'k3', u'f1': u'v3', u'f2': 3, u'f3': False}])
def test_iter(self):
dd = CSV(self.csv_file, schema=self.schema)
self.assertEqual(list(dd), [
{u'f0': u'k1', u'f1': u'v1', u'f2': 1, u'f3': False},
{u'f0': u'k2', u'f1': u'v2', u'f2': 2, u'f3': True},
{u'f0': u'k3', u'f1': u'v3', u'f2': 3, u'f3': False}])
def test_as_py(self):
dd = CSV(self.csv_file, schema=self.schema)
self.assertEqual(dd.as_py(), [
{u'f0': u'k1', u'f1': u'v1', u'f2': 1, u'f3': False},
{u'f0': u'k2', u'f1': u'v2', u'f2': 2, u'f3': True},
{u'f0': u'k3', u'f1': u'v3', u'f2': 3, u'f3': False}])
def test_chunks(self):
dd = CSV(self.csv_file, schema=self.schema)
vals = []
for el in dd.chunks(blen=2):
self.assertTrue(isinstance(el, nd.array))
vals.extend(nd.as_py(el))
self.assertEqual(vals, [
{u'f0': u'k1', u'f1': u'v1', u'f2': 1, u'f3': False},
{u'f0': u'k2', u'f1': u'v2', u'f2': 2, u'f3': True},
{u'f0': u'k3', u'f1': u'v3', u'f2': 3, u'f3': False}])
def test_append(self):
# Get a private file so as to not mess the original one
csv_file = tempfile.mktemp(".csv")
with open(csv_file, "w") as f:
f.write(self.buf)
dd = CSV(csv_file, schema=self.schema, mode='r+')
dd.extend([["k4", "v4", 4, True]])
vals = [nd.as_py(v) for v in dd.chunks(blen=2)]
self.assertEqual(vals, [
[{u'f0': u'k1', u'f1': u'v1', u'f2': 1, u'f3': False},
{u'f0': u'k2', u'f1': u'v2', u'f2': 2, u'f3': True}],
[{u'f0': u'k3', u'f1': u'v3', u'f2': 3, u'f3': False},
{u'f0': u'k4', u'f1': u'v4', u'f2': 4, u'f3': True}]])
self.assertRaises(ValueError, lambda: dd.extend([3.3]))
os.remove(csv_file)
def test_getitem_start(self):
dd = CSV(self.csv_file, schema=self.schema)
self.assertEqual(dd[0],
{u'f0': u'k1', u'f1': u'v1', u'f2': 1, u'f3': False})
def test_getitem_stop(self):
dd = CSV(self.csv_file, schema=self.schema)
self.assertEqual(dd[:1], [
{u'f0': u'k1', u'f1': u'v1', u'f2': 1, u'f3': False}])
def test_getitem_step(self):
dd = CSV(self.csv_file, schema=self.schema)
self.assertEqual(dd[::2], [
{u'f0': u'k1', u'f1': u'v1', u'f2': 1, u'f3': False},
{u'f0': u'k3', u'f1': u'v3', u'f2': 3, u'f3': False}])
def test_getitem_start_step(self):
dd = CSV(self.csv_file, schema=self.schema)
self.assertEqual(dd[1::2], [
{u'f0': u'k2', u'f1': u'v2', u'f2': 2, u'f3': True}])
if __name__ == '__main__':
unittest.main()
| {
"repo_name": "sethkontny/blaze",
"path": "blaze/data/tests/test_csv.py",
"copies": "1",
"size": "8543",
"license": "bsd-3-clause",
"hash": 3352021965356333000,
"line_mean": 32.2412451362,
"line_max": 77,
"alpha_frac": 0.5212454641,
"autogenerated": false,
"ratio": 2.9694125825512687,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.39906580466512687,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import unittest
import tempfile
import os
import datashape
from blaze.datadescriptor import (
CSVDataDescriptor, DyNDDataDescriptor, IDataDescriptor, dd_as_py)
# A CSV toy example
csv_buf = u"""k1,v1,1,False
k2,v2,2,True
k3,v3,3,False
"""
csv_schema = "{ f0: string, f1: string, f2: int16, f3: bool }"
class TestCSVDataDescriptor(unittest.TestCase):
def setUp(self):
handle, self.csv_file = tempfile.mkstemp(".csv")
with os.fdopen(handle, "w") as f:
f.write(csv_buf)
def tearDown(self):
os.remove(self.csv_file)
def test_basic_object_type(self):
self.assertTrue(issubclass(CSVDataDescriptor, IDataDescriptor))
dd = CSVDataDescriptor(self.csv_file, schema=csv_schema)
self.assertTrue(isinstance(dd, IDataDescriptor))
self.assertTrue(isinstance(dd.dshape.shape[0], datashape.Var))
self.assertEqual(dd_as_py(dd), [
{u'f0': u'k1', u'f1': u'v1', u'f2': 1, u'f3': False},
{u'f0': u'k2', u'f1': u'v2', u'f2': 2, u'f3': True},
{u'f0': u'k3', u'f1': u'v3', u'f2': 3, u'f3': False}])
def test_iter(self):
dd = CSVDataDescriptor(self.csv_file, schema=csv_schema)
# Iteration should produce DyNDDataDescriptor instances
vals = []
for el in dd:
self.assertTrue(isinstance(el, DyNDDataDescriptor))
self.assertTrue(isinstance(el, IDataDescriptor))
vals.append(dd_as_py(el))
self.assertEqual(vals, [
{u'f0': u'k1', u'f1': u'v1', u'f2': 1, u'f3': False},
{u'f0': u'k2', u'f1': u'v2', u'f2': 2, u'f3': True},
{u'f0': u'k3', u'f1': u'v3', u'f2': 3, u'f3': False}])
def test_iterchunks(self):
dd = CSVDataDescriptor(self.csv_file, schema=csv_schema)
# Iteration should produce DyNDDataDescriptor instances
vals = []
for el in dd.iterchunks(blen=2):
self.assertTrue(isinstance(el, DyNDDataDescriptor))
self.assertTrue(isinstance(el, IDataDescriptor))
vals.extend(dd_as_py(el))
self.assertEqual(vals, [
{u'f0': u'k1', u'f1': u'v1', u'f2': 1, u'f3': False},
{u'f0': u'k2', u'f1': u'v2', u'f2': 2, u'f3': True},
{u'f0': u'k3', u'f1': u'v3', u'f2': 3, u'f3': False}])
def test_iterchunks_start(self):
dd = CSVDataDescriptor(self.csv_file, schema=csv_schema)
vals = []
for el in dd.iterchunks(blen=2, start=1):
vals.extend(dd_as_py(el))
self.assertEqual(vals, [
{u'f0': u'k2', u'f1': u'v2', u'f2': 2, u'f3': True},
{u'f0': u'k3', u'f1': u'v3', u'f2': 3, u'f3': False}])
def test_iterchunks_stop(self):
dd = CSVDataDescriptor(self.csv_file, schema=csv_schema)
vals = [dd_as_py(v) for v in dd.iterchunks(blen=1, stop=2)]
self.assertEqual(vals, [
[{u'f0': u'k1', u'f1': u'v1', u'f2': 1, u'f3': False}],
[{u'f0': u'k2', u'f1': u'v2', u'f2': 2, u'f3': True}]])
def test_iterchunks_start_stop(self):
dd = CSVDataDescriptor(self.csv_file, schema=csv_schema)
vals = [dd_as_py(v) for v in dd.iterchunks(blen=1, start=1, stop=2)]
self.assertEqual(vals, [[
{u'f0': u'k2', u'f1': u'v2', u'f2': 2, u'f3': True}]])
def test_append(self):
        # Get a private file so as not to mess with the original one
handle, csv_file = tempfile.mkstemp(".csv")
with os.fdopen(handle, "w") as f:
f.write(csv_buf)
dd = CSVDataDescriptor(csv_file, schema=csv_schema)
dd.append(["k4", "v4", 4, True])
vals = [dd_as_py(v) for v in dd.iterchunks(blen=1, start=3)]
self.assertEqual(vals, [[
{u'f0': u'k4', u'f1': u'v4', u'f2': 4, u'f3': True}]])
os.remove(csv_file)
def test_getitem_start(self):
dd = CSVDataDescriptor(self.csv_file, schema=csv_schema)
el = dd[0]
self.assertTrue(isinstance(el, DyNDDataDescriptor))
vals = dd_as_py(el)
self.assertEqual(vals, [
{u'f0': u'k1', u'f1': u'v1', u'f2': 1, u'f3': False}])
def test_getitem_stop(self):
dd = CSVDataDescriptor(self.csv_file, schema=csv_schema)
el = dd[:1]
self.assertTrue(isinstance(el, DyNDDataDescriptor))
vals = dd_as_py(el)
self.assertEqual(vals, [
{u'f0': u'k1', u'f1': u'v1', u'f2': 1, u'f3': False}])
def test_getitem_step(self):
dd = CSVDataDescriptor(self.csv_file, schema=csv_schema)
el = dd[::2]
self.assertTrue(isinstance(el, DyNDDataDescriptor))
vals = dd_as_py(el)
self.assertEqual(vals, [
{u'f0': u'k1', u'f1': u'v1', u'f2': 1, u'f3': False},
{u'f0': u'k3', u'f1': u'v3', u'f2': 3, u'f3': False}])
def test_getitem_start_step(self):
dd = CSVDataDescriptor(self.csv_file, schema=csv_schema)
el = dd[1::2]
self.assertTrue(isinstance(el, DyNDDataDescriptor))
vals = dd_as_py(el)
self.assertEqual(vals, [
{u'f0': u'k2', u'f1': u'v2', u'f2': 2, u'f3': True}])
if __name__ == '__main__':
unittest.main()
| {
"repo_name": "mwiebe/blaze",
"path": "blaze/datadescriptor/tests/test_csv_data_descriptor.py",
"copies": "2",
"size": "5241",
"license": "bsd-3-clause",
"hash": -4940539992154953000,
"line_mean": 37.2554744526,
"line_max": 76,
"alpha_frac": 0.5601984354,
"autogenerated": false,
"ratio": 2.7862838915470496,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.434648232694705,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import unittest
from datashape import coercion_cost, dshape, dshapes, error
from datashape.tests import common
from datashape.py2help import skip
class TestCoercion(common.BTestCase):
def test_coerce_ctype(self):
a, b, c = dshapes('float32', 'float32', 'float64')
self.assertLess(coercion_cost(a, b), coercion_cost(a, c))
a, b, c = dshapes('uint64', 'uint64', 'int64')
self.assertLess(coercion_cost(a, b), coercion_cost(a, c))
a, b, c = dshapes('int64', 'int64', 'uint64')
self.assertLess(coercion_cost(a, b), coercion_cost(a, c))
a, b, c = dshapes('float64', 'float64', 'complex[float32]')
self.assertLess(coercion_cost(a, b), coercion_cost(a, c))
a, b, c = dshapes('int16', 'float64', 'complex[float32]')
self.assertLess(coercion_cost(a, b), coercion_cost(a, c))
a, b, c = dshapes('int8', 'float64', 'complex[float32]')
self.assertLess(coercion_cost(a, b), coercion_cost(a, c))
def test_coerce_ctype_float_vs_complex(self):
# int -> float32 is preferred over int -> complex[float32]
a, b, c = dshapes('int32', 'float32', 'complex[float32]')
self.assertLess(coercion_cost(a, b), coercion_cost(a, c))
# int -> float64 is preferred over int -> complex[float64]
a, b, c = dshapes('int32', 'float64', 'complex[float64]')
self.assertLess(coercion_cost(a, b), coercion_cost(a, c))
# int -> float64 is preferred over int -> complex[float32]
a, b, c = dshapes('int32', 'float64', 'complex[float32]')
self.assertLess(coercion_cost(a, b), coercion_cost(a, c))
def test_coerce_numeric(self):
a, b = dshapes('float32', 'float64')
self.assertGreater(coercion_cost(a, b), 0)
def test_coercion_transitivity(self):
a, b, c = dshapes('int8', 'complex128', 'float64')
self.assertGreater(coercion_cost(a, b), coercion_cost(a, c))
@skip('This is something that needs to be handled by overloading')
def test_coerce_typevars(self):
a, b, c = dshapes('10 * 11 * float32', 'X * Y * float64',
'10 * Y * float64')
self.assertGreater(coercion_cost(a, b), coercion_cost(a, c))
@skip('This is something that needs to be handled by overloading')
def test_coerce_constrained_typevars(self):
a, b, c = dshapes('10 * 10 * float32', 'X * Y * float64',
'X * X * float64')
self.assertGreater(coercion_cost(a, b), coercion_cost(a, c))
def test_coerce_broadcasting(self):
a, b, c = dshapes('10 * 10 * float32', '10 * Y * Z * float64',
'X * Y * float64')
self.assertGreater(coercion_cost(a, b), coercion_cost(a, c))
def test_coerce_broadcasting2(self):
a, b, c = dshapes('10 * 10 * float32', '1 * 10 * 10 * float32',
'10 * 10 * float32')
self.assertGreater(coercion_cost(a, b), coercion_cost(a, c))
def test_coerce_broadcasting3(self):
a, b, c = dshapes('10 * 10 * float32', '10 * 10 * 10 * float32',
'1 * 10 * 10 * float32')
self.assertGreater(coercion_cost(a, b), coercion_cost(a, c))
@skip('implements has not been implemented in the new parser')
def test_coerce_traits(self):
a, b, c = dshapes('10 * 10 * float32', '10 * X * A : floating',
'10 * X * float32')
self.assertGreater(coercion_cost(a, b), coercion_cost(a, c))
def test_coerce_dst_ellipsis(self):
a, b, c = dshapes('10 * 10 * float32', 'X * ... * float64',
'X * Y * float64')
self.assertGreater(coercion_cost(a, b), coercion_cost(a, c))
@skip('not dealing with ellipsis in the src of a coercion')
def test_coerce_src_ellipsis(self):
a, b, c = dshapes('10 * ... * float32', 'X * Y * float64',
'X * ... * float64')
self.assertGreater(coercion_cost(a, b), coercion_cost(a, c))
def test_allow_anything_to_bool(self):
# The cost should be large
min_cost = coercion_cost(dshape('int8'), dshape('complex[float64]'))
for ds in ['int8', 'int16', 'int32', 'int64', 'uint8', 'uint16',
'uint32', 'uint64', 'float32', 'float64',
'complex[float32]', 'complex[float64]']:
self.assertGreater(coercion_cost(dshape(ds), dshape('bool')),
min_cost)
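# --- Illustrative aside (not part of the original test module) ---------------
# The preference tests above only assert an ordering of coercion costs.  A
# minimal sketch of how that ordering could be used to pick the cheapest
# upcast target; it relies solely on dshape() and coercion_cost() as imported
# above, and assumes every candidate is a legal (non-downcast) target.
def _cheapest_target(src, candidates):
    """Return the candidate type string with the lowest coercion cost from src."""
    return min(candidates, key=lambda c: coercion_cost(dshape(src), dshape(c)))
# e.g. _cheapest_target('int32', ['float64', 'complex[float32]']) -> 'float64',
# matching test_coerce_ctype_float_vs_complex.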
class TestCoercionErrors(unittest.TestCase):
def test_downcast(self):
a, b = dshapes('float32', 'int32')
self.assertRaises(error.CoercionError, coercion_cost, a, b)
def test_disallow_bool_to_anything(self):
for ds in ['int8', 'int16', 'int32', 'int64', 'uint8', 'uint16',
'uint32', 'uint64', 'float32', 'float64',
'complex[float32]', 'complex[float64]']:
self.assertRaises(error.CoercionError, coercion_cost,
dshape('bool'), dshape(ds))
if __name__ == '__main__':
unittest.main()
| {
"repo_name": "aterrel/datashape",
"path": "datashape/tests/test_coercion.py",
"copies": "2",
"size": "5147",
"license": "bsd-2-clause",
"hash": -6575026463690014000,
"line_mean": 44.5486725664,
"line_max": 76,
"alpha_frac": 0.5731494074,
"autogenerated": false,
"ratio": 3.270012706480305,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4843162113880305,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import unittest
from datashape import coretypes as T
from datashape.type_equation_solver import (matches_datashape_pattern,
match_argtypes_to_signature,
_match_equation)
from datashape import dshape
from datashape import error
from datashape.coercion import dim_coercion_cost, dtype_coercion_cost
class TestPatternMatch(unittest.TestCase):
def test_simple_matches(self):
self.assertTrue(matches_datashape_pattern(dshape('int32'),
dshape('int32')))
self.assertTrue(matches_datashape_pattern(dshape('int32'),
dshape('M')))
self.assertTrue(matches_datashape_pattern(dshape('int32'),
dshape('A... * int32')))
self.assertTrue(matches_datashape_pattern(dshape('int32'),
dshape('A... * M')))
self.assertFalse(matches_datashape_pattern(dshape('int32'),
dshape('int64')))
self.assertFalse(matches_datashape_pattern(dshape('3 * int32'),
dshape('M')))
self.assertFalse(matches_datashape_pattern(dshape('int16'),
dshape('A... * int32')))
self.assertFalse(matches_datashape_pattern(dshape('4 * int32'),
dshape('A... * 3 * M')))
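# --- Illustrative aside (not part of the original test module) ---------------
# Reading test_simple_matches: a dtype typevar such as 'M' matches exactly one
# measure, while an ellipsis typevar such as 'A...' absorbs any number of
# leading dimensions.  A small filter built only on matches_datashape_pattern()
# as used above; the candidate list is a made-up example.
def _matching_dshapes(pattern, candidates):
    """Return the concrete dshape strings that match `pattern`."""
    return [ds for ds in candidates
            if matches_datashape_pattern(dshape(ds), dshape(pattern))]
# _matching_dshapes('A... * int32', ['int32', '3 * int32', 'int16'])
# -> ['int32', '3 * int32'], consistent with the assertions above.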
class TestSignatureArgMatching(unittest.TestCase):
def test_nargs_mismatch(self):
        # Make sure an error is raised when the number of arguments is wrong
self.assertRaises(TypeError, match_argtypes_to_signature,
dshape('(int32, float64)'),
dshape('(int32) -> int32'))
self.assertRaises(TypeError, match_argtypes_to_signature,
dshape('(int32, float64)'),
dshape('(int32, float64, int16) -> int32'))
def test_dtype_matches_concrete(self):
# Exact match, same signature and zero cost
at = dshape('(int32, float64)')
sig = dshape('(int32, float64) -> int16')
self.assertEqual(match_argtypes_to_signature(at, sig),
(sig[0], 0))
# Requires a coercion, cost is that of the coercion
at = dshape('(int32, int32)')
sig = dshape('(int32, float64) -> int16')
self.assertEqual(match_argtypes_to_signature(at, sig),
(sig[0], dtype_coercion_cost(T.int32, T.float64)))
# Requires two coercions, cost is maximum of the two
at = dshape('(int16, int32)')
sig = dshape('(int32, float64) -> int16')
self.assertEqual(match_argtypes_to_signature(at, sig),
(sig[0], dtype_coercion_cost(T.int32, T.float64)))
def test_dtype_coerce_error(self):
at = dshape('(int32, float64)')
sig = dshape('(int32, int32) -> int16')
self.assertRaises(error.CoercionError, match_argtypes_to_signature,
at, sig)
def test_dtype_matches_typevar(self):
# Exact match, and zero cost
at = dshape('(int32, float64)')
sig = dshape('(int32, T) -> T')
self.assertEqual(match_argtypes_to_signature(at, sig),
(dshape('(int32, float64) -> float64')[0], 0.125))
# Type promotion between the inputs
at = dshape('(int32, float64)')
sig = dshape('(T, T) -> T')
self.assertEqual(match_argtypes_to_signature(at, sig),
(dshape('(float64, float64) -> float64')[0], 0.125))
# Type promotion between the inputs
at = dshape('(int32, bool, float64)')
sig = dshape('(T, S, T) -> S')
self.assertEqual(match_argtypes_to_signature(at, sig),
(dshape('(float64, bool, float64) -> bool')[0], 0.125))
def test_dshape_matches_concrete(self):
# Exact match, same signature and zero cost
at = dshape('(3 * int32, 2 * var * float64)')
sig = dshape('(3 * int32, 2 * var * float64) -> 4 * int16')
self.assertEqual(match_argtypes_to_signature(at, sig),
(sig[0], 0))
# Requires broadcasting
at = dshape('(1 * int32, 2 * 4 * float64)')
sig = dshape('(3 * int32, 2 * var * float64) -> 4 * int16')
self.assertEqual(match_argtypes_to_signature(at, sig),
(sig[0], max(dim_coercion_cost(T.Fixed(1), T.Fixed(3)),
dim_coercion_cost(T.Fixed(4), T.Var()))))
def test_dshape_matches_typevar(self):
# Arrays with matching size
at = dshape('(5 * int32, 5 * float64)')
sig = dshape('(N * int32, N * float64) -> N * int16')
self.assertEqual(match_argtypes_to_signature(at, sig),
(dshape('(5 * int32, 5 * float64) -> 5 * int16')[0],
0.125))
# Matrix multiplication
at = dshape('(3 * 5 * float64, 5 * 6 * float32)')
sig = dshape('(M * N * A, N * R * A) -> M * R * A')
self.assertEqual(match_argtypes_to_signature(at, sig),
(dshape('(3 * 5 * float64, 5 * 6 * float64) ->' +
' 3 * 6 * float64')[0], 0.375))
# Broadcasted matrix multiplication
at = dshape('(20 * 3 * 5 * float64, 3 * 1 * 5 * 6 * float32)')
sig = dshape('(Dims... * M * N * A, Dims... * N * R * A) ->' +
' Dims... * M * R * A')
self.assertEqual(match_argtypes_to_signature(at, sig),
(dshape('(20 * 3 * 5 * float64,' +
' 3 * 1 * 5 * 6 * float64) ->' +
' 3 * 20 * 3 * 6 * float64')[0], 0.625))
def test_dshape_dim_mismatch_error(self):
# Single dimension type variables must match up exactly
at = dshape('(1 * int32, 3 * float64)')
sig = dshape('(M * int32, M * int32) -> M * int16')
self.assertRaises(error.CoercionError, match_argtypes_to_signature,
at, sig)
# Ellipsis typevars must broadcast
at = dshape('(2 * int32, 3 * float64)')
sig = dshape('(Dims... * int32, Dims... * int32) -> Dims... * int16')
self.assertRaises(error.CoercionError, match_argtypes_to_signature,
at, sig)
def test_broadcast_vs_not(self):
# Single dimension type variables must match up exactly
at = dshape('(int32, float64)')
sig_scalar = dshape('(float64, float64) -> int16')
sig_bcast = dshape('(A... * float64, A... * float64) -> A... * int16')
match_scalar = match_argtypes_to_signature(at, sig_scalar)
match_bcast = match_argtypes_to_signature(at, sig_bcast)
self.assertEqual(match_scalar[0],
dshape('(float64, float64) -> int16')[0])
self.assertEqual(match_bcast[0],
dshape('(float64, float64) -> int16')[0])
# Should be cheaper to match without the broadcasting
self.assertTrue(match_scalar[1] < match_bcast[1])
def test_tv_matches_struct(self):
at = dshape('(3 * {x: int, y: string}, 3 * bool)')
sig = dshape('(M * T, M * bool) -> var * T')
match = match_argtypes_to_signature(at, sig)
self.assertEqual(match[0],
dshape('(3 * {x: int, y: string}, 3 * bool) -> var * {x: int, y: string}')[0])
def test_match_with_resolver(self):
# Test matching with a resolver function
# This is a contrived resolver which combines the A... and
# B typevars in a way that cannot be done with simple pattern
# matching. While not a useful example in and of itself, it
# exhibits the needed behavior in reduction function signature
# matching.
def resolver(tvar, tvdict):
if tvar == T.Ellipsis(T.TypeVar('R')):
a = tvdict[T.Ellipsis(T.TypeVar('A'))]
b = tvdict[T.TypeVar('B')]
result = [b]
for x in a:
result.extend([x, b])
return result
elif tvar == T.TypeVar('T'):
return T.int16
at = dshape('(5 * int32, 4 * float64)')
sig = dshape('(B * int32, A... * float64) -> R... * T')
self.assertEqual(match_argtypes_to_signature(at, sig, resolver),
(dshape('(5 * int32, 4 * float64) -> 5 * 4 * 5 * int16')[0],
0.25))
at = dshape('(5 * var * 2 * int32, 4 * float64)')
sig = dshape('(A... * int32, B * float64) -> R... * 2 * T')
self.assertEqual(match_argtypes_to_signature(at, sig, resolver),
(dshape('(5 * var * 2 * int32, 4 * float64) ->' +
' 4 * 5 * 4 * var * 4 * 2 * 4 * 2 * int16')[0],
0.25))
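# --- Illustrative aside (not part of the original test module) ---------------
# test_broadcast_vs_not shows that when several signatures apply, the one with
# the lower match cost should win.  A minimal overload-selection sketch built
# only on match_argtypes_to_signature() as exercised above; it assumes every
# candidate signature actually matches (otherwise the call raises).
def _pick_cheapest_signature(argtypes, signatures):
    """Return the (matched signature, cost) pair with the lowest cost."""
    matches = [match_argtypes_to_signature(argtypes, sig) for sig in signatures]
    return min(matches, key=lambda m: m[1])
# With argtypes (int32, float64) and the scalar vs. broadcasting signatures
# from test_broadcast_vs_not, the scalar signature is returned.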
class TestEquationMatching(unittest.TestCase):
def test_match_equation_dtype(self):
# A simple coercion
eqns = _match_equation(dshape('int32'), dshape('int64'))
self.assertEqual(eqns, [(T.int32, T.int64)])
# Matching a data type variable
eqns = _match_equation(dshape('int32'), dshape('D'))
self.assertEqual(eqns, [(T.int32, T.TypeVar('D'))])
def test_match_equation_dim(self):
# Broadcasting a single dimension
eqns = _match_equation(dshape('1 * int32'), dshape('10 * int32'))
self.assertEqual(eqns, [(T.Fixed(1), T.Fixed(10)),
(T.int32, T.int32)])
# Matching a dim type variable
eqns = _match_equation(dshape('3 * int32'), dshape('M * int32'))
self.assertEqual(eqns, [(T.Fixed(3), T.TypeVar('M')),
(T.int32, T.int32)])
def test_match_equation_ellipsis(self):
# Matching an ellipsis
eqns = _match_equation(dshape('int32'), dshape('... * int32'))
self.assertEqual(eqns, [([], T.Ellipsis()),
(T.int32, T.int32)])
eqns = _match_equation(dshape('3 * int32'), dshape('... * int32'))
self.assertEqual(eqns, [([T.Fixed(3)], T.Ellipsis()),
(T.int32, T.int32)])
eqns = _match_equation(dshape('3 * var * int32'), dshape('... * int32'))
self.assertEqual(eqns, [([T.Fixed(3), T.Var()], T.Ellipsis()),
(T.int32, T.int32)])
# Matching an ellipsis type variable
eqns = _match_equation(dshape('int32'), dshape('A... * int32'))
self.assertEqual(eqns, [([], T.Ellipsis(T.TypeVar('A'))),
(T.int32, T.int32)])
eqns = _match_equation(dshape('3 * int32'), dshape('A... * int32'))
self.assertEqual(eqns, [([T.Fixed(3)], T.Ellipsis(T.TypeVar('A'))),
(T.int32, T.int32)])
eqns = _match_equation(dshape('3 * var * int32'), dshape('A... * int32'))
self.assertEqual(eqns, [([T.Fixed(3), T.Var()], T.Ellipsis(T.TypeVar('A'))),
(T.int32, T.int32)])
# Matching an ellipsis with a dim type variable on the left
eqns = _match_equation(dshape('3 * var * int32'), dshape('A * B... * int32'))
self.assertEqual(eqns, [(T.Fixed(3), T.TypeVar('A')),
([T.Var()], T.Ellipsis(T.TypeVar('B'))),
(T.int32, T.int32)])
# Matching an ellipsis with a dim type variable on the right
eqns = _match_equation(dshape('3 * var * int32'), dshape('A... * B * int32'))
self.assertEqual(eqns, [([T.Fixed(3)], T.Ellipsis(T.TypeVar('A'))),
(T.Var(), T.TypeVar('B')),
(T.int32, T.int32)])
# Matching an ellipsis with a dim type variable on both sides
eqns = _match_equation(dshape('3 * var * int32'), dshape('A * B... * C * int32'))
self.assertEqual(eqns, [(T.Fixed(3), T.TypeVar('A')),
([], T.Ellipsis(T.TypeVar('B'))),
(T.Var(), T.TypeVar('C')),
(T.int32, T.int32)])
eqns = _match_equation(dshape('3 * var * 4 * M * int32'), dshape('A * B... * C * int32'))
self.assertEqual(eqns, [(T.Fixed(3), T.TypeVar('A')),
([T.Var(), T.Fixed(4)], T.Ellipsis(T.TypeVar('B'))),
(T.TypeVar('M'), T.TypeVar('C')),
(T.int32, T.int32)])
| {
"repo_name": "aterrel/datashape",
"path": "datashape/tests/test_type_equation_solver.py",
"copies": "1",
"size": "12820",
"license": "bsd-2-clause",
"hash": 3002905241598810600,
"line_mean": 51.9752066116,
"line_max": 103,
"alpha_frac": 0.5030421217,
"autogenerated": false,
"ratio": 3.6744052737173973,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9673151546304292,
"avg_score": 0.0008591698226210632,
"num_lines": 242
} |
from __future__ import absolute_import, division, print_function
import unittest
from datashape import dshape
from blaze.compute.air import explicit_coercions
from blaze.compute.air.tests.utils import make_graph
class TestCoercions(unittest.TestCase):
def test_coercions(self):
f, values, graph = make_graph()
explicit_coercions(f)
ops = [(op.opcode, op.type) for op in f.ops][:-1]
expected = [('convert', dshape("10, float64")),
('kernel', dshape("10, float64")),
('convert', dshape("10, complex[float64]")),
('kernel', dshape("10, complex[float64]"))]
self.assertEqual(ops, expected)
# function 10, complex[float64] expr0(10, float64 %e0, 10, int32 %e1, 10, complex[float64] %e2) {
# entry:
# %3 = (10, float64) convert(%e1)
# %0 = (10, float64) kernel(%const(Bytes, blaze.ops.ufuncs.add), %3, %e0)
# %4 = (10, complex[float64]) convert(%0)
# %1 = (10, complex[float64]) kernel(%const(Bytes, blaze.ops.ufuncs.mul), %4, %e2)
# %2 = (Void) ret(%1)
#
# }
if __name__ == '__main__':
unittest.main()
| {
"repo_name": "zeeshanali/blaze",
"path": "blaze/compute/air/tests/test_transforms.py",
"copies": "1",
"size": "1205",
"license": "bsd-3-clause",
"hash": -6411709695848540000,
"line_mean": 34.4411764706,
"line_max": 105,
"alpha_frac": 0.5626556017,
"autogenerated": false,
"ratio": 3.1627296587926508,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9220739939164655,
"avg_score": 0.0009290642655990826,
"num_lines": 34
} |
from __future__ import absolute_import, division, print_function
import unittest
from datashape import dshape
import blaze
from blaze import array
from blaze.compute.ops.ufuncs import add, mul
import numpy as np
#------------------------------------------------------------------------
# Utils
#------------------------------------------------------------------------
def make_expr(ds1, ds2):
a = array(range(10), dshape=ds1)
b = array(range(10), dshape=ds2)
expr = add(a, mul(a, b))
return expr
#------------------------------------------------------------------------
# Tests
#------------------------------------------------------------------------
class TestJit(unittest.TestCase):
def test_jit(self):
expr = make_expr(dshape('10, float32'), dshape('10, float32'))
result = blaze.eval(expr, strategy='jit')
expected = blaze.array([ 0, 2, 6, 12, 20, 30, 42, 56, 72, 90])
self.assertEqual(type(result), blaze.Array)
self.assertTrue(np.all(result == expected))
def test_jit_promotion(self):
expr = make_expr(dshape('10, int32'), dshape('10, float32'))
result = blaze.eval(expr, strategy='jit')
expected = blaze.array([ 0, 2, 6, 12, 20, 30, 42, 56, 72, 90],
dshape=dshape('10, float64'))
self.assertEqual(type(result), blaze.Array)
self.assertTrue(np.all(result == expected))
def test_jit_scalar(self):
a = blaze.array(range(10), dshape=dshape('10, int32'))
b = 10
expr = add(a, mul(a, b))
result = blaze.eval(expr)
np_a = np.arange(10)
expected = np_a + np_a * b
self.assertTrue(np.all(result == expected))
if __name__ == '__main__':
# TestJit('test_jit').debug()
unittest.main()
| {
"repo_name": "XinSong/blaze",
"path": "blaze/compute/air/execution/tests/test_jit_interp.py",
"copies": "2",
"size": "1800",
"license": "bsd-3-clause",
"hash": -4784771157975032000,
"line_mean": 31.7272727273,
"line_max": 73,
"alpha_frac": 0.4972222222,
"autogenerated": false,
"ratio": 3.585657370517928,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5082879592717928,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import unittest
import blaze
from blaze.datadescriptor import dd_as_py
class TestBasic(unittest.TestCase):
def test_add(self):
types = ['int8', 'int16', 'int32', 'int64']
for type_ in types:
a = blaze.array(range(3), dshape=type_)
c = blaze.eval(a+a)
self.assertEqual(dd_as_py(c._data), [0, 2, 4])
c = blaze.eval(((a+a)*a))
self.assertEqual(dd_as_py(c._data), [0, 2, 8])
def test_add_with_pyobj(self):
a = blaze.array(3) + 3
self.assertEqual(dd_as_py(a._data), 6)
a = 3 + blaze.array(4)
self.assertEqual(dd_as_py(a._data), 7)
a = blaze.array([1, 2]) + 4
self.assertEqual(dd_as_py(a._data), [5, 6])
a = [1, 2] + blaze.array(5)
self.assertEqual(dd_as_py(a._data), [6, 7])
#FIXME: Need to convert uint8 from dshape to ctypes
# in _get_ctypes of blaze_kernel.py
def test_mixed(self):
types1 = ['int8', 'int16', 'int32', 'int64']
types2 = ['int16', 'int32', 'float32', 'float64']
for ty1, ty2 in zip(types1, types2):
a = blaze.array(range(1,6), dshape=ty1)
b = blaze.array(range(5), dshape=ty2)
c = (a+b)*(a-b)
c = blaze.eval(c)
result = [a*a - b*b for (a,b) in zip(range(1,6),range(5))]
self.assertEqual(dd_as_py(c._data), result)
def test_ragged(self):
a = blaze.array([[1], [2, 3], [4, 5, 6]])
b = blaze.array([[1, 2, 3], [4, 5], [6]])
c = blaze.eval(a + b)
self.assertEqual(dd_as_py(c._data),
[[2, 3, 4], [6, 8], [10, 11, 12]])
c = blaze.eval(2 * a - b)
self.assertEqual(dd_as_py(c._data),
[[1, 0, -1], [0, 1], [2, 4, 6]])
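# --- Illustrative aside (not part of the original test module) ---------------
# test_ragged above exercises per-row broadcasting over var-length rows: a
# length-1 row is stretched to the other operand's length, and ordinary
# elementwise arithmetic applies otherwise.  A plain-Python sketch of that
# rule for addition only:
def _sketch_ragged_add(a, b):
    out = []
    for ra, rb in zip(a, b):
        if len(ra) == 1:
            ra = ra * len(rb)   # broadcast the single element
        if len(rb) == 1:
            rb = rb * len(ra)
        out.append([x + y for x, y in zip(ra, rb)])
    return out
# _sketch_ragged_add([[1], [2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5], [6]])
# -> [[2, 3, 4], [6, 8], [10, 11, 12]], the expected value asserted above.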
class TestReduction(unittest.TestCase):
def test_min_zerosize(self):
        # An empty min operation should raise, because min has no
        # reduction identity
self.assertRaises(ValueError, blaze.eval, blaze.min([]))
self.assertRaises(ValueError, blaze.eval, blaze.min([], keepdims=True))
self.assertRaises(ValueError, blaze.eval, blaze.min([[], []]))
self.assertRaises(ValueError, blaze.eval, blaze.min([[], []],
keepdims=True))
self.assertRaises(ValueError, blaze.eval, blaze.min([[], []], axis=-1))
self.assertRaises(ValueError, blaze.eval, blaze.min([[], []],
axis=-1,
keepdims=True))
# However, if we're only reducing on a non-empty dimension, it's ok
self.assertEqual(dd_as_py(blaze.eval(blaze.min([[], []],
axis=0))._data),
[])
self.assertEqual(dd_as_py(blaze.eval(blaze.min([[], []],
axis=0,
keepdims=True))._data),
[[]])
def test_min(self):
# Min element of scalar case is the element itself
self.assertEqual(dd_as_py(blaze.eval(blaze.min(10))._data), 10)
self.assertEqual(dd_as_py(blaze.eval(blaze.min(-5.0))._data), -5.0)
# One-dimensional size one
self.assertEqual(dd_as_py(blaze.eval(blaze.min([10]))._data), 10)
self.assertEqual(dd_as_py(blaze.eval(blaze.min([-5.0]))._data), -5.0)
self.assertEqual(dd_as_py(blaze.eval(blaze.min([-5.0],
axis=0))._data), -5.0)
self.assertEqual(dd_as_py(blaze.eval(blaze.min([10],
keepdims=True))._data),
[10])
# One dimensional
self.assertEqual(dd_as_py(blaze.eval(blaze.min([1, 2]))._data), 1)
self.assertEqual(dd_as_py(blaze.eval(blaze.min([2, 1]))._data), 1)
self.assertEqual(dd_as_py(blaze.eval(blaze.min([0, 1, 0]))._data), 0)
self.assertEqual(dd_as_py(blaze.eval(blaze.min([0, 1, 0]))._data), 0)
self.assertEqual(dd_as_py(blaze.eval(blaze.min([1, 0, 2]))._data), 0)
self.assertEqual(dd_as_py(blaze.eval(blaze.min([2, 1, 0]))._data), 0)
# Two dimensional, test with minimum at all possible positions
self.assertEqual(dd_as_py(blaze.eval(blaze.min([[1, 2, 3],
[4, 5, 6]]))._data), 1)
self.assertEqual(dd_as_py(blaze.eval(blaze.min([[2, 1, 3],
[4, 5, 6]]))._data), 1)
self.assertEqual(dd_as_py(blaze.eval(blaze.min([[3, 2, 1],
[4, 5, 6]]))._data), 1)
self.assertEqual(dd_as_py(blaze.eval(blaze.min([[3, 2, 5],
[4, 1, 6]]))._data), 1)
self.assertEqual(dd_as_py(blaze.eval(blaze.min([[3, 2, 5],
[4, 6, 1]]))._data), 1)
self.assertEqual(dd_as_py(blaze.eval(blaze.min([[3, 2, 5],
[1, 6, 4]]))._data), 1)
# Two dimensional, with axis= argument both positive and negative
self.assertEqual(dd_as_py(blaze.eval(blaze.min([[1, 5, 3],
[4, 2, 6]],
axis=0))._data),
[1, 2, 3])
self.assertEqual(dd_as_py(blaze.eval(blaze.min([[1, 5, 3],
[4, 2, 6]],
axis=-2))._data),
[1, 2, 3])
self.assertEqual(dd_as_py(blaze.eval(blaze.min([[1, 2, 3],
[4, 5, 6]],
axis=1))._data),
[1, 4])
self.assertEqual(dd_as_py(blaze.eval(blaze.min([[1, 2, 3],
[4, 5, 6]],
axis=-1))._data),
[1, 4])
# Two dimensional, with keepdims=True
self.assertEqual(dd_as_py(blaze.eval(blaze.min([[1, 2, 3],
[4, 5, 6]],
keepdims=True))._data),
[[1]])
self.assertEqual(dd_as_py(blaze.eval(blaze.min([[1, 2, 3],
[5, 4, 6]],
axis=0,
keepdims=True))._data),
[[1, 2, 3]])
self.assertEqual(dd_as_py(blaze.eval(blaze.min([[1, 5, 3],
[4, 2, 6]],
axis=1,
keepdims=True))._data),
[[1], [2]])
def test_sum_zerosize(self):
# Empty sum operations should produce 0, the reduction identity
self.assertEqual(dd_as_py(blaze.eval(blaze.sum([]))._data), 0)
self.assertEqual(dd_as_py(blaze.eval(blaze.sum([],
keepdims=True))._data),
[0])
self.assertEqual(dd_as_py(blaze.eval(blaze.sum([[], []]))._data), 0)
self.assertEqual(dd_as_py(blaze.eval(blaze.sum([[], []],
keepdims=True))._data),
[[0]])
self.assertEqual(dd_as_py(blaze.eval(blaze.sum([[], []],
axis=-1))._data),
[0, 0])
self.assertEqual(dd_as_py(blaze.eval(blaze.sum([[], []],
axis=-1,
keepdims=True))._data),
[[0], [0]])
# If we're only reducing on a non-empty dimension, we might still
# end up with zero-sized outputs
self.assertEqual(dd_as_py(blaze.eval(blaze.sum([[], []],
axis=0))._data),
[])
self.assertEqual(dd_as_py(blaze.eval(blaze.sum([[], []],
axis=0,
keepdims=True))._data),
[[]])
def test_sum(self):
# Sum of scalar case is the element itself
self.assertEqual(dd_as_py(blaze.eval(blaze.sum(10))._data), 10)
self.assertEqual(dd_as_py(blaze.eval(blaze.sum(-5.0))._data), -5.0)
# One-dimensional size one
self.assertEqual(dd_as_py(blaze.eval(blaze.sum([10]))._data), 10)
self.assertEqual(dd_as_py(blaze.eval(blaze.sum([-5.0]))._data), -5.0)
self.assertEqual(dd_as_py(blaze.eval(blaze.sum([-5.0],
axis=0))._data), -5.0)
self.assertEqual(dd_as_py(blaze.eval(blaze.sum([10],
keepdims=True))._data),
[10])
# One dimensional
self.assertEqual(dd_as_py(blaze.eval(blaze.sum([1, 2]))._data), 3)
self.assertEqual(dd_as_py(blaze.eval(blaze.sum([0, 1, 2]))._data), 3)
# Two dimensional
self.assertEqual(dd_as_py(blaze.eval(blaze.sum([[1, 2, 3],
[4, 5, 6]]))._data), 21)
# Two dimensional, with axis= argument both positive and negative
self.assertEqual(dd_as_py(blaze.eval(blaze.sum([[1, 5, 3],
[4, 2, 6]],
axis=0))._data),
[5, 7, 9])
self.assertEqual(dd_as_py(blaze.eval(blaze.sum([[1, 5, 3],
[4, 2, 6]],
axis=-2))._data),
[5, 7, 9])
self.assertEqual(dd_as_py(blaze.eval(blaze.sum([[1, 2, 3],
[4, 5, 6]],
axis=1))._data),
[6, 15])
self.assertEqual(dd_as_py(blaze.eval(blaze.sum([[1, 2, 3],
[4, 5, 6]],
axis=-1))._data),
[6, 15])
# Two dimensional, with keepdims=True
self.assertEqual(dd_as_py(blaze.eval(blaze.sum([[1, 2, 3],
[4, 5, 6]],
keepdims=True))._data),
[[21]])
self.assertEqual(dd_as_py(blaze.eval(blaze.sum([[1, 2, 3],
[5, 4, 6]],
axis=0,
keepdims=True))._data),
[[6, 6, 9]])
self.assertEqual(dd_as_py(blaze.eval(blaze.sum([[1, 5, 3],
[4, 2, 6]],
axis=1,
keepdims=True))._data),
[[9], [12]])
def test_all(self):
# Sanity check of reduction op
self.assertEqual(dd_as_py(blaze.eval(blaze.all(True))._data), True)
self.assertEqual(dd_as_py(blaze.eval(blaze.all(False))._data), False)
self.assertEqual(dd_as_py(blaze.eval(blaze.all(blaze.array([], dshape='0 * bool')))._data), True)
self.assertEqual(dd_as_py(blaze.eval(blaze.all([False, True]))._data),
False)
self.assertEqual(dd_as_py(blaze.eval(blaze.all([True, True]))._data),
True)
def test_any(self):
# Sanity check of reduction op
self.assertEqual(dd_as_py(blaze.eval(blaze.any(True))._data), True)
self.assertEqual(dd_as_py(blaze.eval(blaze.any(False))._data), False)
self.assertEqual(dd_as_py(blaze.eval(blaze.any(blaze.array([], dshape='0 * bool')))._data), False)
self.assertEqual(dd_as_py(blaze.eval(blaze.any([False, True]))._data),
True)
self.assertEqual(dd_as_py(blaze.eval(blaze.any([False, False]))._data),
False)
def test_max(self):
# Sanity check of reduction op
self.assertEqual(dd_as_py(blaze.eval(blaze.max(5))._data), 5)
self.assertRaises(ValueError, blaze.eval, blaze.max([]))
self.assertEqual(dd_as_py(blaze.eval(blaze.max([3, -2]))._data),
3)
self.assertEqual(dd_as_py(blaze.eval(blaze.max([1.5, 2.0]))._data),
2.0)
def test_product(self):
# Sanity check of reduction op
self.assertEqual(dd_as_py(blaze.eval(blaze.product(5))._data), 5)
self.assertEqual(dd_as_py(blaze.eval(blaze.product([]))._data), 1)
self.assertEqual(dd_as_py(blaze.eval(blaze.product([3, -2]))._data),
-6)
self.assertEqual(dd_as_py(blaze.eval(blaze.product([1.5, 2.0]))._data),
3.0)
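# --- Illustrative aside (not part of the original test module) ---------------
# The zero-size cases above hinge on reduction identities: sum and product
# have identities (0 and 1), so reducing an empty array is well defined,
# while min and max have none, so they must raise.  Plain Python shows the
# same distinction (math.prod needs Python 3.8+):
def _reduction_identity_demo():
    import math
    assert sum([]) == 0          # identity of +
    assert math.prod([]) == 1    # identity of *
    try:
        min([])                  # no identity, so an empty min is an error
    except ValueError:
        pass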
if __name__ == '__main__':
unittest.main()
| {
"repo_name": "mwiebe/blaze",
"path": "blaze/tests/test_calc.py",
"copies": "1",
"size": "13856",
"license": "bsd-3-clause",
"hash": -3796418383526555600,
"line_mean": 52.2923076923,
"line_max": 106,
"alpha_frac": 0.4131062356,
"autogenerated": false,
"ratio": 3.778565584946823,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4691671820546823,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import unittest
import datashape
import blaze
from blaze.optional_packages import tables_is_here
from blaze.catalog.tests.catalog_harness import CatalogHarness
from blaze.py2help import skipIf
class TestCatalog(unittest.TestCase):
def setUp(self):
self.cat = CatalogHarness()
blaze.catalog.load_config(self.cat.catfile)
def tearDown(self):
blaze.catalog.load_default()
self.cat.close()
def test_dir_traversal(self):
blaze.catalog.cd('/')
self.assertEquals(blaze.catalog.cwd(), '/')
entities = ['csv_arr', 'json_arr', 'npy_arr', 'py_arr', 'subdir']
if tables_is_here:
entities.append('hdf5_arr')
self.assertEquals(blaze.catalog.ls(), sorted(entities))
arrays = ['csv_arr', 'json_arr', 'npy_arr', 'py_arr']
if tables_is_here:
arrays.append('hdf5_arr')
self.assertEquals(blaze.catalog.ls_arrs(), sorted(arrays))
self.assertEquals(blaze.catalog.ls_dirs(),
['hdf5_dir', 'subdir'])
blaze.catalog.cd('subdir')
self.assertEquals(blaze.catalog.cwd(), '/subdir')
self.assertEquals(blaze.catalog.ls(),
['csv_arr2'])
def test_load_csv(self):
# Confirms that a simple csv file can be loaded
blaze.catalog.cd('/')
a = blaze.catalog.get('csv_arr')
ds = datashape.dshape('5 * {Letter: string, Number: int32}')
self.assertEqual(a.dshape, ds)
dat = blaze.datadescriptor.dd_as_py(a._data)
self.assertEqual(dat, [{'Letter': 'alpha', 'Number': 0},
{'Letter': 'beta', 'Number': 1},
{'Letter': 'gamma', 'Number': 2},
{'Letter': 'delta', 'Number': 3},
{'Letter': 'epsilon', 'Number': 4}])
def test_load_json(self):
# Confirms that a simple json file can be loaded
blaze.catalog.cd('/')
a = blaze.catalog.get('json_arr')
ds = datashape.dshape('2 * var * int32')
self.assertEqual(a.dshape, ds)
dat = blaze.datadescriptor.dd_as_py(a._data)
self.assertEqual(dat, [[1, 2, 3], [1, 2]])
@skipIf(not tables_is_here, 'PyTables is not installed')
def test_load_hdf5(self):
# Confirms that a simple hdf5 array in a file can be loaded
blaze.catalog.cd('/')
a = blaze.catalog.get('hdf5_arr')
ds = datashape.dshape('2 * 3 * int32')
self.assertEqual(a.dshape, ds)
dat = blaze.datadescriptor.dd_as_py(a._data)
self.assertEqual(dat, [[1, 2, 3], [3, 2, 1]])
@skipIf(not tables_is_here, 'PyTables is not installed')
def test_hdf5_dir(self):
blaze.catalog.cd('/hdf5_dir')
self.assertEquals(blaze.catalog.cwd(), '/hdf5_dir')
self.assertEquals(blaze.catalog.ls(), sorted(['a1', 'mygroup']))
self.assertEquals(blaze.catalog.ls_dirs(), sorted(['mygroup']))
self.assertEquals(blaze.catalog.ls_arrs(), sorted(['a1']))
@skipIf(not tables_is_here, 'PyTables is not installed')
def test_hdf5_subdir(self):
blaze.catalog.cd('/hdf5_dir/mygroup')
self.assertEquals(blaze.catalog.cwd(), '/hdf5_dir/mygroup')
self.assertEquals(blaze.catalog.ls(),
sorted(['a2', 'a3', 'mygroup2']))
self.assertEquals(blaze.catalog.ls_dirs(), sorted(['mygroup2']))
self.assertEquals(blaze.catalog.ls_arrs(), sorted(['a2', 'a3']))
@skipIf(not tables_is_here, 'PyTables is not installed')
def test_hdf5_subdir_get(self):
blaze.catalog.cd('/hdf5_dir/mygroup')
a = blaze.catalog.get('a3')
ds = datashape.dshape('2 * 3 * int32')
self.assertEqual(a.dshape, ds)
dat = blaze.datadescriptor.dd_as_py(a._data)
self.assertEqual(dat, [[1, 3, 2], [2, 1, 3]])
@skipIf(not tables_is_here, 'PyTables is not installed')
def test_hdf5_subdir_ls(self):
# Check top level
blaze.catalog.cd('/')
lall = blaze.catalog.ls_dirs()
self.assertEqual(lall, ['hdf5_dir', 'subdir'])
# Check HDF5 root level
blaze.catalog.cd('/hdf5_dir')
larrs = blaze.catalog.ls_arrs()
self.assertEqual(larrs, ['a1'])
ldirs = blaze.catalog.ls_dirs()
self.assertEqual(ldirs, ['mygroup'])
lall = blaze.catalog.ls()
self.assertEqual(lall, ['a1', 'mygroup'])
# Check HDF5 second level
blaze.catalog.cd('/hdf5_dir/mygroup')
larrs = blaze.catalog.ls_arrs()
self.assertEqual(larrs, ['a2', 'a3'])
ldirs = blaze.catalog.ls_dirs()
self.assertEqual(ldirs, ['mygroup2'])
lall = blaze.catalog.ls()
self.assertEqual(lall, ['a2', 'a3', 'mygroup2'])
def test_load_npy(self):
# Confirms that a simple npy file can be loaded
blaze.catalog.cd('/')
a = blaze.catalog.get('npy_arr')
ds = datashape.dshape('20 * {idx: int32, val: string}')
self.assertEqual(a.dshape, ds)
dat = blaze.datadescriptor.dd_as_py(a._data)
self.assertEqual([x['idx'] for x in dat],
list(range(20)))
self.assertEqual([x['val'] for x in dat],
['yes', 'no'] * 10)
def test_load_py(self):
# Confirms that a simple py file can generate a blaze array
blaze.catalog.cd('/')
a = blaze.catalog.get('py_arr')
ds = datashape.dshape('5 * int32')
self.assertEqual(a.dshape, ds)
dat = blaze.datadescriptor.dd_as_py(a._data)
self.assertEqual(dat, [1, 2, 3, 4, 5])
if __name__ == '__main__':
unittest.main()
| {
"repo_name": "xsixing/blaze",
"path": "blaze/catalog/tests/test_catalog.py",
"copies": "2",
"size": "5754",
"license": "bsd-3-clause",
"hash": 8851084069624317000,
"line_mean": 39.5211267606,
"line_max": 73,
"alpha_frac": 0.5742092457,
"autogenerated": false,
"ratio": 3.289879931389365,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4864089177089366,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import unittest
import tempfile
import os, os.path
import glob
import shutil
import numpy as np
from numpy.testing import assert_array_equal, assert_allclose
from dynd import nd, ndt
import blaze
# Useful superclass for disk-based tests
class MayBePersistentTest(unittest.TestCase):
disk = None
def setUp(self):
if self.disk == 'BLZ':
prefix = 'blaze-' + self.__class__.__name__
suffix = '.blz'
path1 = tempfile.mkdtemp(suffix=suffix, prefix=prefix)
os.rmdir(path1)
self.ddesc1 = blaze.BLZ_DDesc(path1, mode='w')
path2 = tempfile.mkdtemp(suffix=suffix, prefix=prefix)
os.rmdir(path2)
self.ddesc2 = blaze.BLZ_DDesc(path2, mode='w')
path3 = tempfile.mkdtemp(suffix=suffix, prefix=prefix)
os.rmdir(path3)
self.ddesc3 = blaze.BLZ_DDesc(path3, mode='w')
elif self.disk == 'HDF5':
prefix = 'hdf5-' + self.__class__.__name__
suffix = '.hdf5'
dpath = "/earray"
h, path1 = tempfile.mkstemp(suffix=suffix, prefix=prefix)
            os.close(h)  # close the unneeded file handle
self.ddesc1 = blaze.HDF5_DDesc(path1, dpath, mode='w')
h, path2 = tempfile.mkstemp(suffix=suffix, prefix=prefix)
os.close(h)
self.ddesc2 = blaze.HDF5_DDesc(path2, dpath, mode='w')
h, path3 = tempfile.mkstemp(suffix=suffix, prefix=prefix)
os.close(h)
self.ddesc3 = blaze.HDF5_DDesc(path3, dpath, mode='w')
else:
self.ddesc1 = None
self.ddesc2 = None
self.ddesc3 = None
def tearDown(self):
if self.disk:
self.ddesc1.remove()
self.ddesc2.remove()
self.ddesc3.remove()
# Check for arrays that fit in the chunk size
class evalTest(unittest.TestCase):
vm = "numexpr" # if numexpr not available, it will fall back to python
N = 1000
def test00(self):
"""Testing elwise_eval() with only scalars and constants"""
a = 3
cr = blaze._elwise_eval("2 * a", vm=self.vm)
self.assert_(cr == 6, "eval does not work correctly")
def test01(self):
"""Testing with only blaze arrays"""
a, b = np.arange(self.N), np.arange(1, self.N+1)
c = blaze.array(a)
d = blaze.array(b)
cr = blaze._elwise_eval("c * d", vm=self.vm)
nr = a * b
assert_array_equal(cr[:], nr, "eval does not work correctly")
def test02(self):
"""Testing with only numpy arrays"""
a, b = np.arange(self.N), np.arange(1, self.N+1)
cr = blaze._elwise_eval("a * b", vm=self.vm)
nr = a * b
assert_array_equal(cr[:], nr, "eval does not work correctly")
def test03(self):
"""Testing with only dynd arrays"""
a, b = np.arange(self.N), np.arange(1, self.N+1)
c = nd.array(a)
d = nd.array(b)
cr = blaze._elwise_eval("c * d", vm=self.vm)
nr = a * b
assert_array_equal(cr[:], nr, "eval does not work correctly")
def test04(self):
"""Testing with a mix of blaze, numpy and dynd arrays"""
a, b = np.arange(self.N), np.arange(1, self.N+1)
b = blaze.array(b)
d = nd.array(a)
cr = blaze._elwise_eval("a * b + d", vm=self.vm)
nr = a * b + d
assert_array_equal(cr[:], nr, "eval does not work correctly")
def test05(self):
"""Testing with a mix of scalars and blaze, numpy and dynd arrays"""
a, b = np.arange(self.N), np.arange(1, self.N+1)
b = blaze.array(b)
d = nd.array(a)
cr = blaze._elwise_eval("a * b + d + 2", vm=self.vm)
nr = a * b + d + 2
assert_array_equal(cr[:], nr, "eval does not work correctly")
def test06(self):
"""Testing reductions on blaze arrays"""
if self.vm == "python":
            # Reductions do not work well with Blaze expressions yet
return
a, b = np.arange(self.N), np.arange(1, self.N+1)
b = blaze.array(b)
cr = blaze._elwise_eval("sum(b + 2)", vm=self.vm)
nr = np.sum(b + 2)
self.assert_(cr == nr, "eval does not work correctly")
# Check for arrays that fit in the chunk size
# Using the Python VM (i.e. Blaze machinery) here
class evalPythonTest(evalTest):
vm = "python"
# Check for arrays that are larger than a chunk
class evalLargeTest(evalTest):
N = 10000
# Check for arrays that are larger than a chunk
# Using the Python VM (i.e. Blaze machinery) here
class evalPythonLargeTest(evalTest):
N = 10000
vm = "python"
# Check for arrays stored on-disk, but fit in a chunk
# Check for arrays that fit in memory
class storageTest(MayBePersistentTest):
N = 1000
vm = "numexpr"
disk = "BLZ"
def test00(self):
"""Testing elwise_eval() with only blaze arrays"""
a, b = np.arange(self.N), np.arange(1, self.N+1)
c = blaze.array(a, ddesc=self.ddesc1)
d = blaze.array(b, ddesc=self.ddesc2)
cr = blaze._elwise_eval("c * d", vm=self.vm, ddesc=self.ddesc3)
nr = a * b
assert_array_equal(cr[:], nr, "eval does not work correctly")
def test01(self):
"""Testing elwise_eval() with blaze arrays and constants"""
a, b = np.arange(self.N), np.arange(1, self.N+1)
c = blaze.array(a, ddesc=self.ddesc1)
d = blaze.array(b, ddesc=self.ddesc2)
cr = blaze._elwise_eval("c * d + 1", vm=self.vm, ddesc=self.ddesc3)
nr = a * b + 1
assert_array_equal(cr[:], nr, "eval does not work correctly")
def test03(self):
"""Testing elwise_eval() with blaze and dynd arrays"""
a, b = np.arange(self.N), np.arange(1, self.N+1)
c = blaze.array(a, ddesc=self.ddesc1)
d = nd.array(b)
cr = blaze._elwise_eval("c * d + 1", vm=self.vm, ddesc=self.ddesc3)
nr = a * b + 1
assert_array_equal(cr[:], nr, "eval does not work correctly")
def test04(self):
"""Testing elwise_eval() with blaze, dynd and numpy arrays"""
a, b = np.arange(self.N), np.arange(1, self.N+1)
c = blaze.array(a, ddesc=self.ddesc1)
d = nd.array(b)
cr = blaze._elwise_eval("a * c + d", vm=self.vm, ddesc=self.ddesc3)
nr = a * c + d
assert_array_equal(cr[:], nr, "eval does not work correctly")
def test05(self):
"""Testing reductions on blaze arrays"""
if self.vm == "python":
            # Reductions do not work well with Blaze expressions yet
return
a, b = np.arange(self.N), np.arange(1, self.N+1)
b = blaze.array(b, ddesc=self.ddesc1)
cr = blaze._elwise_eval("sum(b + 2)", vm=self.vm, ddesc=self.ddesc3)
nr = np.sum(b + 2)
self.assert_(cr == nr, "eval does not work correctly")
# Check for arrays stored on-disk, but fit in a chunk
# Using the Python VM (i.e. Blaze machinery) here
class storagePythonTest(storageTest):
vm = "python"
# Check for arrays stored on-disk, but are larger than a chunk
class storageLargeTest(storageTest):
N = 10000
# Check for arrays stored on-disk, but are larger than a chunk
# Using the Python VM (i.e. Blaze machinery) here
class storagePythonLargeTest(storageTest):
N = 10000
vm = "python"
# Check for arrays stored on-disk, but fit in a chunk
class storageHDF5Test(storageTest):
disk = "HDF5"
# Check for arrays stored on-disk, but are larger than a chunk
class storageLargeHDF5Test(storageTest):
N = 10000
disk = "HDF5"
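# --- Illustrative aside (not part of the original test module) ---------------
# The classes above reuse one test body and vary it along three axes purely
# through class attributes: array size (N), evaluation vm ("numexpr" vs
# "python") and storage backend (disk).  A minimal standalone sketch of that
# subclass-as-parameter pattern, independent of blaze:
class _ParamSketchBase(unittest.TestCase):
    n = 1000
    backend = None               # in-memory by default

    def test_roundtrip(self):
        data = list(range(self.n))
        self.assertEqual(len(data), self.n)

class _ParamSketchLarge(_ParamSketchBase):
    n = 10000                    # same test body, larger input

class _ParamSketchDisk(_ParamSketchBase):
    backend = "disk"             # same test body, different backend flag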
####################################
# Multidimensional tests start now
####################################
# Check for arrays that fit in a chunk
class evalMDTest(unittest.TestCase):
N = 10
M = 100
vm = "numexpr"
def test00(self):
"""Testing elwise_eval() with only blaze arrays"""
a = np.arange(self.N*self.M).reshape(self.N, self.M)
b = np.arange(1, self.N*self.M+1).reshape(self.N, self.M)
c = blaze.array(a)
d = blaze.array(b)
cr = blaze._elwise_eval("c * d", vm=self.vm)
nr = a * b
assert_array_equal(cr[:], nr, "eval does not work correctly")
def test01(self):
"""Testing elwise_eval() with blaze arrays and scalars"""
a = np.arange(self.N*self.M).reshape(self.N, self.M)
b = np.arange(1, self.N*self.M+1).reshape(self.N, self.M)
c = blaze.array(a)
d = blaze.array(b)
cr = blaze._elwise_eval("c * d + 2", vm=self.vm)
nr = a * b + 2
assert_array_equal(cr[:], nr, "eval does not work correctly")
def test02(self):
"""Testing elwise_eval() with pure dynd arrays and scalars"""
a = np.arange(self.N*self.M).reshape(self.N, self.M)
b = np.arange(1, self.N*self.M+1).reshape(self.N, self.M)
c = nd.array(a)
d = nd.array(b)
cr = blaze._elwise_eval("c * d + 2", vm=self.vm)
nr = a * b + 2
assert_array_equal(cr[:], nr, "eval does not work correctly")
def test03(self):
"""Testing elwise_eval() with blaze and dynd arrays and scalars"""
a = np.arange(self.N*self.M).reshape(self.N, self.M)
b = np.arange(1, self.N*self.M+1).reshape(self.N, self.M)
c = blaze.array(a)
d = nd.array(b)
cr = blaze._elwise_eval("c * d + 2", vm=self.vm)
nr = a * b + 2
assert_array_equal(cr[:], nr, "eval does not work correctly")
def test04(self):
"""Testing reductions on blaze arrays"""
if self.vm == "python":
            # Reductions do not work well with Blaze expressions yet
return
a = np.arange(self.N*self.M).reshape(self.N, self.M)
b = np.arange(1, self.N*self.M+1).reshape(self.N, self.M)
b = blaze.array(b)
cr = blaze._elwise_eval("sum(b + 2)", vm=self.vm)
nr = np.sum(b + 2)
self.assert_(cr == nr, "eval does not work correctly")
def test05(self):
"""Testing reductions on blaze arrays and axis=0"""
if self.vm == "python":
            # Reductions do not work well with Blaze expressions yet
return
a = np.arange(self.N*self.M).reshape(self.N, self.M)
b = np.arange(1, self.N*self.M+1).reshape(self.N, self.M)
b = blaze.array(b)
cr = blaze._elwise_eval("sum(b + 2, axis=0)", vm=self.vm)
nr = np.sum(b + 2, axis=0)
assert_array_equal(cr[:], nr, "eval does not work correctly")
def test06(self):
"""Testing reductions on blaze arrays and axis=1"""
if self.vm == "python":
            # Reductions do not work well with Blaze expressions yet
return
self.assertRaises(NotImplementedError,
blaze._elwise_eval, "sum([[1,2],[3,4]], axis=1)")
# Check for arrays that fit in a chunk
# Using the Python VM (i.e. Blaze machinery) here
class evalPythonMDTest(evalMDTest):
vm = "python"
# Check for arrays that do not fit in a chunk
class evalLargeMDTest(evalMDTest):
N = 100
M = 100
# Check for arrays that do not fit in a chunk, but using the python VM
class evalPythonLargeMDTest(evalMDTest):
N = 100
M = 100
vm = "python"
# Check for arrays that fit in a chunk (HDF5)
class evalMDHDF5Test(evalMDTest):
disk = "HDF5"
# Check for arrays that do not fit in a chunk (HDF5)
class evalLargeMDHDF5Test(evalMDTest):
N = 100
M = 100
disk = "HDF5"
# Check for arrays stored on-disk, but fit in a chunk
# Check for arrays that fit in memory
class storageMDTest(MayBePersistentTest):
N = 10
M = 100
vm = "numexpr"
disk = "BLZ"
def test00(self):
"""Testing elwise_eval() with only blaze arrays"""
a = np.arange(self.N*self.M).reshape(self.N, self.M)
b = np.arange(1, self.N*self.M+1).reshape(self.N, self.M)
c = blaze.array(a, ddesc=self.ddesc1)
d = blaze.array(b, ddesc=self.ddesc2)
cr = blaze._elwise_eval("c * d", vm=self.vm, ddesc=self.ddesc3)
nr = a * b
assert_array_equal(cr[:], nr, "eval does not work correctly")
def test01(self):
"""Testing elwise_eval() with blaze arrays and constants"""
a = np.arange(self.N*self.M).reshape(self.N, self.M)
b = np.arange(1, self.N*self.M+1).reshape(self.N, self.M)
c = blaze.array(a, ddesc=self.ddesc1)
d = blaze.array(b, ddesc=self.ddesc2)
cr = blaze._elwise_eval("c * d + 1", vm=self.vm, ddesc=self.ddesc3)
nr = a * b + 1
assert_array_equal(cr[:], nr, "eval does not work correctly")
def test03(self):
"""Testing elwise_eval() with blaze and dynd arrays"""
a = np.arange(self.N*self.M).reshape(self.N, self.M)
b = np.arange(1, self.N*self.M+1).reshape(self.N, self.M)
c = blaze.array(a, ddesc=self.ddesc1)
d = nd.array(b)
cr = blaze._elwise_eval("c * d + 1", vm=self.vm, ddesc=self.ddesc3)
nr = a * b + 1
assert_array_equal(cr[:], nr, "eval does not work correctly")
def test04(self):
"""Testing elwise_eval() with blaze, dynd and numpy arrays"""
a = np.arange(self.N*self.M).reshape(self.N, self.M)
b = np.arange(1, self.N*self.M+1).reshape(self.N, self.M)
c = blaze.array(a, ddesc=self.ddesc1)
d = nd.array(b)
cr = blaze._elwise_eval("a * c + d", vm=self.vm, ddesc=self.ddesc3)
nr = a * c + d
assert_array_equal(cr[:], nr, "eval does not work correctly")
def test05(self):
"""Testing reductions on blaze arrays"""
if self.vm == "python":
            # Reductions do not work well with Blaze expressions yet
return
a = np.arange(self.N*self.M).reshape(self.N, self.M)
b = np.arange(1, self.N*self.M+1).reshape(self.N, self.M)
b = blaze.array(b, ddesc=self.ddesc1)
cr = blaze._elwise_eval("sum(b + 2)", vm=self.vm, ddesc=self.ddesc3)
nr = np.sum(b + 2)
self.assert_(cr == nr, "eval does not work correctly")
def test06(self):
"""Testing reductions on blaze arrays and axis=0"""
if self.vm == "python":
            # Reductions do not work well with Blaze expressions yet
return
a = np.arange(self.N*self.M).reshape(self.N, self.M)
b = np.arange(1, self.N*self.M+1).reshape(self.N, self.M)
b = blaze.array(b, ddesc=self.ddesc1)
cr = blaze._elwise_eval("sum(b, axis=0)",
vm=self.vm, ddesc=self.ddesc3)
nr = np.sum(b, axis=0)
assert_array_equal(cr, nr, "eval does not work correctly")
# Check for arrays stored on-disk, but fit in a chunk
# Using the Python VM (i.e. Blaze machinery) here
class storagePythonMDTest(storageMDTest):
vm = "python"
# Check for arrays stored on-disk, but are larger than a chunk
class storageLargeMDTest(storageMDTest):
N = 500
# Check for arrays stored on-disk, but are larger than a chunk
# Using the Python VM (i.e. Blaze machinery) here
class storagePythonLargeMDTest(storageMDTest):
N = 500
vm = "python"
# Check for arrays stored on-disk, but fit in a chunk
class storageMDHDF5Test(storageMDTest):
disk = "HDF5"
# Check for arrays stored on-disk, but are larger than a chunk
class storageLargeMDHDF5Test(storageMDTest):
N = 500
disk = "HDF5"
if __name__ == '__main__':
unittest.main()
| {
"repo_name": "talumbau/blaze",
"path": "blaze/compute/tests/test_elwise_eval.py",
"copies": "1",
"size": "15565",
"license": "bsd-3-clause",
"hash": 3863244668738426000,
"line_mean": 35.0300925926,
"line_max": 76,
"alpha_frac": 0.5866366849,
"autogenerated": false,
"ratio": 3.1030701754385963,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4189706860338596,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import unittest
import numpy as np
import blaze
from blaze.datadescriptor import dd_as_py
from datashape import to_numpy_dtype
class TestBasicTypes(unittest.TestCase):
def test_ints(self):
types = ['int8', 'int16', 'int32', 'int64']
for type_ in types:
a = blaze.array(np.arange(3), dshape=type_)
dtype = to_numpy_dtype(a.dshape)
self.assertEqual(dtype, np.dtype(type_))
self.assertEqual(dd_as_py(a._data), [0, 1, 2])
def test_uints(self):
types = ['uint8', 'uint16', 'uint32', 'uint64']
for type_ in types:
a = blaze.array(np.arange(3), dshape=type_)
dtype = to_numpy_dtype(a.dshape)
self.assertEqual(dtype, np.dtype(type_))
self.assertEqual(dd_as_py(a._data), [0, 1, 2])
def test_floats(self):
#types = ['float16', 'float32', 'float64']
types = ['float32', 'float64']
for type_ in types:
a = blaze.array(np.arange(3), dshape=type_)
dtype = to_numpy_dtype(a.dshape)
self.assertEqual(dtype, np.dtype(type_))
if type_ != 'float16':
# dd_as_py does not support this yet
self.assertEqual(dd_as_py(a._data), [0, 1, 2])
def test_complex(self):
types = ['complex64', 'complex128']
for type_ in types:
a = blaze.array(np.arange(3), dshape=type_)
dtype = to_numpy_dtype(a.dshape)
self.assertEqual(dtype, np.dtype(type_))
            # dd_as_py does not support complex numbers yet
self.assertEqual(dd_as_py(a._data), [0, 1, 2])
| {
"repo_name": "xsixing/blaze",
"path": "blaze/tests/test_types.py",
"copies": "2",
"size": "1701",
"license": "bsd-3-clause",
"hash": -7271557337315856000,
"line_mean": 35.1914893617,
"line_max": 64,
"alpha_frac": 0.5661375661,
"autogenerated": false,
"ratio": 3.3029126213592233,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4869050187459223,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import unittest
import numpy as np
import datashape
import blaze
from blaze.datadescriptor import dd_as_py
from blaze.tests.common import MayBeUriTest
from blaze import append
from blaze.py2help import skip
class TestEphemeral(unittest.TestCase):
def test_create_scalar(self):
a = blaze.array(True)
self.assertTrue(isinstance(a, blaze.Array))
self.assertEqual(a.dshape, datashape.dshape('bool'))
self.assertEqual(bool(a), True)
a = blaze.array(-123456)
self.assertTrue(isinstance(a, blaze.Array))
self.assertEqual(a.dshape, datashape.dshape('int32'))
self.assertEqual(int(a), -123456)
a = blaze.array(-1.25e-10)
self.assertTrue(isinstance(a, blaze.Array))
self.assertEqual(a.dshape, datashape.dshape('float64'))
self.assertEqual(float(a), -1.25e-10)
a = blaze.array(-1.25e-10+2.5j)
self.assertTrue(isinstance(a, blaze.Array))
self.assertEqual(a.dshape, datashape.dshape('complex[float64]'))
self.assertEqual(complex(a), -1.25e-10+2.5j)
def test_create_from_numpy(self):
a = blaze.array(np.arange(3))
self.assertTrue(isinstance(a, blaze.Array))
self.assertEqual(dd_as_py(a._data), [0, 1, 2])
def test_create(self):
# A default array (backed by NumPy)
a = blaze.array([1,2,3])
self.assertTrue(isinstance(a, blaze.Array))
self.assertEqual(dd_as_py(a._data), [1, 2, 3])
def test_create_append(self):
# A default array (backed by NumPy, append not supported yet)
a = blaze.array([])
self.assertTrue(isinstance(a, blaze.Array))
self.assertRaises(ValueError, append, a, [1,2,3])
# XXX The tests below still do not work
# self.assertEqual(a[0], 1)
# self.assertEqual(a[1], 2)
# self.assertEqual(a[2], 3)
def test_create_compress(self):
# A compressed array (backed by BLZ)
a = blaze.array(np.arange(1,4), caps={'compress': True})
self.assertTrue(isinstance(a, blaze.Array))
self.assertEqual(dd_as_py(a._data), [1, 2, 3])
# XXX The tests below still do not work
# self.assertEqual(a[0], 1)
# self.assertEqual(a[1], 2)
# self.assertEqual(a[2], 3)
def test_create_iter(self):
# A simple 1D array
a = blaze.array(i for i in range(10))
self.assertTrue(isinstance(a, blaze.Array))
self.assertEqual(a.dshape, datashape.dshape('10, int32'))
self.assertEqual(dd_as_py(a._data), list(range(10)))
# A nested iter
a = blaze.array((i for i in range(x)) for x in range(5))
self.assertTrue(isinstance(a, blaze.Array))
self.assertEqual(a.dshape, datashape.dshape('5, var, int32'))
self.assertEqual(dd_as_py(a._data),
[[i for i in range(x)] for x in range(5)])
        # A list of iterables
a = blaze.array([range(3), (1.5*x for x in range(4)), iter([-1, 1])])
self.assertTrue(isinstance(a, blaze.Array))
self.assertEqual(a.dshape, datashape.dshape('3, var, float64'))
self.assertEqual(dd_as_py(a._data),
[list(range(3)),
[1.5*x for x in range(4)],
[-1, 1]])
def test_create_compress_iter(self):
# A compressed array (backed by BLZ)
a = blaze.array((i for i in range(10)), caps={'compress': True})
self.assertTrue(isinstance(a, blaze.Array))
self.assertEqual(dd_as_py(a._data), list(range(10)))
def test_create_zeros(self):
# A default array
a = blaze.zeros('10, int64')
self.assertTrue(isinstance(a, blaze.Array))
self.assertEqual(dd_as_py(a._data), [0]*10)
def test_create_compress_zeros(self):
# A compressed array (backed by BLZ)
a = blaze.zeros('10, int64', caps={'compress': True})
self.assertTrue(isinstance(a, blaze.Array))
self.assertEqual(dd_as_py(a._data), [0]*10)
def test_create_ones(self):
# A default array
a = blaze.ones('10, int64')
self.assertTrue(isinstance(a, blaze.Array))
self.assertEqual(dd_as_py(a._data), [1]*10)
def test_create_compress_ones(self):
# A compressed array (backed by BLZ)
a = blaze.ones('10, int64', caps={'compress': True})
self.assertTrue(isinstance(a, blaze.Array))
self.assertEqual(dd_as_py(a._data), [1]*10)
def test_create_record(self):
# A simple record array
a = blaze.array([(10, 3.5), (15, 2.25)],
dshape="var, {val: int32; flt: float32}")
self.assertEqual(dd_as_py(a._data), [{'val': 10, 'flt': 3.5},
{'val': 15, 'flt': 2.25}])
# Test field access via attributes
aval = a.val
self.assertEqual(dd_as_py(aval._data), [10, 15])
aflt = a.flt
self.assertEqual(dd_as_py(aflt._data), [3.5, 2.25])
class TestPersistent(MayBeUriTest, unittest.TestCase):
uri = True
def test_create(self):
persist = blaze.Storage(self.rooturi, format="blz")
a = blaze.array([], 'float64', storage=persist)
self.assertTrue(isinstance(a, blaze.Array))
print("->", a.dshape.shape)
self.assertTrue(a.dshape.shape == (0,))
self.assertEqual(dd_as_py(a._data), [])
def test_append(self):
persist = blaze.Storage(self.rooturi, format="blz")
a = blaze.zeros('0, float64', storage=persist)
self.assertTrue(isinstance(a, blaze.Array))
append(a,list(range(10)))
self.assertEqual(dd_as_py(a._data), list(range(10)))
# Using a 1-dim as the internal dimension
def test_append2(self):
persist = blaze.Storage(self.rooturi, format="blz")
a = blaze.empty('0, 2, float64', storage=persist)
self.assertTrue(isinstance(a, blaze.Array))
lvals = [[i,i*2] for i in range(10)]
append(a,lvals)
self.assertEqual(dd_as_py(a._data), lvals)
def test_open(self):
persist = blaze.Storage(self.rooturi, format="blz")
a = blaze.ones('0, float64', storage=persist)
append(a,range(10))
# Re-open the dataset in URI
a2 = blaze.open(persist)
self.assertTrue(isinstance(a2, blaze.Array))
self.assertEqual(dd_as_py(a2._data), list(range(10)))
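# Comment-only sketch (added for clarity, not part of the original tests): the
# persistent-array round trip exercised by TestPersistent above, assuming the
# same Storage/open/append API; the URI shown is hypothetical.
#
#   persist = blaze.Storage('blz://tmp/mydata', format="blz")
#   a = blaze.zeros('0, float64', storage=persist)   # create on disk
#   append(a, range(10))                             # grow it in place
#   a2 = blaze.open(persist)                         # reopen from the URI later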
if __name__ == '__main__':
unittest.main(verbosity=2)
| {
"repo_name": "aaronmartin0303/blaze",
"path": "blaze/tests/test_array_creation.py",
"copies": "1",
"size": "6505",
"license": "bsd-3-clause",
"hash": 276563344221429470,
"line_mean": 38.186746988,
"line_max": 77,
"alpha_frac": 0.5949269792,
"autogenerated": false,
"ratio": 3.17626953125,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9261729188995887,
"avg_score": 0.0018934642908224862,
"num_lines": 166
} |
from __future__ import absolute_import, division, print_function
import unittest
import numpy as np
from datashape import dshape
import blaze
from blaze.compute.function import BlazeFunc
from dynd import nd, _lowlevel
def create_overloaded_add():
# Create an overloaded blaze func, populate it with
# some ckernel implementations extracted from numpy,
# and test some calls on it.
myfunc = BlazeFunc('test', 'myfunc')
# overload int32 -> np.add
ckd = _lowlevel.ckernel_deferred_from_ufunc(np.add,
(np.int32, np.int32, np.int32),
False)
myfunc.add_overload("(A... * int32, A... * int32) -> A... * int32", ckd)
# overload int16 -> np.subtract (so we can see the difference)
ckd = _lowlevel.ckernel_deferred_from_ufunc(np.subtract,
(np.int16, np.int16, np.int16),
False)
myfunc.add_overload("(A... * int16, A... * int16) -> A... * int16", ckd)
return myfunc
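# Minimal usage sketch (comment only, not in the original file): with the two
# overloads registered above, dispatch follows the argument dshapes, so int32
# inputs run np.add while int16 inputs run np.subtract, e.g.
#
#   myfunc = create_overloaded_add()
#   blaze.eval(myfunc(blaze.array([3, 4]), blaze.array([1, 2])))      # [4, 6]
#   blaze.eval(myfunc(blaze.array([3, 4], dshape='int16'),
#                     blaze.array([1, 2], dshape='int16')))           # [2, 2]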
class TestBlazeFunctionFromUFunc(unittest.TestCase):
def test_overload(self):
myfunc = create_overloaded_add()
# Test int32 overload -> add
a = blaze.eval(myfunc(blaze.array([3, 4]), blaze.array([1, 2])))
self.assertEqual(a.dshape, dshape('2 * int32'))
self.assertEqual(nd.as_py(a._data.dynd_arr()), [4, 6])
# Test int16 overload -> subtract
a = blaze.eval(myfunc(blaze.array([3, 4], dshape='int16'),
blaze.array([1, 2], dshape='int16')))
self.assertEqual(a.dshape, dshape('2 * int16'))
self.assertEqual(nd.as_py(a._data.dynd_arr()), [2, 2])
def test_overload_coercion(self):
myfunc = create_overloaded_add()
# Test type promotion to int32
a = blaze.eval(myfunc(blaze.array([3, 4], dshape='int16'),
blaze.array([1, 2])))
self.assertEqual(a.dshape, dshape('2 * int32'))
self.assertEqual(nd.as_py(a._data.dynd_arr()), [4, 6])
a = blaze.eval(myfunc(blaze.array([3, 4]),
blaze.array([1, 2], dshape='int16')))
self.assertEqual(a.dshape, dshape('2 * int32'))
self.assertEqual(nd.as_py(a._data.dynd_arr()), [4, 6])
# Test type promotion to int16
a = blaze.eval(myfunc(blaze.array([3, 4], dshape='int8'),
blaze.array([1, 2], dshape='int8')))
self.assertEqual(a.dshape, dshape('2 * int16'))
self.assertEqual(nd.as_py(a._data.dynd_arr()), [2, 2])
def test_nesting(self):
myfunc = create_overloaded_add()
# A little bit of nesting
a = blaze.eval(myfunc(myfunc(blaze.array([3, 4]), blaze.array([1, 2])),
blaze.array([2, 10])))
self.assertEqual(a.dshape, dshape('2 * int32'))
self.assertEqual(nd.as_py(a._data.dynd_arr()), [6, 16])
def test_nesting_and_coercion(self):
myfunc = create_overloaded_add()
# More nesting, with conversions
a = blaze.eval(myfunc(myfunc(blaze.array([1, 2]),
blaze.array([-2, 10])),
myfunc(blaze.array([1, 5], dshape='int16'),
blaze.array(3, dshape='int16'))))
self.assertEqual(a.dshape, dshape('2 * int32'))
self.assertEqual(nd.as_py(a._data.dynd_arr()), [-3, 14])
def test_overload_different_argcount(self):
myfunc = BlazeFunc('test', 'ovld')
# Two parameter overload
ckd = _lowlevel.ckernel_deferred_from_ufunc(np.add,
(np.int32,) * 3,
False)
myfunc.add_overload("(A... * int32, A... * int32) -> A... * int32", ckd)
# One parameter overload
ckd = _lowlevel.ckernel_deferred_from_ufunc(np.negative,
(np.int32,) * 2, False)
myfunc.add_overload("(A... * int16, A... * int16) -> A... * int16", ckd)
return myfunc
if __name__ == '__main__':
# TestBlazeKernel('test_kernel').debug()
unittest.main()
| {
"repo_name": "mwiebe/blaze",
"path": "blaze/tests/test_blaze_functions.py",
"copies": "1",
"size": "4231",
"license": "bsd-3-clause",
"hash": 6340688238991609000,
"line_mean": 39.2952380952,
"line_max": 80,
"alpha_frac": 0.5327345781,
"autogenerated": false,
"ratio": 3.344664031620553,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4377398609720553,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import unittest
import numpy as np
from datashape import dshape
import blaze
from blaze.compute.function import ElementwiseBlazeFunc
from dynd import nd, _lowlevel
def create_overloaded_add():
# Create an overloaded blaze func, populate it with
# some ckernel implementations extracted from numpy,
# and test some calls on it.
myfunc = ElementwiseBlazeFunc('test', 'myfunc')
# overload int32 -> np.add
ckd = _lowlevel.ckernel_deferred_from_ufunc(np.add,
(np.int32, np.int32, np.int32),
False)
myfunc.add_overload("(int32, int32) -> int32", ckd)
# overload int16 -> np.subtract (so we can see the difference)
ckd = _lowlevel.ckernel_deferred_from_ufunc(np.subtract,
(np.int16, np.int16, np.int16),
False)
myfunc.add_overload("(int16, int16) -> int16", ckd)
return myfunc
class TestBlazeFunctionFromUFunc(unittest.TestCase):
def test_overload(self):
myfunc = create_overloaded_add()
# Test int32 overload -> add
a = blaze.eval(myfunc(blaze.array([3, 4]), blaze.array([1, 2])))
self.assertEqual(a.dshape, dshape('2 * int32'))
self.assertEqual(nd.as_py(a.ddesc.dynd_arr()), [4, 6])
# Test int16 overload -> subtract
a = blaze.eval(myfunc(blaze.array([3, 4], dshape='int16'),
blaze.array([1, 2], dshape='int16')))
self.assertEqual(a.dshape, dshape('2 * int16'))
self.assertEqual(nd.as_py(a.ddesc.dynd_arr()), [2, 2])
def test_overload_coercion(self):
myfunc = create_overloaded_add()
# Test type promotion to int32
a = blaze.eval(myfunc(blaze.array([3, 4], dshape='int16'),
blaze.array([1, 2])))
self.assertEqual(a.dshape, dshape('2 * int32'))
self.assertEqual(nd.as_py(a.ddesc.dynd_arr()), [4, 6])
a = blaze.eval(myfunc(blaze.array([3, 4]),
blaze.array([1, 2], dshape='int16')))
self.assertEqual(a.dshape, dshape('2 * int32'))
self.assertEqual(nd.as_py(a.ddesc.dynd_arr()), [4, 6])
# Test type promotion to int16
a = blaze.eval(myfunc(blaze.array([3, 4], dshape='int8'),
blaze.array([1, 2], dshape='int8')))
self.assertEqual(a.dshape, dshape('2 * int16'))
self.assertEqual(nd.as_py(a.ddesc.dynd_arr()), [2, 2])
def test_nesting(self):
myfunc = create_overloaded_add()
# A little bit of nesting
a = blaze.eval(myfunc(myfunc(blaze.array([3, 4]), blaze.array([1, 2])),
blaze.array([2, 10])))
self.assertEqual(a.dshape, dshape('2 * int32'))
self.assertEqual(nd.as_py(a.ddesc.dynd_arr()), [6, 16])
def test_nesting_and_coercion(self):
myfunc = create_overloaded_add()
# More nesting, with conversions
a = blaze.eval(myfunc(myfunc(blaze.array([1, 2]),
blaze.array([-2, 10])),
myfunc(blaze.array([1, 5], dshape='int16'),
blaze.array(3, dshape='int16'))))
self.assertEqual(a.dshape, dshape('2 * int32'))
self.assertEqual(nd.as_py(a.ddesc.dynd_arr()), [-3, 14])
def test_overload_different_argcount(self):
myfunc = ElementwiseBlazeFunc('test', 'ovld')
# Two parameter overload
ckd = _lowlevel.ckernel_deferred_from_ufunc(np.add,
(np.int32,) * 3,
False)
myfunc.add_overload("(int32, int32) -> int32", ckd)
# One parameter overload
ckd = _lowlevel.ckernel_deferred_from_ufunc(np.negative,
(np.int32,) * 2, False)
myfunc.add_overload("(int16, int16) -> int16", ckd)
return myfunc
if __name__ == '__main__':
# TestBlazeKernel('test_kernel').debug()
unittest.main()
| {
"repo_name": "sethkontny/blaze",
"path": "blaze/tests/test_blaze_functions.py",
"copies": "1",
"size": "4180",
"license": "bsd-3-clause",
"hash": 5558677850027961000,
"line_mean": 38.8095238095,
"line_max": 79,
"alpha_frac": 0.5459330144,
"autogenerated": false,
"ratio": 3.365539452495974,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4411472466895974,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import unittest
import numpy as np
from datashape import dshape
import blaze
from blaze.compute.function import function, kernel
from blaze import array, py2help
from dynd import nd, _lowlevel
# f
@function('X, Y, float32 -> X, Y, float32 -> X, Y, float32')
def f(a, b):
return a
@function('X, Y, complex64 -> X, Y, complex64 -> X, Y, complex64')
def f(a, b):
return a
@function('X, Y, complex128 -> X, Y, complex128 -> X, Y, complex128')
def f(a, b):
return a
# g
@function('X, Y, float32 -> X, Y, float32 -> X, int32')
def g(a, b):
return a
@function('X, Y, float32 -> ..., float32 -> X, int32')
def g(a, b):
return a
def create_overloaded_add():
# Create an overloaded blaze func, populate it with
# some ckernel implementations extracted from numpy,
# and test some calls on it.
#d = blaze.overloading.Dispatcher()
@function('A -> A -> A')
def myfunc(x, y):
raise NotImplementedError
# overload int32 -> np.add
ckd = _lowlevel.ckernel_deferred_from_ufunc(np.add,
(np.int32, np.int32, np.int32), False)
kernel(myfunc, "ckernel", ckd,
"A..., int32 -> A..., int32 -> A..., int32")
# overload int16 -> np.subtract (so we can see the difference)
ckd = _lowlevel.ckernel_deferred_from_ufunc(np.subtract,
(np.int16, np.int16, np.int16), False)
kernel(myfunc, "ckernel", ckd,
"A..., int16 -> A..., int16 -> A..., int16")
return myfunc
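# Comment-only sketch mirroring the (skipped) tests below, added for clarity:
# the @function stub plus the two ckernel registrations above mean that
#
#   myfunc = create_overloaded_add()
#   blaze.eval(myfunc(blaze.array([3, 4]), blaze.array([1, 2])))
#
# dispatches to the int32 np.add kernel (giving [4, 6]), while int16 inputs
# would route to the np.subtract kernel instead.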
#------------------------------------------------------------------------
# Tests
#------------------------------------------------------------------------
class TestBlazeKernel(unittest.TestCase):
def test_kernel(self):
A = array([8, 9], dshape('2, int32'))
res = f(A, A)
self.assertEqual(str(res.dshape), '1, 2, float32')
self.assertEqual(len(res.expr), 2)
graph, ctx = res.expr
self.assertEqual(len(graph.args), 2)
self.assertEqual(len(ctx.constraints), 0)
self.assertEqual(len(ctx.params), 1)
# res.view()
class TestBlazeFunctionFromUFunc(unittest.TestCase):
@py2help.skip
def test_overload(self):
myfunc = create_overloaded_add()
# Test int32 overload -> add
a = blaze.eval(myfunc(blaze.array([3,4]), blaze.array([1,2])))
self.assertEqual(a.dshape, blaze.dshape('2, int32'))
self.assertEqual(nd.as_py(a._data.dynd_arr()), [4, 6])
# Test int16 overload -> subtract
a = blaze.eval(myfunc(blaze.array([3,4], dshape='int16'),
blaze.array([1,2], dshape='int16')))
self.assertEqual(a.dshape, blaze.dshape('2, int16'))
self.assertEqual(nd.as_py(a._data.dynd_arr()), [2, 2])
@py2help.skip
def test_overload_coercion(self):
myfunc = create_overloaded_add()
# Test type promotion to int32
a = blaze.eval(myfunc(blaze.array([3,4], dshape='int16'),
blaze.array([1,2])))
self.assertEqual(a.dshape, blaze.dshape('2, int32'))
self.assertEqual(nd.as_py(a._data.dynd_arr()), [4, 6])
a = blaze.eval(myfunc(blaze.array([3,4]),
blaze.array([1,2], dshape='int16')))
self.assertEqual(a.dshape, blaze.dshape('2, int32'))
self.assertEqual(nd.as_py(a._data.dynd_arr()), [4, 6])
# Test type promotion to int16
a = blaze.eval(myfunc(blaze.array([3,4], dshape='int8'),
blaze.array([1,2], dshape='int8')))
self.assertEqual(a.dshape, blaze.dshape('2, int16'))
self.assertEqual(nd.as_py(a._data.dynd_arr()), [2, 2])
@py2help.skip
def test_nesting(self):
myfunc = create_overloaded_add()
# A little bit of nesting
a = blaze.eval(myfunc(myfunc(blaze.array([3,4]), blaze.array([1,2])),
blaze.array([2,10])))
self.assertEqual(a.dshape, blaze.dshape('2, int32'))
self.assertEqual(nd.as_py(a._data.dynd_arr()), [6, 16])
@py2help.skip
def test_nesting_and_coercion(self):
myfunc = create_overloaded_add()
# More nesting, with conversions
a = blaze.eval(myfunc(myfunc(blaze.array([1,2]), blaze.array([-2, 10])),
myfunc(blaze.array([1, 5], dshape='int16'),
blaze.array(3, dshape='int16'))))
self.assertEqual(a.dshape, blaze.dshape('2, int32'))
self.assertEqual(nd.as_py(a._data.dynd_arr()), [-3, 14])
if __name__ == '__main__':
# TestBlazeKernel('test_kernel').debug()
unittest.main()
| {
"repo_name": "aaronmartin0303/blaze",
"path": "blaze/tests/test_blaze_functions.py",
"copies": "1",
"size": "4654",
"license": "bsd-3-clause",
"hash": 3194633730198116000,
"line_mean": 32.4820143885,
"line_max": 80,
"alpha_frac": 0.5640309411,
"autogenerated": false,
"ratio": 3.0985352862849536,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41625662273849534,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import unittest
try:
# Python 3
from unittest import mock
except ImportError:
# Python 2
import mock
try:
# Python 2
from StringIO import StringIO
except ImportError:
# Python 3
from io import StringIO
import time
from .context import print_utilities as p_utils
DEFAULT_MSG = 'TEST'
MOCK_TIMESTAMP = '20000101-000000'
MOCK_TIMESTAMP_2 = '20000101-000003'
DEFAULT_PRE = '['
DEFAULT_POST = ']'
DEFAULT_SEP = ':'
TEST_SLEEP = 3
def dummy(*args, **kwargs):
time.sleep(TEST_SLEEP)
class PrintUtilitiesTests(unittest.TestCase):
def setUp(self):
self.out = StringIO()
def tearDown(self):
del self.out
# @mock.patch(
# 'gkit_utils.time_utilities.get_timestamp', return_value=MOCK_TIMESTAMP)
# def test_timeit(self, mfunc):
# tag = (
# '[{ts}][STARTUP]: PROGRAM STARTED...\n\n**************************'
# '**********************************************\n\n***************'
# '*********************************************************\n\n'
# '[{ts}] PROGRAM ENDED.\nElapsed:\n\t00:00:{secs}').format(
# ts=MOCK_TIMESTAMP, secs=str(TEST_SLEEP).zfill(2))
# p_utils.timeit(func=dummy, out=self.out, stamped=True)
# print('\n' + '#' * 72 + '\n')
# print(self.out.getvalue().strip())
# print('\n' + '#' * 72 + '\n')
# print(tag)
# print('\n' + '#' * 72 + '\n')
# self.assertEqual(tag, self.out.getvalue().strip())
def test_print_divider(self):
token = '*'
count = 72
tag = token * count
p_utils.print_divider(
token=token, count=count, pre='', post='', out=self.out)
self.assertEqual(tag, self.out.getvalue().strip())
@mock.patch(
'gkit_utils.time_utilities.get_timestamp', return_value=MOCK_TIMESTAMP)
def test_print_message(self, mfunc):
tag = (
DEFAULT_PRE + MOCK_TIMESTAMP + DEFAULT_POST +
DEFAULT_PRE + DEFAULT_MSG + DEFAULT_POST + DEFAULT_SEP + ' ')
p_utils.print_message(
DEFAULT_MSG, tags=[DEFAULT_MSG], out=self.out, stamped=True)
self.assertEqual(tag + DEFAULT_MSG, self.out.getvalue().strip())
@mock.patch(
'gkit_utils.time_utilities.get_timestamp', return_value=MOCK_TIMESTAMP)
def test_print_event(self, mfunc):
tag = (
DEFAULT_PRE + MOCK_TIMESTAMP + DEFAULT_POST +
DEFAULT_PRE + 'EVENT' + DEFAULT_POST + DEFAULT_SEP + ' ')
p_utils.print_event(DEFAULT_MSG, out=self.out, stamped=True)
self.assertEqual(tag + DEFAULT_MSG, self.out.getvalue().strip())
@mock.patch(
'gkit_utils.time_utilities.get_timestamp', return_value=MOCK_TIMESTAMP)
def test_print_error(self, mfunc):
tag = (
DEFAULT_PRE + MOCK_TIMESTAMP + DEFAULT_POST +
DEFAULT_PRE + 'ERROR' + DEFAULT_POST + DEFAULT_SEP + ' ')
p_utils.print_error(DEFAULT_MSG, out=self.out, stamped=True)
self.assertEqual(tag + DEFAULT_MSG, self.out.getvalue().strip())
@mock.patch(
'gkit_utils.time_utilities.get_timestamp', return_value=MOCK_TIMESTAMP)
def test_print_success(self, mfunc):
tag = (
DEFAULT_PRE + MOCK_TIMESTAMP + DEFAULT_POST +
DEFAULT_PRE + 'SUCCESS' + DEFAULT_POST + DEFAULT_SEP + ' ')
p_utils.print_success(DEFAULT_MSG, post='', out=self.out, stamped=True)
self.assertEqual(tag + DEFAULT_MSG, self.out.getvalue().strip())
@mock.patch(
'gkit_utils.time_utilities.get_timestamp', return_value=MOCK_TIMESTAMP)
def test_print_startup(self, mfunc):
tag = (
DEFAULT_PRE + MOCK_TIMESTAMP + DEFAULT_POST +
DEFAULT_PRE + 'STARTUP' + DEFAULT_POST + DEFAULT_SEP + ' ')
p_utils.print_startup(DEFAULT_MSG, out=self.out, stamped=True)
self.assertEqual(tag + DEFAULT_MSG, self.out.getvalue().strip())
| {
"repo_name": "gannon93/gkit_utils",
"path": "tests/test_print_utilities.py",
"copies": "1",
"size": "4028",
"license": "mit",
"hash": 4336444804499228000,
"line_mean": 32.2892561983,
"line_max": 81,
"alpha_frac": 0.5757199603,
"autogenerated": false,
"ratio": 3.502608695652174,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4578328655952174,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import urllib, json
from ... import py2help
if py2help.PY2:
from urllib2 import urlopen
else:
from urllib.request import urlopen
def get_remote_datashape(url):
"""Gets the datashape of a remote array URL."""
response = urlopen(url + '?r=datashape')
return response.read().decode('utf8')
def get_remote_json(url):
"""Gets the JSON data of a remote array URL."""
response = urlopen(url + '?r=data.json')
return response.read()
def create_remote_session(base_url):
"""Creates a compute session rooted on the remote array URL."""
params = [('r', 'create_session')]
response = urlopen(base_url, urllib.urlencode(params))
return json.loads(response.read())
def close_remote_session(session_url):
"""Closes the remote compute session."""
params = [('r', 'close_session')]
response = urlopen(session_url, urllib.urlencode(params))
return json.loads(response.read())
def add_computed_fields(session_url, url, fields, rm_fields, fnname):
"""Creates a new remote array with the added computed fields."""
reqdata = {
"input": str(url),
"fields": [[str(name), str(dt), str(expr)]
for name, dt, expr in fields]
}
if len(rm_fields) > 0:
reqdata['rm_fields'] = [str(name) for name in rm_fields]
if fnname is not None:
reqdata['fnname'] = str(fnname)
params = [('r', 'add_computed_fields'),
('json', json.dumps(reqdata))]
response = urlopen(session_url, urllib.urlencode(params))
return json.loads(response.read())
def make_computed_fields(session_url, url, replace_undim, fields, fnname):
"""Creates a new remote array with the computed fields."""
reqdata = {
"input": str(url),
"replace_undim": int(replace_undim),
"fields": [[str(name), str(dt), str(expr)]
for name, dt, expr in fields]
}
if fnname is not None:
reqdata['fnname'] = str(fnname)
params = [('r', 'make_computed_fields'),
('json', json.dumps(reqdata))]
response = urlopen(session_url, urllib.urlencode(params))
return json.loads(response.read())
def sort(session_url, url, field):
"""Creates a new remote array which is sorted by field."""
reqdata = {
"input": str(url),
"field": field
}
params = [('r', 'sort'),
('json', json.dumps(reqdata))]
response = urlopen(session_url, urllib.urlencode(params))
return json.loads(response.read())
def groupby(session_url, url, fields):
reqdata = {
"input": str(url),
"fields": fields
}
params = [('r', 'groupby'),
('json', json.dumps(reqdata))]
response = urlopen(session_url, urllib.urlencode(params))
return json.loads(response.read())
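# Typical request flow built from the helpers above (comment-only sketch, not
# part of the original module; the base URL and the 'session' key used to pull
# the session URL out of the reply are assumptions):
#
#   info = create_remote_session('http://example.com/arrays/mydata')
#   session_url = info['session']
#   sort(session_url, 'http://example.com/arrays/mydata', 'field0')
#   close_remote_session(session_url)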
| {
"repo_name": "cezary12/blaze",
"path": "blaze/io/client/requests.py",
"copies": "7",
"size": "2898",
"license": "bsd-3-clause",
"hash": -5464330104047525000,
"line_mean": 33.9156626506,
"line_max": 74,
"alpha_frac": 0.61042098,
"autogenerated": false,
"ratio": 3.7058823529411766,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7816303332941177,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import urllib
import json
from ... import py2help
if py2help.PY2:
from urllib2 import urlopen
else:
from urllib.request import urlopen
def get_remote_datashape(url):
"""Gets the datashape of a remote array URL."""
response = urlopen(url + '?r=datashape')
return response.read().decode('utf8')
def get_remote_json(url):
"""Gets the JSON data of a remote array URL."""
response = urlopen(url + '?r=data.json')
return response.read()
def create_remote_session(base_url):
"""Creates a compute session rooted on the remote array URL."""
params = [('r', 'create_session')]
response = urlopen(base_url, urllib.urlencode(params))
return json.loads(response.read())
def close_remote_session(session_url):
"""Closes the remote compute session."""
params = [('r', 'close_session')]
response = urlopen(session_url, urllib.urlencode(params))
return json.loads(response.read())
def add_computed_fields(session_url, url, fields, rm_fields, fnname):
"""Creates a new remote array with the added computed fields."""
reqdata = {
"input": str(url),
"fields": [[str(name), str(dt), str(expr)]
for name, dt, expr in fields]
}
if len(rm_fields) > 0:
reqdata['rm_fields'] = [str(name) for name in rm_fields]
if fnname is not None:
reqdata['fnname'] = str(fnname)
params = [('r', 'add_computed_fields'),
('json', json.dumps(reqdata))]
response = urlopen(session_url, urllib.urlencode(params))
return json.loads(response.read())
def make_computed_fields(session_url, url, replace_undim, fields, fnname):
"""Creates a new remote array with the computed fields."""
reqdata = {
"input": str(url),
"replace_undim": int(replace_undim),
"fields": [[str(name), str(dt), str(expr)]
for name, dt, expr in fields]
}
if fnname is not None:
reqdata['fnname'] = str(fnname)
params = [('r', 'make_computed_fields'),
('json', json.dumps(reqdata))]
response = urlopen(session_url, urllib.urlencode(params))
return json.loads(response.read())
def sort(session_url, url, field):
"""Creates a new remote array which is sorted by field."""
reqdata = {
"input": str(url),
"field": field
}
params = [('r', 'sort'),
('json', json.dumps(reqdata))]
response = urlopen(session_url, urllib.urlencode(params))
return json.loads(response.read())
def groupby(session_url, url, fields):
reqdata = {
"input": str(url),
"fields": fields
}
params = [('r', 'groupby'),
('json', json.dumps(reqdata))]
response = urlopen(session_url, urllib.urlencode(params))
return json.loads(response.read())
| {
"repo_name": "mwiebe/blaze",
"path": "blaze/io/client/requests.py",
"copies": "6",
"size": "2905",
"license": "bsd-3-clause",
"hash": -7200900151490190000,
"line_mean": 33.1764705882,
"line_max": 74,
"alpha_frac": 0.6110154905,
"autogenerated": false,
"ratio": 3.705357142857143,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7316372633357142,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import urwid
TABSTOP = 8
class SourceLine(urwid.FlowWidget):
def __init__(self, dbg_ui, text, line_nr='', attr=None, has_breakpoint=False):
self.dbg_ui = dbg_ui
self.text = text
self.attr = attr
self.line_nr = line_nr
self.has_breakpoint = has_breakpoint
self.is_current = False
self.highlight = False
def selectable(self):
return True
def set_current(self, is_current):
self.is_current = is_current
self._invalidate()
def set_highlight(self, highlight):
self.highlight = highlight
self._invalidate()
def set_breakpoint(self, has_breakpoint):
self.has_breakpoint = has_breakpoint
self._invalidate()
def rows(self, size, focus=False):
return 1
def render(self, size, focus=False):
from pudb.debugger import CONFIG
render_line_nr = CONFIG["line_numbers"]
maxcol = size[0]
hscroll = self.dbg_ui.source_hscroll_start
# attrs is a list of words like 'focused' and 'breakpoint'
attrs = []
if self.is_current:
crnt = ">"
attrs.append("current")
else:
crnt = " "
if self.has_breakpoint:
bp = "*"
attrs.append("breakpoint")
else:
bp = " "
if focus:
attrs.append("focused")
elif self.highlight:
if not self.has_breakpoint:
attrs.append("highlighted")
text = self.text
if not attrs and self.attr is not None:
attr = self.attr + [("source", None)]
else:
attr = [(" ".join(attrs+["source"]), None)]
from urwid.util import apply_target_encoding, trim_text_attr_cs
# build line prefix ---------------------------------------------------
line_prefix = ""
line_prefix_attr = []
if render_line_nr:
line_prefix_attr = [("line number", len(self.line_nr))]
line_prefix = self.line_nr
line_prefix = crnt+bp+line_prefix
line_prefix_attr = [("source", 1), ("breakpoint marker", 1)] \
+ line_prefix_attr
# assume rendered width is same as len
line_prefix_len = len(line_prefix)
encoded_line_prefix, line_prefix_cs = apply_target_encoding(line_prefix)
assert len(encoded_line_prefix) == len(line_prefix)
# otherwise we'd have to adjust line_prefix_attr... :/
# shipout, encoding ---------------------------------------------------
cs = []
encoded_text_segs = []
encoded_attr = []
i = 0
for seg_attr, seg_len in attr:
if seg_len is None:
# means: gobble up remainder of text and rest of line
# and fill with attribute
l = hscroll+maxcol
remaining_text = text[i:]
encoded_seg_text, seg_cs = apply_target_encoding(
remaining_text + l*" ")
encoded_attr.append((seg_attr, len(remaining_text)+l))
else:
unencoded_seg_text = text[i:i+seg_len]
encoded_seg_text, seg_cs = apply_target_encoding(unencoded_seg_text)
adjustment = len(encoded_seg_text) - len(unencoded_seg_text)
encoded_attr.append((seg_attr, seg_len + adjustment))
i += seg_len
encoded_text_segs.append(encoded_seg_text)
cs.extend(seg_cs)
encoded_text = b"".join(encoded_text_segs)
encoded_text, encoded_attr, cs = trim_text_attr_cs(
encoded_text, encoded_attr, cs,
hscroll, hscroll+maxcol-line_prefix_len)
encoded_text = encoded_line_prefix + encoded_text
encoded_attr = line_prefix_attr + encoded_attr
cs = line_prefix_cs + cs
return urwid.TextCanvas([encoded_text], [encoded_attr], [cs], maxcol=maxcol)
def keypress(self, size, key):
return key
def format_source(debugger_ui, lines, breakpoints):
lineno_format = "%%%dd " % (len(str(len(lines))))
try:
import pygments # noqa
except ImportError:
return [SourceLine(debugger_ui,
line.rstrip("\n\r").expandtabs(TABSTOP),
lineno_format % (i+1), None,
has_breakpoint=i+1 in breakpoints)
for i, line in enumerate(lines)]
else:
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatter import Formatter
import pygments.token as t
result = []
argument_parser = ArgumentParser(t)
# NOTE: Tokens of the form t.Token.<name> are not native
# Pygments token types; they are user defined token
# types.
#
# t.Token is a Pygments token creator object
# (see http://pygments.org/docs/tokens/)
#
# The user defined token types get assigned by
# one of several translation operations at the
# beginning of add_snippet().
#
ATTR_MAP = {
t.Token: "source",
t.Keyword.Namespace: "namespace",
t.Token.Argument: "argument",
t.Token.Dunder: "dunder",
t.Token.Keyword2: 'keyword2',
t.Keyword: "keyword",
t.Literal: "literal",
t.Name.Exception: "exception",
t.Name.Function: "name",
t.Name.Class: "name",
t.Name.Builtin: "builtin",
t.Name.Builtin.Pseudo: "pseudo",
t.Punctuation: "punctuation",
t.Operator: "operator",
t.String: "string",
# XXX: Single and Double don't actually work yet.
# See https://bitbucket.org/birkenfeld/pygments-main/issue/685
t.String.Double: "doublestring",
t.String.Single: "singlestring",
t.String.Backtick: "backtick",
t.String.Doc: "docstring",
t.Comment: "comment",
}
# Token translation table. Maps token types and their
# associated strings to new token types.
ATTR_TRANSLATE = {
t.Keyword: {
'class': t.Token.Keyword2,
'def': t.Token.Keyword2,
'exec': t.Token.Keyword2,
'lambda': t.Token.Keyword2,
'print': t.Token.Keyword2,
},
t.Operator:{
'.': t.Token,
},
t.Name.Builtin.Pseudo:{
'self': t.Token,
},
t.Name.Builtin:{
'object': t.Name.Class,
},
}
class UrwidFormatter(Formatter):
def __init__(subself, **options):
Formatter.__init__(subself, **options)
subself.current_line = ""
subself.current_attr = []
subself.lineno = 1
def format(subself, tokensource, outfile):
def add_snippet(ttype, s):
if not s:
return
# Find function arguments. When found, change their
# ttype to t.Token.Argument
new_ttype = argument_parser.parse_token(ttype, s)
if new_ttype:
ttype = new_ttype
# Translate tokens
if ttype in ATTR_TRANSLATE:
if s in ATTR_TRANSLATE[ttype]:
ttype = ATTR_TRANSLATE[ttype][s]
# Translate dunder method tokens
if ttype == t.Name.Function and s.startswith('__') and s.endswith('__'):
ttype = t.Token.Dunder
while ttype not in ATTR_MAP:
if ttype.parent is not None:
ttype = ttype.parent
else:
raise RuntimeError(
"untreated token type: %s" % str(ttype))
attr = ATTR_MAP[ttype]
subself.current_line += s
subself.current_attr.append((attr, len(s)))
def shipout_line():
result.append(
SourceLine(debugger_ui,
subself.current_line,
lineno_format % subself.lineno,
subself.current_attr,
has_breakpoint=subself.lineno in breakpoints))
subself.current_line = ""
subself.current_attr = []
subself.lineno += 1
for ttype, value in tokensource:
while True:
newline_pos = value.find("\n")
if newline_pos == -1:
add_snippet(ttype, value)
break
else:
add_snippet(ttype, value[:newline_pos])
shipout_line()
value = value[newline_pos+1:]
if subself.current_line:
shipout_line()
highlight("".join(l.expandtabs(TABSTOP) for l in lines),
PythonLexer(stripnl=False), UrwidFormatter())
return result
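# Worked example of the re-tagging above (comment only, added for clarity).
# For a line like "def __init__(self, x):" the tokens end up roughly as:
#   'def'        t.Keyword                  -> t.Token.Keyword2  (ATTR_TRANSLATE)
#   '__init__'   t.Name.Function            -> t.Token.Dunder    (dunder rule)
#   'self', 'x'  inside the parentheses     -> t.Token.Argument  (ArgumentParser)
#   'self'       elsewhere (Builtin.Pseudo) -> t.Token           (ATTR_TRANSLATE)
# and each resulting type is then looked up in ATTR_MAP (walking up parent
# types if needed) to pick the urwid display attribute.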
class ParseState(object):
'''States for the ArgumentParser class'''
idle = 1
found_function = 2
found_open_paren = 3
class ArgumentParser(object):
'''Parse source code tokens and identify function arguments.
This parser implements a state machine which accepts
Pygments tokens, delivered sequentially from the beginning
of a source file to its end.
parse_token() processes each token (and its associated string)
and returns None if that token does not require modification.
When it finds a token which represents a function
argument, it returns the correct token type for that
item (the caller should then replace the associated item's
token type with the returned type)
'''
def __init__(self, pygments_token):
self.t = pygments_token
self.state = ParseState.idle
self.paren_level = 0
def parse_token(self, token, s):
'''Parse token. Return None or replacement token type'''
if self.state == ParseState.idle:
if token is self.t.Name.Function:
self.state = ParseState.found_function
self.paren_level = 0
elif self.state == ParseState.found_function:
if token is self.t.Punctuation and s == '(':
self.state = ParseState.found_open_paren
self.paren_level = 1
else:
if ((token is self.t.Name) or
(token is self.t.Name.Builtin.Pseudo and s == 'self')):
return self.t.Token.Argument
elif token is self.t.Punctuation and s == ')':
self.paren_level -= 1
elif token is self.t.Punctuation and s == '(':
self.paren_level += 1
if self.paren_level == 0:
self.state = ParseState.idle
return None
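# Comment-only walkthrough of the state machine above (added for clarity):
# feeding the tokens of "def f(a, b):" moves the parser from idle to
# found_function at 'f' (t.Name.Function), then to found_open_paren at '(',
# where 'a' and 'b' (t.Name) are reported as t.Token.Argument; the closing
# ')' brings paren_level back to 0 and returns the parser to idle.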
| {
"repo_name": "albfan/pudb",
"path": "pudb/source_view.py",
"copies": "1",
"size": "11598",
"license": "mit",
"hash": -7398406091849711000,
"line_mean": 34.4678899083,
"line_max": 92,
"alpha_frac": 0.5043973099,
"autogenerated": false,
"ratio": 4.551805337519623,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5556202647419622,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import uuid
import operator
import numbers
from glue.external import six
from glue.core.component_link import BinaryComponentLink
from glue.core.subset import InequalitySubsetState
from glue.core.message import DataRenameComponentMessage
__all__ = ['ComponentID', 'PixelComponentID', 'ComponentIDDict', 'ComponentIDList']
# access to ComponentIDs via .item[name]
class ComponentIDList(list):
def __contains__(self, cid):
if isinstance(cid, six.string_types):
for c in self:
if cid == c.label:
return True
else:
return False
else:
return list.__contains__(self, cid)
class ComponentIDDict(object):
def __init__(self, data, **kwargs):
self.data = data
def __getitem__(self, key):
result = self.data.find_component_id(key)
if result is None:
raise KeyError("ComponentID not found or not unique: %s"
% key)
return result
class ComponentID(object):
"""
References a :class:`glue.core.component.Component` object within a :class:`~glue.core.data.Data` object.
ComponentIDs behave as keys::
component_id = data.id[name]
data[component_id] -> numpy array
"""
def __init__(self, label, hidden=False, parent=None):
""":param label: Name for the ID
:type label: str"""
self._label = str(label)
self._hidden = hidden
self.parent = parent
# We assign a UUID which can then be used for example in equations
# for derived components - the idea is that this doesn't change over
# the life cycle of glue, so it is a more reliable way to refer to
# components in strings than using labels
self._uuid = str(uuid.uuid4())
@property
def uuid(self):
return self._uuid
@property
def label(self):
return self._label
@label.setter
def label(self, value):
"""Change label.
.. warning::
Label changes are not currently tracked by client
classes. Labels should only be changed before creating other
client objects.
"""
self._label = str(value)
if self.parent is not None and self.parent.hub:
msg = DataRenameComponentMessage(self.parent, self)
self.parent.hub.broadcast(msg)
@property
def hidden(self):
"""Whether to hide the component by default"""
return self._hidden
@hidden.setter
def hidden(self, value):
self._hidden = value
def __str__(self):
return str(self._label)
def __repr__(self):
return str(self._label)
def to_html(self):
if self.parent is None:
return str(self._label)
else:
return "<font color='#777777'>[{1}]</font>.{0}".format(self._label, self.parent._label)
def __eq__(self, other):
if isinstance(other, (numbers.Number, six.string_types)):
return InequalitySubsetState(self, other, operator.eq)
return other is self
# In Python 3, if __eq__ is defined, then __hash__ has to be re-defined
if six.PY3:
__hash__ = object.__hash__
def __ne__(self, other):
if isinstance(other, (numbers.Number, six.string_types)):
return InequalitySubsetState(self, other, operator.ne)
return other is not self
def __gt__(self, other):
return InequalitySubsetState(self, other, operator.gt)
def __ge__(self, other):
return InequalitySubsetState(self, other, operator.ge)
def __lt__(self, other):
return InequalitySubsetState(self, other, operator.lt)
def __le__(self, other):
return InequalitySubsetState(self, other, operator.le)
def __add__(self, other):
return BinaryComponentLink(self, other, operator.add)
def __radd__(self, other):
return BinaryComponentLink(other, self, operator.add)
def __sub__(self, other):
return BinaryComponentLink(self, other, operator.sub)
def __rsub__(self, other):
return BinaryComponentLink(other, self, operator.sub)
def __mul__(self, other):
return BinaryComponentLink(self, other, operator.mul)
def __rmul__(self, other):
return BinaryComponentLink(other, self, operator.mul)
def __div__(self, other):
return BinaryComponentLink(self, other, operator.div)
def __rdiv__(self, other):
return BinaryComponentLink(other, self, operator.div)
def __truediv__(self, other):
return BinaryComponentLink(self, other, operator.truediv)
def __rtruediv__(self, other):
return BinaryComponentLink(other, self, operator.truediv)
def __pow__(self, other):
return BinaryComponentLink(self, other, operator.pow)
def __rpow__(self, other):
return BinaryComponentLink(other, self, operator.pow)
class PixelComponentID(ComponentID):
"""
The ID of a component which is a pixel position in the data - this allows
us to make assumptions in certain places. For example when a polygon
selection is done in pixel space, it can easily be broadcast along
dimensions.
"""
def __init__(self, axis, label, hidden=False, parent=None):
self.axis = axis
super(PixelComponentID, self).__init__(label, hidden=hidden, parent=parent)
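# Comment-only illustration of the operator overloads above (not original
# code): comparisons produce subset states while arithmetic produces links,
# so given a hypothetical cid = data.id['x'] and another ID other_cid,
#
#   cid > 3           # InequalitySubsetState, usable to define a subset
#   cid + other_cid   # BinaryComponentLink describing a derived component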
| {
"repo_name": "stscieisenhamer/glue",
"path": "glue/core/component_id.py",
"copies": "1",
"size": "5485",
"license": "bsd-3-clause",
"hash": -440545150288055040,
"line_mean": 29.3038674033,
"line_max": 109,
"alpha_frac": 0.6231540565,
"autogenerated": false,
"ratio": 4.108614232209738,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5231768288709738,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import uuid
import weakref
from matplotlib.colors import ColorConverter
from glue.core.data import Subset, Data
from glue.core.exceptions import IncompatibleAttribute
from glue.utils import broadcast_to
from glue.core.fixed_resolution_buffer import ARRAY_CACHE, PIXEL_CACHE
from .colors import get_translucent_cmap
from .layer_state import VolumeLayerState
from ..common.layer_artist import VispyLayerArtist
class DataProxy(object):
def __init__(self, viewer_state, layer_artist):
self._viewer_state = weakref.ref(viewer_state)
self._layer_artist = weakref.ref(layer_artist)
@property
def layer_artist(self):
return self._layer_artist()
@property
def viewer_state(self):
return self._viewer_state()
@property
def shape(self):
x_axis = self.viewer_state.x_att.axis
y_axis = self.viewer_state.y_att.axis
z_axis = self.viewer_state.z_att.axis
if isinstance(self.layer_artist.layer, Subset):
full_shape = self.layer_artist.layer.data.shape
else:
full_shape = self.layer_artist.layer.shape
return full_shape[z_axis], full_shape[y_axis], full_shape[x_axis]
def compute_fixed_resolution_buffer(self, bounds=None):
shape = [bound[2] for bound in bounds]
if self.layer_artist is None or self.viewer_state is None:
return broadcast_to(0, shape)
if isinstance(self.layer_artist.layer, Subset):
try:
subset_state = self.layer_artist.layer.subset_state
result = self.layer_artist.layer.data.compute_fixed_resolution_buffer(
target_data=self.layer_artist._viewer_state.reference_data,
bounds=bounds, subset_state=subset_state,
cache_id=self.layer_artist.id)
except IncompatibleAttribute:
self.layer_artist.disable_incompatible_subset()
return broadcast_to(0, shape)
else:
self.layer_artist.enable()
else:
try:
result = self.layer_artist.layer.compute_fixed_resolution_buffer(
target_data=self.layer_artist._viewer_state.reference_data,
bounds=bounds, target_cid=self.layer_artist.state.attribute,
cache_id=self.layer_artist.id)
except IncompatibleAttribute:
self.layer_artist.disable('Layer data is not fully linked to reference data')
return broadcast_to(0, shape)
else:
self.layer_artist.enable()
return result
class VolumeLayerArtist(VispyLayerArtist):
"""
A layer artist to render volumes.
This is more complex than for other visual types, because for volumes, we
need to manage all the volumes via a single MultiVolume visual class for
each data viewer.
"""
def __init__(self, vispy_viewer=None, layer=None, layer_state=None):
super(VolumeLayerArtist, self).__init__(layer)
self._clip_limits = None
self.layer = layer or layer_state.layer
self.vispy_widget = vispy_viewer._vispy_widget
# TODO: need to remove layers when layer artist is removed
self._viewer_state = vispy_viewer.state
self.state = layer_state or VolumeLayerState(layer=self.layer)
if self.state not in self._viewer_state.layers:
self._viewer_state.layers.append(self.state)
# We create a unique ID for this layer artist, that will be used to
# refer to the layer artist in the MultiVolume. We have to do this
# rather than use self.id because we can't guarantee the latter is
# unique.
self.id = str(uuid.uuid4())
self._multivol = self.vispy_widget._multivol
self._multivol.allocate(self.id)
self._viewer_state.add_global_callback(self._update_volume)
self.state.add_global_callback(self._update_volume)
self.reset_cache()
self._data_proxy = None
def reset_cache(self):
self._last_viewer_state = {}
self._last_layer_state = {}
@property
def visual(self):
return self._multivol
@property
def bbox(self):
return (-0.5, self.layer.shape[2] - 0.5,
-0.5, self.layer.shape[1] - 0.5,
-0.5, self.layer.shape[0] - 0.5)
@property
def shape(self):
return self.layer.shape
def redraw(self):
"""
Redraw the Vispy canvas
"""
self.vispy_widget.canvas.update()
def clear(self):
"""
Remove the layer artist from the visualization
"""
# We don't want to deallocate here because this can be called if we
# disable the layer due to incompatible attributes
self._multivol.disable(self.id)
def remove(self):
"""
Remove the layer artist for good
"""
self._multivol.deallocate(self.id)
ARRAY_CACHE.pop(self.id, None)
PIXEL_CACHE.pop(self.id, None)
def _update_cmap_from_color(self):
cmap = get_translucent_cmap(*ColorConverter().to_rgb(self.state.color))
self._multivol.set_cmap(self.id, cmap)
self.redraw()
def _update_limits(self):
if isinstance(self.layer, Subset):
self._multivol.set_clim(self.id, None)
else:
self._multivol.set_clim(self.id, (self.state.vmin, self.state.vmax))
self.redraw()
def _update_alpha(self):
self._multivol.set_weight(self.id, self.state.alpha)
self.redraw()
def _update_subset_mode(self):
if isinstance(self.state.layer, Data) or self.state.subset_mode == 'outline':
self._multivol.set_multiply(self.id, None)
else:
label = self._multivol.label_for_layer(self.state.layer.data)
self._multivol.set_multiply(self.id, label)
self.redraw()
def _update_data(self):
if self._data_proxy is None:
self._data_proxy = DataProxy(self._viewer_state, self)
self._multivol.set_data(self.id, self._data_proxy, layer=self.layer)
else:
self._multivol._update_scaled_data(self.id)
self._update_subset_mode()
def _update_visibility(self):
if self.state.visible:
self._multivol.enable(self.id)
else:
self._multivol.disable(self.id)
self.redraw()
def set_clip(self, limits):
pass
def _update_volume(self, force=False, **kwargs):
if self.state.attribute is None or self.state.layer is None:
return
# Figure out which attributes are different from before. Ideally we wouldn't
# need this, but currently this method can be called several times for a
# single user action (e.g. one attribute change triggering further state
# changes). If we can ensure that _update_volume is only called once per
# change, we could simplify this. Until then, we manually keep track of
# which properties have changed.
changed = set()
if not force:
for key, value in self._viewer_state.as_dict().items():
if value != self._last_viewer_state.get(key, None):
changed.add(key)
for key, value in self.state.as_dict().items():
if value != self._last_layer_state.get(key, None):
changed.add(key)
self._last_viewer_state.update(self._viewer_state.as_dict())
self._last_layer_state.update(self.state.as_dict())
if force or 'color' in changed:
self._update_cmap_from_color()
if force or 'vmin' in changed or 'vmax' in changed:
self._update_limits()
if force or 'alpha' in changed:
self._update_alpha()
if force or 'layer' in changed or 'attribute' in changed:
self._update_data()
if force or 'subset_mode' in changed:
self._update_subset_mode()
if force or 'visible' in changed:
self._update_visibility()
def update(self):
self._update_volume(force=True)
self.redraw()
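# Rough lifecycle of a volume layer as implemented above (comment-only sketch,
# not part of the original file): the artist reserves a slot in the shared
# MultiVolume visual, feeds it through a DataProxy, and frees it when removed.
#
#   artist = VolumeLayerArtist(vispy_viewer, layer=some_layer)  # allocate(self.id)
#   artist.update()   # force _update_volume: cmap, limits, alpha, data, visibility
#   artist.remove()   # deallocate(self.id) and drop the cached buffers
#
# 'vispy_viewer' and 'some_layer' here stand in for objects supplied by the viewer.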
| {
"repo_name": "astrofrog/glue-vispy-viewers",
"path": "glue_vispy_viewers/volume/layer_artist.py",
"copies": "2",
"size": "8312",
"license": "bsd-2-clause",
"hash": 3228586871450114000,
"line_mean": 32.248,
"line_max": 93,
"alpha_frac": 0.6120067372,
"autogenerated": false,
"ratio": 3.891385767790262,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5503392504990262,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import uuid
import weakref
import numpy as np
from glue.utils import defer_draw
from glue.viewers.image.state import ImageLayerState, ImageSubsetLayerState
from glue.viewers.matplotlib.layer_artist import MatplotlibLayerArtist
from glue.core.exceptions import IncompatibleAttribute
from glue.utils import color2rgb
from glue.core.link_manager import is_equivalent_cid
from glue.core import Data, HubListener
from glue.core.message import ComponentsChangedMessage
from glue.external.modest_image import imshow
class BaseImageLayerArtist(MatplotlibLayerArtist, HubListener):
def __init__(self, axes, viewer_state, layer_state=None, layer=None):
super(BaseImageLayerArtist, self).__init__(axes, viewer_state,
layer_state=layer_state, layer=layer)
self.reset_cache()
# Watch for changes in the viewer state which would require the
# layers to be redrawn
self._viewer_state.add_global_callback(self._update_image)
self.state.add_global_callback(self._update_image)
self.layer.hub.subscribe(self, ComponentsChangedMessage,
handler=self._update_compatibility,
filter=self._is_data_object)
self._update_compatibility()
def _is_data_object(self, message):
if isinstance(self.layer, Data):
return message.sender is self.layer
else:
return message.sender is self.layer.data
def reset_cache(self):
self._last_viewer_state = {}
self._last_layer_state = {}
def _update_image(self, force=False, **kwargs):
raise NotImplementedError()
@defer_draw
def _update_compatibility(self, *args, **kwargs):
"""
Determine compatibility of data with reference data. For the data to be
compatible with the reference data, the number of dimensions has to
match and the pixel component IDs have to be equivalent.
"""
if self._viewer_state.reference_data is None:
self._compatible_with_reference_data = False
self.disable('No reference data defined')
return
if self.layer is self._viewer_state.reference_data:
self._compatible_with_reference_data = True
self.enable()
return
# Check whether the pixel component IDs of the dataset are equivalent
# to that of the reference dataset. In future this is where we could
# allow for these to be different and implement reprojection.
if self.layer.ndim != self._viewer_state.reference_data.ndim:
self._compatible_with_reference_data = False
self.disable('Data dimensions do not match reference data')
return
# Determine whether pixel component IDs are equivalent
pids = self.layer.pixel_component_ids
pids_ref = self._viewer_state.reference_data.pixel_component_ids
if isinstance(self.layer, Data):
data = self.layer
else:
data = self.layer.data
for i in range(data.ndim):
if not is_equivalent_cid(data, pids[i], pids_ref[i]):
self._compatible_with_reference_data = False
self.disable('Pixel component IDs do not match. You can try '
'fixing this by linking the pixel component IDs '
'of this dataset with those of the reference '
'dataset.')
return
self._compatible_with_reference_data = True
self.enable()
class ImageLayerArtist(BaseImageLayerArtist):
_layer_state_cls = ImageLayerState
def __init__(self, axes, viewer_state, layer_state=None, layer=None):
super(ImageLayerArtist, self).__init__(axes, viewer_state,
layer_state=layer_state, layer=layer)
# We use a custom object to deal with the compositing of images, and we
# store it as a private attribute of the axes to make sure it is
# accessible for all layer artists.
self.uuid = str(uuid.uuid4())
self.composite = self.axes._composite
self.composite.allocate(self.uuid)
self.composite.set(self.uuid, array=self.get_image_data,
shape=self.get_image_shape)
self.composite_image = self.axes._composite_image
def get_layer_color(self):
if self._viewer_state.color_mode == 'One color per layer':
return self.state.color
else:
return self.state.cmap
def enable(self):
if hasattr(self, 'composite_image'):
self.composite_image.invalidate_cache()
super(ImageLayerArtist, self).enable()
def remove(self):
super(ImageLayerArtist, self).remove()
self.composite.deallocate(self.uuid)
def get_image_shape(self):
if not self._compatible_with_reference_data:
return None
if self._viewer_state.x_att is None or self._viewer_state.y_att is None:
return None
x_axis = self._viewer_state.x_att.axis
y_axis = self._viewer_state.y_att.axis
full_shape = self.layer.shape
return full_shape[y_axis], full_shape[x_axis]
def get_image_data(self, view=None):
if not self._compatible_with_reference_data:
return None
try:
image = self.state.get_sliced_data(view=view)
except (IncompatibleAttribute, IndexError):
# The following includes a call to self.clear()
self.disable_invalid_attributes(self.state.attribute)
return None
else:
self.enable()
return image
def _update_image_data(self):
self.composite_image.invalidate_cache()
self.redraw()
@defer_draw
def _update_visual_attributes(self):
if not self.enabled:
return
if self._viewer_state.color_mode == 'Colormaps':
color = self.state.cmap
else:
color = self.state.color
self.composite.set(self.uuid,
clim=(self.state.v_min, self.state.v_max),
visible=self.state.visible,
zorder=self.state.zorder,
color=color,
contrast=self.state.contrast,
bias=self.state.bias,
alpha=self.state.alpha,
stretch=self.state.stretch)
self.composite_image.invalidate_cache()
self.redraw()
@defer_draw
def _update_image(self, force=False, **kwargs):
if self.state.attribute is None or self.state.layer is None:
return
# Figure out which attributes are different from before. Ideally we wouldn't
# need this, but currently this method can be called several times for a
# single user action (e.g. one attribute change triggering further state
# changes). If we can ensure that _update_image is only called once per
# change, we could simplify this. Until then, we manually keep track of
# which properties have changed.
changed = set()
if not force:
for key, value in self._viewer_state.as_dict().items():
if value != self._last_viewer_state.get(key, None):
changed.add(key)
for key, value in self.state.as_dict().items():
if value != self._last_layer_state.get(key, None):
changed.add(key)
self._last_viewer_state.update(self._viewer_state.as_dict())
self._last_layer_state.update(self.state.as_dict())
if 'reference_data' in changed or 'layer' in changed:
self._update_compatibility()
if force or any(prop in changed for prop in ('layer', 'attribute',
'slices', 'x_att', 'y_att')):
self._update_image_data()
force = True # make sure scaling and visual attributes are updated
if force or any(prop in changed for prop in ('v_min', 'v_max', 'contrast',
'bias', 'alpha', 'color_mode',
'cmap', 'color', 'zorder',
'visible', 'stretch')):
self._update_visual_attributes()
@defer_draw
def update(self):
self._update_image(force=True)
# Reset the axes stack so that pressing the home button doesn't go back
# to a previous irrelevant view.
self.axes.figure.canvas.toolbar.update()
self.redraw()
class ImageSubsetArray(object):
def __init__(self, viewer_state, layer_artist):
self._viewer_state = weakref.ref(viewer_state)
self._layer_artist = weakref.ref(layer_artist)
self._layer_state = weakref.ref(layer_artist.state)
@property
def layer_artist(self):
return self._layer_artist()
@property
def layer_state(self):
return self._layer_state()
@property
def viewer_state(self):
return self._viewer_state()
@property
def shape(self):
x_axis = self.viewer_state.x_att.axis
y_axis = self.viewer_state.y_att.axis
full_shape = self.layer_state.layer.shape
return full_shape[y_axis], full_shape[x_axis]
@property
def nan_array(self):
return np.ones(self.shape) * np.nan
def __getitem__(self, view=None):
if (self.layer_artist is None or
self.layer_state is None or
self.viewer_state is None):
return self.nan_array
if not self.layer_artist._compatible_with_reference_data:
return self.nan_array
try:
mask = self.layer_state.get_sliced_data(view=view)
except IncompatibleAttribute:
self.layer_artist.disable_incompatible_subset()
return self.nan_array
else:
self.layer_artist.enable()
r, g, b = color2rgb(self.layer_state.color)
mask = np.dstack((r * mask, g * mask, b * mask, mask * .5))
mask = (255 * mask).astype(np.uint8)
return mask
@property
def dtype(self):
return np.uint8
@property
def ndim(self):
return 2
@property
def size(self):
return np.product(self.shape)
class ImageSubsetLayerArtist(BaseImageLayerArtist):
_layer_state_cls = ImageSubsetLayerState
def __init__(self, axes, viewer_state, layer_state=None, layer=None):
super(ImageSubsetLayerArtist, self).__init__(axes, viewer_state,
layer_state=layer_state, layer=layer)
self.subset_array = ImageSubsetArray(self._viewer_state, self)
self.image_artist = imshow(self.axes, self.subset_array,
origin='lower', interpolation='nearest',
vmin=0, vmax=1, aspect=self._viewer_state.aspect)
self.mpl_artists = [self.image_artist]
@defer_draw
def _update_visual_attributes(self):
if not self.enabled:
return
# TODO: deal with color using a colormap instead of having to change data
self.image_artist.set_visible(self.state.visible)
self.image_artist.set_zorder(self.state.zorder)
self.image_artist.set_alpha(self.state.alpha)
self.redraw()
def _update_image(self, force=False, **kwargs):
if self.state.layer is None:
return
# Figure out which attributes are different from before. Ideally we wouldn't
# need this, but currently this method can be called several times for a
# single user action (e.g. one attribute change triggering further state
# changes). If we can ensure that _update_image is only called once per
# change, we could simplify this. Until then, we manually keep track of
# which properties have changed.
changed = set()
if not force:
for key, value in self._viewer_state.as_dict().items():
if value != self._last_viewer_state.get(key, None):
changed.add(key)
for key, value in self.state.as_dict().items():
if value != self._last_layer_state.get(key, None):
changed.add(key)
self._last_viewer_state.update(self._viewer_state.as_dict())
self._last_layer_state.update(self.state.as_dict())
if 'reference_data' in changed or 'layer' in changed:
self._update_compatibility()
if force or any(prop in changed for prop in ('layer', 'attribute', 'color',
'x_att', 'y_att', 'slices')):
self.image_artist.invalidate_cache()
self.redraw() # forces subset to be recomputed
force = True # make sure scaling and visual attributes are updated
if force or any(prop in changed for prop in ('zorder', 'visible', 'alpha')):
self._update_visual_attributes()
def enable(self):
super(ImageSubsetLayerArtist, self).enable()
# We need to now ensure that image_artist, which may have been marked
# as not being visible when the layer was cleared is made visible
# again.
if hasattr(self, 'image_artist'):
self.image_artist.invalidate_cache()
self._update_visual_attributes()
@defer_draw
def update(self):
# TODO: determine why this gets called when changing the transparency slider
self._update_image(force=True)
self.redraw()
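# Comment-only note (added for clarity): ImageSubsetArray.__getitem__ above
# converts the boolean subset mask into an RGBA uint8 array, e.g. for a red
# subset (r, g, b = 1, 0, 0) a True pixel becomes [255, 0, 0, 127] and a
# False pixel [0, 0, 0, 0], which imshow() then draws on top of the image
# layers with the artist's own alpha and zorder.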
| {
"repo_name": "stscieisenhamer/glue",
"path": "glue/viewers/image/layer_artist.py",
"copies": "1",
"size": "13979",
"license": "bsd-3-clause",
"hash": -5157392344947443000,
"line_mean": 34.0350877193,
"line_max": 90,
"alpha_frac": 0.5943200515,
"autogenerated": false,
"ratio": 4.2592931139549055,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0006192690479312569,
"num_lines": 399
} |
from __future__ import (absolute_import, division, print_function)
import uuid
from odm2api import serviceBase
from odm2api.models import TimeSeriesResultValues
__author__ = 'sreeder'
class CreateODM2(serviceBase):
# Annotations
def create(self, value):
self._session.add(value)
self._session.commit()
return value
def createAll(self, values):
self._session.add_all(values)
self._session.commit()
return values
def createVariable(self, var):
self._session.add(var)
self._session.commit()
return var
def createMethod(self, method):
self._session.add(method)
self._session.commit()
return method
def createProcessingLevel(self, proclevel):
self._session.add(proclevel)
self._session.commit()
return proclevel
def createSamplingFeature(self, samplingfeature):
if samplingfeature.SamplingFeatureUUID is None:
samplingfeature.SamplingFeatureUUID = str(uuid.uuid1())
self._session.add(samplingfeature)
self._session.commit()
return samplingfeature
def createUnit(self, unit):
self._session.add(unit)
self._session.commit()
return unit
def createOrganization(self, org):
self._session.add(org)
self._session.commit()
return org
def createPerson(self, person):
self._session.add(person)
self._session.commit()
return person
def createAffiliation(self, affiliation):
self._session.add(affiliation)
self._session.commit()
return affiliation
def createDataset(self, dataset):
self._session.add(dataset)
self._session.commit()
return dataset
def createDatasetResults(self, datasetresult):
self._session.add(datasetresult)
self._session.commit()
return datasetresult
def createAction(self, action):
self._session.add(action)
self._session.commit()
return action
def createActionby(self, actionby):
self._session.add(actionby)
self._session.commit()
return actionby
def createFeatureAction(self, action):
self._session.add(action)
self._session.commit()
return action
def createAnnotations(self, anno):
self._session.add(anno)
self._session.commit()
return anno
def createRelatedAction(self, relatedaction):
self._session.add(relatedaction)
self._session.commit()
return relatedaction
def createResult(self, result):
if result.ResultUUID is None:
result.ResultUUID = str(uuid.uuid1())
self._session.add(result)
self._session.commit()
return result
def createResultValue(self, value):
self._session.add(value)
self._session.commit()
self._session.flush()
return value
def createSpatialReference(self, spatialref):
self._session.add(spatialref)
self._session.commit()
return spatialref
def createModel(self, model):
self._session.add(model)
self._session.commit()
return model
def createRelatedModel(self, relatedmodel):
self._session.add(relatedmodel)
self._session.commit()
return relatedmodel
def createSimulation(self, simulation):
self._session.add(simulation)
self._session.commit()
return simulation
def createTimeSeriesResultValues(self, datavalues):
try:
# FIXME: F841 local variable 'tablename' is assigned to but never used.
# tablename = TimeSeriesResultValues.__tablename__
datavalues.to_sql(
name='TimeSeriesResultValues',
schema=TimeSeriesResultValues.__table_args__['schema'],
if_exists='append',
chunksize=1000,
con=self._session_factory.engine,
index=False
)
self._session.commit()
return datavalues
except Exception as e:
print(e)
return None
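# Editor's note: hedged usage sketch, not part of the original module. It
# assumes a session factory object accepted by serviceBase (as with the other
# odm2api services) and an already-constructed ODM2 model instance
# `my_method`; both names are placeholders.
def _example_create_method(session_factory, my_method):
    # Every create* helper above follows the same pattern: add the mapped
    # object to the SQLAlchemy session, commit, and return it so that any
    # database-generated identifiers are populated.
    create_service = CreateODM2(session_factory)
    return create_service.createMethod(my_method)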
| {
"repo_name": "ODM2/ODM2PythonAPI",
"path": "odm2api/services/createService.py",
"copies": "2",
"size": "4187",
"license": "bsd-3-clause",
"hash": 2374704380582786600,
"line_mean": 26.1883116883,
"line_max": 83,
"alpha_frac": 0.6166706472,
"autogenerated": false,
"ratio": 4.440084835630965,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6056755482830964,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import uuid
import numpy as np
from glue.core.exceptions import IncompatibleAttribute
from glue.utils import categorical_ndarray
from .multi_scatter import MultiColorScatter
from .layer_state import ScatterLayerState
from ..common.layer_artist import VispyLayerArtist
COLOR_PROPERTIES = set(['color_mode', 'cmap_attribute', 'cmap_vmin', 'cmap_vmax', 'cmap', 'color'])
SIZE_PROPERTIES = set(['size_mode', 'size_attribute', 'size_vmin', 'size_vmax',
'size_scaling', 'size'])
ALPHA_PROPERTIES = set(['alpha'])
DATA_PROPERTIES = set(['layer', 'x_att', 'y_att', 'z_att'])
VISIBLE_PROPERTIES = set(['visible'])
class ScatterLayerArtist(VispyLayerArtist):
"""
A layer artist to render 3d scatter plots.
"""
def __init__(self, vispy_viewer, layer=None, layer_state=None):
super(ScatterLayerArtist, self).__init__(layer)
self._clip_limits = None
# Set data caches
self._marker_data = None
self._color_data = None
self._size_data = None
self.layer = layer or layer_state.layer
self.vispy_widget = vispy_viewer._vispy_widget
# TODO: need to remove layers when layer artist is removed
self._viewer_state = vispy_viewer.state
self.state = layer_state or ScatterLayerState(layer=self.layer)
if self.state not in self._viewer_state.layers:
self._viewer_state.layers.append(self.state)
# We create a unique ID for this layer artist, that will be used to
# refer to the layer artist in the MultiColorScatter. We have to do this
# rather than use self.id because we can't guarantee the latter is
# unique.
self.id = str(uuid.uuid4())
# We need to use MultiColorScatter instance to store scatter plots, but
# we should only have one per canvas. Therefore, we store the
# MultiColorScatter instance in the vispy viewer instance.
if not hasattr(self.vispy_widget, '_multiscat'):
multiscat = MultiColorScatter()
multiscat.set_gl_state(depth_test=False,
blend=True,
blend_func=('src_alpha', 'one_minus_src_alpha'))
self.vispy_widget.add_data_visual(multiscat)
self.vispy_widget._multiscat = multiscat
self._multiscat = self.vispy_widget._multiscat
self._multiscat.allocate(self.id)
self._multiscat.set_zorder(self.id, self.get_zorder)
# Watch for changes in the viewer state which would require the
# layers to be redrawn
self._viewer_state.add_global_callback(self._update_scatter)
self.state.add_global_callback(self._update_scatter)
self.reset_cache()
def reset_cache(self):
self._last_viewer_state = {}
self._last_layer_state = {}
@property
def visual(self):
return self._multiscat
def get_zorder(self):
return self.zorder
def get_layer_color(self):
if self.state.color_mode == 'Fixed':
return self.state.color
else:
return self.state.cmap
def redraw(self):
"""
Redraw the Vispy canvas
"""
if self._multiscat is not None:
self._multiscat._update()
self.vispy_widget.canvas.update()
def clear(self):
"""
Clear the visualization for this layer
"""
self._multiscat.set_data_values(self.id, [], [], [])
def remove(self):
"""
Remove the layer artist from the visualization
"""
if self._multiscat is None:
return
self._multiscat.deallocate(self.id)
self._multiscat = None
self._viewer_state.remove_global_callback(self._update_scatter)
self.state.remove_global_callback(self._update_scatter)
def _update_sizes(self):
if self.state.size_mode is None:
pass
elif self.state.size_mode == 'Fixed':
self._multiscat.set_size(self.id, self.state.size * self.state.size_scaling)
else:
data = self.layer[self.state.size_attribute].ravel()
if isinstance(data, categorical_ndarray):
data = data.codes
if self.state.size_vmax == self.state.size_vmin:
size = np.ones(data.shape) * 10
else:
size = (20 * (data - self.state.size_vmin) /
(self.state.size_vmax - self.state.size_vmin))
size_data = size * self.state.size_scaling
size_data[np.isnan(data)] = 0.
self._multiscat.set_size(self.id, size_data)
def _update_colors(self):
if self.state.color_mode is None:
pass
elif self.state.color_mode == 'Fixed':
self._multiscat.set_color(self.id, self.state.color)
else:
data = self.layer[self.state.cmap_attribute].ravel()
if isinstance(data, categorical_ndarray):
data = data.codes
if self.state.cmap_vmax == self.state.cmap_vmin:
cmap_data = np.ones(data.shape) * 0.5
else:
cmap_data = ((data - self.state.cmap_vmin) /
(self.state.cmap_vmax - self.state.cmap_vmin))
cmap_data = self.state.cmap(cmap_data)
cmap_data[:, 3][np.isnan(data)] = 0.
self._multiscat.set_color(self.id, cmap_data)
def _update_alpha(self):
self._multiscat.set_alpha(self.id, self.state.alpha)
def _update_data(self, event=None):
try:
x = self.layer[self._viewer_state.x_att].ravel()
y = self.layer[self._viewer_state.y_att].ravel()
z = self.layer[self._viewer_state.z_att].ravel()
except AttributeError:
return
except (IncompatibleAttribute, IndexError):
# The following includes a call to self.clear()
self.disable_invalid_attributes(self._viewer_state.x_att,
self._viewer_state.y_att,
self._viewer_state.z_att)
return
else:
self._enabled = True
self._marker_data = np.array([x, y, z]).transpose()
# We need to make sure we update the sizes and colors in case
# these were set as arrays, since the size of the data might have
# changed (in the case of subsets)
self._multiscat.set_data_values(self.id, x, y, z)
# Mask points outside the clip limits
if self._clip_limits is None:
self._multiscat.set_mask(self.id, None)
else:
xmin, xmax, ymin, ymax, zmin, zmax = self._clip_limits
keep = (x >= xmin) & (x <= xmax) & (y >= ymin) & (y <= ymax) & (z >= zmin) & (z <= zmax)
self._multiscat.set_mask(self.id, keep)
self.redraw()
def _update_visibility(self):
self._multiscat.set_visible(self.id, self.visible)
self.redraw()
@property
def default_limits(self):
if self._marker_data is None:
raise ValueError("Data not yet set")
dmin = np.nanmin(self._marker_data, axis=0)
dmax = np.nanmax(self._marker_data, axis=0)
# TODO: the following can be optimized
return tuple(np.array([dmin, dmax]).transpose().ravel())
def set_clip(self, limits):
self._clip_limits = limits
self._update_data()
def _update_scatter(self, force=False, **kwargs):
if (self._viewer_state.x_att is None or
self._viewer_state.y_att is None or
self._viewer_state.z_att is None or
self.state.layer is None):
return
# Figure out which attributes are different from before. Ideally we shouldn't
# need this but currently this method is called multiple times if an
# attribute is changed, e.g. when x_att changes and dependent properties are
# then updated in turn. If we can solve this so that _update_scatter is really
# only called once
# then we could consider simplifying this. Until then, we manually keep track
# of which properties have changed.
changed = set()
if not force:
for key, value in self._viewer_state.as_dict().items():
if value != self._last_viewer_state.get(key, None):
changed.add(key)
for key, value in self.state.as_dict().items():
if value != self._last_layer_state.get(key, None):
changed.add(key)
self._last_viewer_state.update(self._viewer_state.as_dict())
self._last_layer_state.update(self.state.as_dict())
if force or len(changed & DATA_PROPERTIES) > 0:
self._update_data()
force = True
if force or len(changed & SIZE_PROPERTIES) > 0:
self._update_sizes()
if force or len(changed & COLOR_PROPERTIES) > 0:
self._update_colors()
if force or len(changed & ALPHA_PROPERTIES) > 0:
self._update_alpha()
if force or len(changed & VISIBLE_PROPERTIES) > 0:
self._update_visibility()
def update(self):
with self._multiscat.delay_update():
self._update_scatter(force=True)
self.redraw()
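# Editor's note: hedged sketch, not part of the original module. It isolates
# the state-diffing idea used in _update_scatter above: compare the current
# viewer/layer state dictionaries against cached copies and collect the keys
# whose values changed, so that only the affected visual properties are
# recomputed.
def _changed_keys(current_state, last_state):
    return {key for key, value in current_state.items()
            if value != last_state.get(key, None)}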
| {
"repo_name": "astrofrog/glue-vispy-viewers",
"path": "glue_vispy_viewers/scatter/layer_artist.py",
"copies": "2",
"size": "9420",
"license": "bsd-2-clause",
"hash": 4064415048604583000,
"line_mean": 35.091954023,
"line_max": 100,
"alpha_frac": 0.5847133758,
"autogenerated": false,
"ratio": 3.843329253365973,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5428042629165973,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import uuid
import numpy as np
from glue.utils import nonpartial
from glue.core.exceptions import IncompatibleAttribute
from .multi_scatter import MultiColorScatter
from .layer_state import ScatterLayerState
from ..common.layer_artist import VispyLayerArtist
class ScatterLayerArtist(VispyLayerArtist):
"""
A layer artist to render 3d scatter plots.
"""
def __init__(self, vispy_viewer, layer=None, layer_state=None):
super(ScatterLayerArtist, self).__init__(layer)
self._clip_limits = None
self._marker_keep = Ellipsis
# Set data caches
self._marker_data = None
self._color_data = None
self._size_data = None
self.layer = layer or layer_state.layer
self.vispy_viewer = vispy_viewer
self.vispy_widget = vispy_viewer._vispy_widget
# TODO: need to remove layers when layer artist is removed
self._viewer_state = vispy_viewer.state
self.state = layer_state or ScatterLayerState(layer=self.layer)
if self.state not in self._viewer_state.layers:
self._viewer_state.layers.append(self.state)
# We create a unique ID for this layer artist, that will be used to
# refer to the layer artist in the MultiColorScatter. We have to do this
# rather than use self.id because we can't guarantee the latter is
# unique.
self.id = str(uuid.uuid4())
# We need to use MultiColorScatter instance to store scatter plots, but
# we should only have one per canvas. Therefore, we store the
# MultiColorScatter instance in the vispy viewer instance.
if not hasattr(self.vispy_widget, '_multiscat'):
multiscat = MultiColorScatter()
multiscat.set_gl_state(depth_test=False,
blend=True,
blend_func=('src_alpha', 'one_minus_src_alpha'))
self.vispy_widget.add_data_visual(multiscat)
self.vispy_widget._multiscat = multiscat
self._multiscat = self.vispy_widget._multiscat
self._multiscat.allocate(self.id)
self._multiscat.set_zorder(self.id, self.get_zorder)
try:
self.state.add_callback('*', self._update_from_state, as_kwargs=True)
except TypeError: # glue-core >= 0.11
self.state.add_global_callback(self._update_from_state)
self._update_from_state(**self.state.as_dict())
self._viewer_state.add_callback('x_att', nonpartial(self._update_data))
self._viewer_state.add_callback('y_att', nonpartial(self._update_data))
self._viewer_state.add_callback('z_att', nonpartial(self._update_data))
self._update_data()
self.visible = True
@property
def visual(self):
return self._multiscat
def _update_visibility(self):
self._multiscat.set_visible(self.id, self.visible)
self.redraw()
def get_zorder(self):
return self.zorder
def redraw(self):
"""
Redraw the Vispy canvas
"""
self._multiscat._update()
self.vispy_widget.canvas.update()
def clear(self):
"""
Remove the layer artist from the visualization
"""
self._multiscat.set_data_values(self.id, [], [], [])
def update(self):
"""
Update the visualization to reflect the underlying data
"""
self.redraw()
self._changed = False
def _update_from_state(self, **props):
if any('size' in prop for prop in props):
self._update_sizes()
if any('color' in prop or 'cmap' in prop for prop in props):
self._update_colors()
if 'alpha' in props:
self._update_alpha()
self.redraw()
def _update_sizes(self):
if self.state.size_mode is None:
pass
elif self.state.size_mode == 'Fixed':
self._multiscat.set_size(self.id, self.state.size * self.state.size_scaling)
else:
data = self.layer[self.state.size_attribute].ravel()
data = data[self._marker_keep]
if self.state.size_vmax == self.state.size_vmin:
size = np.ones(data.shape) * 10
else:
size = (20 * (data - self.state.size_vmin) /
(self.state.size_vmax - self.state.size_vmin))
size_data = size * self.state.size_scaling
size_data[np.isnan(data)] = 0.
self._multiscat.set_size(self.id, size_data)
def _update_colors(self):
if self.state.color_mode is None:
pass
elif self.state.color_mode == 'Fixed':
self._multiscat.set_color(self.id, self.state.color)
else:
data = self.layer[self.state.cmap_attribute].ravel()
data = data[self._marker_keep]
if self.state.cmap_vmax == self.state.cmap_vmin:
cmap_data = np.ones(data.shape) * 0.5
else:
cmap_data = ((data - self.state.cmap_vmin) /
(self.state.cmap_vmax - self.state.cmap_vmin))
cmap_data = self.state.cmap(cmap_data)
cmap_data[:, 3][np.isnan(data)] = 0.
self._multiscat.set_color(self.id, cmap_data)
def _update_alpha(self):
self._multiscat.set_alpha(self.id, self.state.alpha)
def _update_data(self):
try:
x = self.layer[self._viewer_state.x_att].ravel()
y = self.layer[self._viewer_state.y_att].ravel()
z = self.layer[self._viewer_state.z_att].ravel()
except AttributeError:
return
except (IncompatibleAttribute, IndexError):
# The following includes a call to self.clear()
self.disable_invalid_attributes(self._viewer_state.x_att,
self._viewer_state.y_att,
self._viewer_state.z_att)
return
else:
self._enabled = True
if self._clip_limits is None:
keep = Ellipsis
else:
xmin, xmax, ymin, ymax, zmin, zmax = self._clip_limits
keep = (x >= xmin) & (x <= xmax) & (y >= ymin) & (y <= ymax) & (z >= zmin) & (z <= zmax)
x, y, z = x[keep], y[keep], z[keep]
self._marker_data = np.array([x, y, z]).transpose()
self._marker_keep = keep
# We need to make sure we update the sizes and colors in case
# these were set as arrays, since the size of the data might have
# changed (in the case of subsets)
with self._multiscat.delay_update():
self._multiscat.set_data_values(self.id, x, y, z)
self._update_sizes()
self._update_colors()
self.redraw()
@property
def default_limits(self):
if self._marker_data is None:
raise ValueError("Data not yet set")
dmin = np.nanmin(self._marker_data, axis=0)
dmax = np.nanmax(self._marker_data, axis=0)
# TODO: the following can be optimized
return tuple(np.array([dmin, dmax]).transpose().ravel())
def set_clip(self, limits):
self._clip_limits = limits
self._update_data()
| {
"repo_name": "PennyQ/glue-3d-viewer",
"path": "glue_vispy_viewers/scatter/layer_artist.py",
"copies": "1",
"size": "7350",
"license": "bsd-2-clause",
"hash": 2502536197070844000,
"line_mean": 34.6796116505,
"line_max": 100,
"alpha_frac": 0.5783673469,
"autogenerated": false,
"ratio": 3.7867078825347757,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9863677595744884,
"avg_score": 0.00027952673797823673,
"num_lines": 206
} |
from __future__ import absolute_import, division, print_function
import warnings
from collections import defaultdict
import numpy as np
import pandas as pd
from .coding import times, strings, variables
from .coding.variables import SerializationWarning
from .core import duck_array_ops, indexing
from .core.pycompat import (
OrderedDict, basestring, bytes_type, iteritems, dask_array_type,
unicode_type)
from .core.variable import IndexVariable, Variable, as_variable
class NativeEndiannessArray(indexing.ExplicitlyIndexedNDArrayMixin):
"""Decode arrays on the fly from non-native to native endianness
This is useful for decoding arrays from netCDF3 files (which are all
big endian) into native endianness, so they can be used with Cython
functions, such as those found in bottleneck and pandas.
>>> x = np.arange(5, dtype='>i2')
>>> x.dtype
dtype('>i2')
>>> NativeEndiannessArray(x).dtype
dtype('int16')
>>> NativeEndiannessArray(x)[:].dtype
dtype('int16')
"""
def __init__(self, array):
self.array = indexing.as_indexable(array)
@property
def dtype(self):
return np.dtype(self.array.dtype.kind + str(self.array.dtype.itemsize))
def __getitem__(self, key):
return np.asarray(self.array[key], dtype=self.dtype)
class BoolTypeArray(indexing.ExplicitlyIndexedNDArrayMixin):
"""Decode arrays on the fly from integer to boolean datatype
This is useful for decoding boolean arrays from integer typed netCDF
variables.
>>> x = np.array([1, 0, 1, 1, 0], dtype='i1')
>>> x.dtype
dtype('int8')
>>> BoolTypeArray(x).dtype
dtype('bool')
>>> BoolTypeArray(x)[:].dtype
dtype('bool')
"""
def __init__(self, array):
self.array = indexing.as_indexable(array)
@property
def dtype(self):
return np.dtype('bool')
def __getitem__(self, key):
return np.asarray(self.array[key], dtype=self.dtype)
def _var_as_tuple(var):
return var.dims, var.data, var.attrs.copy(), var.encoding.copy()
def maybe_encode_nonstring_dtype(var, name=None):
if 'dtype' in var.encoding and var.encoding['dtype'] != 'S1':
dims, data, attrs, encoding = _var_as_tuple(var)
dtype = np.dtype(encoding.pop('dtype'))
if dtype != var.dtype:
if np.issubdtype(dtype, np.integer):
if (np.issubdtype(var.dtype, np.floating) and
'_FillValue' not in var.attrs):
warnings.warn('saving variable %s with floating '
'point data as an integer dtype without '
'any _FillValue to use for NaNs' % name,
SerializationWarning, stacklevel=3)
data = duck_array_ops.around(data)[...]
data = data.astype(dtype=dtype)
var = Variable(dims, data, attrs, encoding)
return var
def maybe_default_fill_value(var):
# make NaN the fill value for float types:
if ('_FillValue' not in var.attrs and
'_FillValue' not in var.encoding and
np.issubdtype(var.dtype, np.floating)):
var.attrs['_FillValue'] = var.dtype.type(np.nan)
return var
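# Editor's note: hedged illustration, not part of the original module; run it
# manually to see that a float variable without an explicit _FillValue picks
# up NaN as its default fill value.
def _example_default_fill_value():
    var = Variable(('x',), np.array([1.0, 2.0]))
    var = maybe_default_fill_value(var)
    return var.attrs['_FillValue']  # NaN of the variable's float dtype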
def maybe_encode_bools(var):
if ((var.dtype == np.bool) and
('dtype' not in var.encoding) and ('dtype' not in var.attrs)):
dims, data, attrs, encoding = _var_as_tuple(var)
attrs['dtype'] = 'bool'
data = data.astype(dtype='i1', copy=True)
var = Variable(dims, data, attrs, encoding)
return var
def _infer_dtype(array, name=None):
"""Given an object array with no missing values, infer its dtype from its
first element
"""
if array.dtype.kind != 'O':
raise TypeError('_infer_dtype must be called on a dtype=object array')
if array.size == 0:
return np.dtype(float)
element = array[(0,) * array.ndim]
if isinstance(element, (bytes_type, unicode_type)):
return strings.create_vlen_dtype(type(element))
dtype = np.array(element).dtype
if dtype.kind != 'O':
return dtype
raise ValueError('unable to infer dtype on variable {!r}; xarray '
'cannot serialize arbitrary Python objects'
.format(name))
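# Editor's note: hedged illustration, not part of the original module; run it
# manually to see how the dtype of an object array is inferred from its first
# element.
def _example_infer_dtype():
    # Expected result: dtype('float64')
    return _infer_dtype(np.array([1.5, 2.5], dtype=object), name='demo')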
def ensure_not_multiindex(var, name=None):
if (isinstance(var, IndexVariable) and
isinstance(var.to_index(), pd.MultiIndex)):
raise NotImplementedError(
'variable {!r} is a MultiIndex, which cannot yet be '
'serialized to netCDF files '
'(https://github.com/pydata/xarray/issues/1077). Use '
'reset_index() to convert MultiIndex levels into coordinate '
'variables instead.'.format(name))
def _copy_with_dtype(data, dtype):
"""Create a copy of an array with the given dtype.
We use this instead of np.array() to ensure that custom object dtypes end
up on the resulting array.
"""
result = np.empty(data.shape, dtype)
result[...] = data
return result
def ensure_dtype_not_object(var, name=None):
# TODO: move this from conventions to backends? (it's not CF related)
if var.dtype.kind == 'O':
dims, data, attrs, encoding = _var_as_tuple(var)
if isinstance(data, dask_array_type):
warnings.warn(
'variable {} has data in the form of a dask array with '
'dtype=object, which means it is being loaded into memory '
'to determine a data type that can be safely stored on disk. '
'To avoid this, coerce this variable to a fixed-size dtype '
'with astype() before saving it.'.format(name),
SerializationWarning)
data = data.compute()
missing = pd.isnull(data)
if missing.any():
# nb. this will fail for dask.array data
non_missing_values = data[~missing]
inferred_dtype = _infer_dtype(non_missing_values, name)
# There is no safe bit-pattern for NA in typical binary string
# formats, so we can't set a fill_value. Unfortunately, this means
# we can't distinguish between missing values and empty strings.
if strings.is_bytes_dtype(inferred_dtype):
fill_value = b''
elif strings.is_unicode_dtype(inferred_dtype):
fill_value = u''
else:
# insist on using float for numeric values
if not np.issubdtype(inferred_dtype, np.floating):
inferred_dtype = np.dtype(float)
fill_value = inferred_dtype.type(np.nan)
data = _copy_with_dtype(data, dtype=inferred_dtype)
data[missing] = fill_value
else:
data = _copy_with_dtype(data, dtype=_infer_dtype(data, name))
assert data.dtype.kind != 'O' or data.dtype.metadata
var = Variable(dims, data, attrs, encoding)
return var
def encode_cf_variable(var, needs_copy=True, name=None):
"""
Converts a Variable into a Variable which follows some
of the CF conventions:
- Nans are masked using _FillValue (or the deprecated missing_value)
- Rescaling via: scale_factor and add_offset
- datetimes are converted to the CF 'units since time' format
- dtype encodings are enforced.
Parameters
----------
var : xarray.Variable
A variable holding un-encoded data.
Returns
-------
out : xarray.Variable
A variable which has been encoded as described above.
"""
ensure_not_multiindex(var, name=name)
for coder in [times.CFDatetimeCoder(),
times.CFTimedeltaCoder(),
variables.CFScaleOffsetCoder(),
variables.CFMaskCoder(),
variables.UnsignedIntegerCoder()]:
var = coder.encode(var, name=name)
# TODO(shoyer): convert all of these to use coders, too:
var = maybe_encode_nonstring_dtype(var, name=name)
var = maybe_default_fill_value(var)
var = maybe_encode_bools(var)
var = ensure_dtype_not_object(var, name=name)
return var
def decode_cf_variable(name, var, concat_characters=True, mask_and_scale=True,
decode_times=True, decode_endianness=True,
stack_char_dim=True):
"""
Decodes a variable which may hold CF encoded information.
This includes variables that have been masked and scaled, which
hold CF style time variables (this is almost always the case if
the dataset has been serialized) and which have strings encoded
as character arrays.
Parameters
----------
name: str
Name of the variable. Used for better error messages.
var : Variable
A variable holding potentially CF encoded information.
concat_characters : bool
Should character arrays be concatenated to strings, for
example: ['h', 'e', 'l', 'l', 'o'] -> 'hello'
mask_and_scale: bool
Lazily scale (using scale_factor and add_offset) and mask
(using _FillValue). If the _Unsigned attribute is present
treat integer arrays as unsigned.
decode_times : bool
Decode cf times ('hours since 2000-01-01') to np.datetime64.
decode_endianness : bool
Decode arrays from non-native to native endianness.
stack_char_dim : bool
Whether to stack characters into bytes along the last dimension of this
array. Passed as an argument because we need to look at the full
dataset to figure out if this is appropriate.
Returns
-------
out : Variable
A variable holding the decoded equivalent of var.
"""
var = as_variable(var)
original_dtype = var.dtype
if concat_characters:
if stack_char_dim:
var = strings.CharacterArrayCoder().decode(var, name=name)
var = strings.EncodedStringCoder().decode(var)
if mask_and_scale:
for coder in [variables.UnsignedIntegerCoder(),
variables.CFMaskCoder(),
variables.CFScaleOffsetCoder()]:
var = coder.decode(var, name=name)
if decode_times:
for coder in [times.CFTimedeltaCoder(),
times.CFDatetimeCoder()]:
var = coder.decode(var, name=name)
dimensions, data, attributes, encoding = (
variables.unpack_for_decoding(var))
# TODO(shoyer): convert everything below to use coders
if decode_endianness and not data.dtype.isnative:
# do this last, so it's only done if we didn't already unmask/scale
data = NativeEndiannessArray(data)
original_dtype = data.dtype
if 'dtype' in encoding:
if original_dtype != encoding['dtype']:
warnings.warn("CF decoding is overwriting dtype on variable {!r}"
.format(name))
else:
encoding['dtype'] = original_dtype
if 'dtype' in attributes and attributes['dtype'] == 'bool':
del attributes['dtype']
data = BoolTypeArray(data)
if not isinstance(data, dask_array_type):
data = indexing.LazilyOuterIndexedArray(data)
return Variable(dimensions, data, attributes, encoding=encoding)
def decode_cf_variables(variables, attributes, concat_characters=True,
mask_and_scale=True, decode_times=True,
decode_coords=True, drop_variables=None):
"""
Decode a several CF encoded variables.
See: decode_cf_variable
"""
dimensions_used_by = defaultdict(list)
for v in variables.values():
for d in v.dims:
dimensions_used_by[d].append(v)
def stackable(dim):
# figure out if a dimension can be concatenated over
if dim in variables:
return False
for v in dimensions_used_by[dim]:
if v.dtype.kind != 'S' or dim != v.dims[-1]:
return False
return True
coord_names = set()
if isinstance(drop_variables, basestring):
drop_variables = [drop_variables]
elif drop_variables is None:
drop_variables = []
drop_variables = set(drop_variables)
new_vars = OrderedDict()
for k, v in iteritems(variables):
if k in drop_variables:
continue
stack_char_dim = (concat_characters and v.dtype == 'S1' and
v.ndim > 0 and stackable(v.dims[-1]))
new_vars[k] = decode_cf_variable(
k, v, concat_characters=concat_characters,
mask_and_scale=mask_and_scale, decode_times=decode_times,
stack_char_dim=stack_char_dim)
if decode_coords:
var_attrs = new_vars[k].attrs
if 'coordinates' in var_attrs:
coord_str = var_attrs['coordinates']
var_coord_names = coord_str.split()
if all(k in variables for k in var_coord_names):
new_vars[k].encoding['coordinates'] = coord_str
del var_attrs['coordinates']
coord_names.update(var_coord_names)
if decode_coords and 'coordinates' in attributes:
attributes = OrderedDict(attributes)
coord_names.update(attributes.pop('coordinates').split())
return new_vars, attributes, coord_names
def decode_cf(obj, concat_characters=True, mask_and_scale=True,
decode_times=True, decode_coords=True, drop_variables=None):
"""Decode the given Dataset or Datastore according to CF conventions into
a new Dataset.
Parameters
----------
obj : Dataset or DataStore
Object to decode.
concat_characters : bool, optional
Should character arrays be concatenated to strings, for
example: ['h', 'e', 'l', 'l', 'o'] -> 'hello'
mask_and_scale: bool, optional
Lazily scale (using scale_factor and add_offset) and mask
(using _FillValue).
decode_times : bool, optional
Decode cf times (e.g., integers since 'hours since 2000-01-01') to
np.datetime64.
decode_coords : bool, optional
Use the 'coordinates' attribute on variable (or the dataset itself) to
identify coordinates.
drop_variables: string or iterable, optional
A variable or list of variables to exclude from being parsed from the
dataset. This may be useful to drop variables with problems or
inconsistent values.
Returns
-------
decoded : Dataset
"""
from .core.dataset import Dataset
from .backends.common import AbstractDataStore
if isinstance(obj, Dataset):
vars = obj._variables
attrs = obj.attrs
extra_coords = set(obj.coords)
file_obj = obj._file_obj
encoding = obj.encoding
elif isinstance(obj, AbstractDataStore):
vars, attrs = obj.load()
extra_coords = set()
file_obj = obj
encoding = obj.get_encoding()
else:
raise TypeError('can only decode Dataset or DataStore objects')
vars, attrs, coord_names = decode_cf_variables(
vars, attrs, concat_characters, mask_and_scale, decode_times,
decode_coords, drop_variables=drop_variables)
ds = Dataset(vars, attrs=attrs)
ds = ds.set_coords(coord_names.union(extra_coords).intersection(vars))
ds._file_obj = file_obj
ds.encoding = encoding
return ds
def cf_decoder(variables, attributes,
concat_characters=True, mask_and_scale=True,
decode_times=True):
"""
Decode a set of CF encoded variables and attributes.
See Also, decode_cf_variable
Parameters
----------
variables : dict
A dictionary mapping from variable name to xarray.Variable
attributes : dict
A dictionary mapping from attribute name to value
concat_characters : bool
Should character arrays be concatenated to strings, for
example: ['h', 'e', 'l', 'l', 'o'] -> 'hello'
mask_and_scale: bool
Lazily scale (using scale_factor and add_offset) and mask
(using _FillValue).
decode_times : bool
Decode cf times ('hours since 2000-01-01') to np.datetime64.
Returns
-------
decoded_variables : dict
A dictionary mapping from variable name to xarray.Variable objects.
decoded_attributes : dict
A dictionary mapping from attribute name to values.
"""
variables, attributes, _ = decode_cf_variables(
variables, attributes, concat_characters, mask_and_scale, decode_times)
return variables, attributes
def _encode_coordinates(variables, attributes, non_dim_coord_names):
# calculate global and variable specific coordinates
non_dim_coord_names = set(non_dim_coord_names)
for name in list(non_dim_coord_names):
if isinstance(name, basestring) and ' ' in name:
warnings.warn(
'coordinate {!r} has a space in its name, which means it '
'cannot be marked as a coordinate on disk and will be '
'saved as a data variable instead'.format(name),
SerializationWarning, stacklevel=6)
non_dim_coord_names.discard(name)
global_coordinates = non_dim_coord_names.copy()
variable_coordinates = defaultdict(set)
for coord_name in non_dim_coord_names:
target_dims = variables[coord_name].dims
for k, v in variables.items():
if (k not in non_dim_coord_names and k not in v.dims and
set(target_dims) <= set(v.dims)):
variable_coordinates[k].add(coord_name)
global_coordinates.discard(coord_name)
variables = OrderedDict((k, v.copy(deep=False))
for k, v in variables.items())
# These coordinates are saved according to CF conventions
for var_name, coord_names in variable_coordinates.items():
attrs = variables[var_name].attrs
if 'coordinates' in attrs:
raise ValueError('cannot serialize coordinates because variable '
"%s already has an attribute 'coordinates'"
% var_name)
attrs['coordinates'] = ' '.join(map(str, coord_names))
# These coordinates are not associated with any particular variables, so we
# save them under a global 'coordinates' attribute so xarray can roundtrip
# the dataset faithfully. Because this serialization goes beyond CF
# conventions, only do it if necessary.
# Reference discussion:
# http://mailman.cgd.ucar.edu/pipermail/cf-metadata/2014/057771.html
if global_coordinates:
attributes = OrderedDict(attributes)
if 'coordinates' in attributes:
raise ValueError('cannot serialize coordinates because the global '
"attribute 'coordinates' already exists")
attributes['coordinates'] = ' '.join(map(str, global_coordinates))
return variables, attributes
def encode_dataset_coordinates(dataset):
"""Encode coordinates on the given dataset object into variable specific
and global attributes.
When possible, this is done according to CF conventions.
Parameters
----------
dataset : Dataset
Object to encode.
Returns
-------
variables : dict
attrs : dict
"""
non_dim_coord_names = set(dataset.coords) - set(dataset.dims)
return _encode_coordinates(dataset._variables, dataset.attrs,
non_dim_coord_names=non_dim_coord_names)
def cf_encoder(variables, attributes):
"""
A function which takes a dicts of variables and attributes
and encodes them to conform to CF conventions as much
as possible. This includes masking, scaling, character
array handling, and CF-time encoding.
Decode a set of CF encoded variables and attributes.
See Also, decode_cf_variable
Parameters
----------
variables : dict
A dictionary mapping from variable name to xarray.Variable
attributes : dict
A dictionary mapping from attribute name to value
Returns
-------
encoded_variables : dict
A dictionary mapping from variable name to xarray.Variable,
encoded_attributes : dict
A dictionary mapping from attribute name to value
See also: encode_cf_variable
"""
new_vars = OrderedDict((k, encode_cf_variable(v, name=k))
for k, v in iteritems(variables))
return new_vars, attributes
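# Editor's note: hedged usage sketch, not part of the original module.
# `raw_dataset` below is a placeholder for a Dataset opened with
# decode_cf=False; the call simply applies the CF machinery defined above.
def _example_decode_cf(raw_dataset):
    # Masks _FillValue, applies scale_factor/add_offset and converts CF time
    # units to datetime64, via decode_cf_variables defined above.
    return decode_cf(raw_dataset)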
| {
"repo_name": "jcmgray/xarray",
"path": "xarray/conventions.py",
"copies": "1",
"size": "20549",
"license": "apache-2.0",
"hash": -7202417387283354000,
"line_mean": 34.5519031142,
"line_max": 79,
"alpha_frac": 0.6239233053,
"autogenerated": false,
"ratio": 4.212587125871258,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 578
} |
from __future__ import absolute_import, division, print_function
import warnings
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
from . import duck_array_ops, dtypes, formatting, ops
from .arithmetic import SupportsArithmetic
from .pycompat import OrderedDict, basestring, dask_array_type, suppress
from .utils import Frozen, SortedKeysDict
class ImplementsArrayReduce(object):
@classmethod
def _reduce_method(cls, func, include_skipna, numeric_only):
if include_skipna:
def wrapped_func(self, dim=None, axis=None, skipna=None,
keep_attrs=False, **kwargs):
return self.reduce(func, dim, axis, keep_attrs=keep_attrs,
skipna=skipna, allow_lazy=True, **kwargs)
else:
def wrapped_func(self, dim=None, axis=None, keep_attrs=False,
**kwargs):
return self.reduce(func, dim, axis, keep_attrs=keep_attrs,
allow_lazy=True, **kwargs)
return wrapped_func
_reduce_extra_args_docstring = \
"""dim : str or sequence of str, optional
Dimension(s) over which to apply `{name}`.
axis : int or sequence of int, optional
Axis(es) over which to apply `{name}`. Only one of the 'dim'
and 'axis' arguments can be supplied. If neither are supplied, then
`{name}` is calculated over axes."""
_cum_extra_args_docstring = \
"""dim : str or sequence of str, optional
Dimension over which to apply `{name}`.
axis : int or sequence of int, optional
Axis over which to apply `{name}`. Only one of the 'dim'
and 'axis' arguments can be supplied."""
class ImplementsDatasetReduce(object):
@classmethod
def _reduce_method(cls, func, include_skipna, numeric_only):
if include_skipna:
def wrapped_func(self, dim=None, keep_attrs=False, skipna=None,
**kwargs):
return self.reduce(func, dim, keep_attrs, skipna=skipna,
numeric_only=numeric_only, allow_lazy=True,
**kwargs)
else:
def wrapped_func(self, dim=None, keep_attrs=False, **kwargs):
return self.reduce(func, dim, keep_attrs,
numeric_only=numeric_only, allow_lazy=True,
**kwargs)
return wrapped_func
_reduce_extra_args_docstring = \
"""dim : str or sequence of str, optional
Dimension(s) over which to apply `{name}`. By default `{name}` is
applied over all dimensions."""
_cum_extra_args_docstring = \
"""dim : str or sequence of str, optional
Dimension over which to apply `{name}`.
axis : int or sequence of int, optional
Axis over which to apply `{name}`. Only one of the 'dim'
and 'axis' arguments can be supplied."""
class AbstractArray(ImplementsArrayReduce, formatting.ReprMixin):
"""Shared base class for DataArray and Variable."""
def __bool__(self):
return bool(self.values)
# Python 3 uses __bool__, Python 2 uses __nonzero__
__nonzero__ = __bool__
def __float__(self):
return float(self.values)
def __int__(self):
return int(self.values)
def __complex__(self):
return complex(self.values)
def __long__(self):
return long(self.values) # flake8: noqa
def __array__(self, dtype=None):
return np.asarray(self.values, dtype=dtype)
def __repr__(self):
return formatting.array_repr(self)
def _iter(self):
for n in range(len(self)):
yield self[n]
def __iter__(self):
if self.ndim == 0:
raise TypeError('iteration over a 0-d array')
return self._iter()
@property
def T(self):
return self.transpose()
def get_axis_num(self, dim):
"""Return axis number(s) corresponding to dimension(s) in this array.
Parameters
----------
dim : str or iterable of str
Dimension name(s) for which to lookup axes.
Returns
-------
int or tuple of int
Axis number or numbers corresponding to the given dimensions.
"""
if isinstance(dim, basestring):
return self._get_axis_num(dim)
else:
return tuple(self._get_axis_num(d) for d in dim)
def _get_axis_num(self, dim):
try:
return self.dims.index(dim)
except ValueError:
raise ValueError("%r not found in array dimensions %r" %
(dim, self.dims))
@property
def sizes(self):
"""Ordered mapping from dimension names to lengths.
Immutable.
See also
--------
Dataset.sizes
"""
return Frozen(OrderedDict(zip(self.dims, self.shape)))
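# Editor's note: hedged illustration, not part of the original module. For a
# placeholder DataArray `da` with dims ('x', 'y') and shape (2, 3),
# get_axis_num maps dimension names to positional axes and sizes gives an
# ordered name-to-length mapping.
def _example_axis_lookup(da):
    # da.get_axis_num('y') -> 1
    # dict(da.sizes)       -> {'x': 2, 'y': 3}
    return da.get_axis_num('y'), dict(da.sizes)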
class AttrAccessMixin(object):
"""Mixin class that allows getting keys with attribute access
"""
_initialized = False
@property
def _attr_sources(self):
"""List of places to look-up items for attribute-style access"""
return []
@property
def _item_sources(self):
"""List of places to look-up items for key-autocompletion """
return []
def __getattr__(self, name):
if name != '__setstate__':
# this avoids an infinite loop when pickle looks for the
# __setstate__ attribute before the xarray object is initialized
for source in self._attr_sources:
with suppress(KeyError):
return source[name]
raise AttributeError("%r object has no attribute %r" %
(type(self).__name__, name))
def __setattr__(self, name, value):
if self._initialized:
try:
# Allow setting instance variables if they already exist
# (e.g., _attrs). We use __getattribute__ instead of hasattr
# to avoid key lookups with attribute-style access.
self.__getattribute__(name)
except AttributeError:
raise AttributeError(
"cannot set attribute %r on a %r object. Use __setitem__ "
"style assignment (e.g., `ds['name'] = ...`) instead to "
"assign variables." % (name, type(self).__name__))
object.__setattr__(self, name, value)
def __dir__(self):
"""Provide method name lookup and completion. Only provide 'public'
methods.
"""
extra_attrs = [item
for sublist in self._attr_sources
for item in sublist
if isinstance(item, basestring)]
return sorted(set(dir(type(self)) + extra_attrs))
def _ipython_key_completions_(self):
"""Provide method for the key-autocompletions in IPython.
See http://ipython.readthedocs.io/en/stable/config/integrating.html#tab-completion
For the details.
"""
item_lists = [item
for sublist in self._item_sources
for item in sublist
if isinstance(item, basestring)]
return list(set(item_lists))
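# Editor's note: hedged illustration, not part of the original module. The
# mixin above is what makes attribute-style access work on Datasets and
# DataArrays: for a Dataset `ds` containing a variable named 'temperature',
# `ds.temperature` resolves through __getattr__ to ds['temperature'], while
# unknown names raise AttributeError instead of silently creating new state.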
def get_squeeze_dims(xarray_obj, dim, axis=None):
"""Get a list of dimensions to squeeze out.
"""
if dim is not None and axis is not None:
raise ValueError('cannot use both parameters `axis` and `dim`')
if dim is None and axis is None:
dim = [d for d, s in xarray_obj.sizes.items() if s == 1]
else:
if isinstance(dim, basestring):
dim = [dim]
if isinstance(axis, int):
axis = (axis, )
if isinstance(axis, tuple):
for a in axis:
if not isinstance(a, int):
raise ValueError(
'parameter `axis` must be int or tuple of int.')
alldims = list(xarray_obj.sizes.keys())
dim = [alldims[a] for a in axis]
if any(xarray_obj.sizes[k] > 1 for k in dim):
raise ValueError('cannot select a dimension to squeeze out '
'which has length greater than one')
return dim
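# Editor's note: hedged illustration, not part of the original module; run it
# manually (with xarray importable as xr and numpy as np) to see which
# dimensions are selected for squeezing.
def _example_get_squeeze_dims():
    import numpy as np
    import xarray as xr
    da = xr.DataArray(np.zeros((1, 3)), dims=('x', 'y'))
    # With no dim/axis given, all length-1 dimensions are selected.
    return get_squeeze_dims(da, dim=None)  # -> ['x']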
class DataWithCoords(SupportsArithmetic, AttrAccessMixin):
"""Shared base class for Dataset and DataArray."""
def squeeze(self, dim=None, drop=False, axis=None):
"""Return a new object with squeezed data.
Parameters
----------
dim : None or str or tuple of str, optional
Selects a subset of the length one dimensions. If a dimension is
selected with length greater than one, an error is raised. If
None, all length one dimensions are squeezed.
drop : bool, optional
If ``drop=True``, drop squeezed coordinates instead of making them
scalar.
axis : int, optional
Select the dimension to squeeze. Added for compatibility reasons.
Returns
-------
squeezed : same type as caller
This object, but with all or a subset of the dimensions of
length 1 removed.
See Also
--------
numpy.squeeze
"""
dims = get_squeeze_dims(self, dim, axis)
return self.isel(drop=drop, **{d: 0 for d in dims})
def get_index(self, key):
"""Get an index for a dimension, with fall-back to a default RangeIndex
"""
if key not in self.dims:
raise KeyError(key)
try:
return self.indexes[key]
except KeyError:
# need to ensure dtype=int64 in case range is empty on Python 2
return pd.Index(range(self.sizes[key]), name=key, dtype=np.int64)
def _calc_assign_results(self, kwargs):
results = SortedKeysDict()
for k, v in kwargs.items():
if callable(v):
results[k] = v(self)
else:
results[k] = v
return results
def assign_coords(self, **kwargs):
"""Assign new coordinates to this object.
Returns a new object with all the original data in addition to the new
coordinates.
Parameters
----------
kwargs : keyword, value pairs
keywords are the variables names. If the values are callable, they
are computed on this object and assigned to new coordinate
variables. If the values are not callable, (e.g. a DataArray,
scalar, or array), they are simply assigned.
Returns
-------
assigned : same type as caller
A new object with the new coordinates in addition to the existing
data.
Examples
--------
Convert longitude coordinates from 0-359 to -180-179:
>>> da = xr.DataArray(np.random.rand(4),
... coords=[np.array([358, 359, 0, 1])],
... dims='lon')
>>> da
<xarray.DataArray (lon: 4)>
array([0.28298 , 0.667347, 0.657938, 0.177683])
Coordinates:
* lon (lon) int64 358 359 0 1
>>> da.assign_coords(lon=(((da.lon + 180) % 360) - 180))
<xarray.DataArray (lon: 4)>
array([0.28298 , 0.667347, 0.657938, 0.177683])
Coordinates:
* lon (lon) int64 -2 -1 0 1
Notes
-----
Since ``kwargs`` is a dictionary, the order of your arguments may not
be preserved, and so the order of the new variables is not well
defined. Assigning multiple variables within the same ``assign_coords``
is possible, but you cannot reference other variables created within
the same ``assign_coords`` call.
See also
--------
Dataset.assign
"""
data = self.copy(deep=False)
results = self._calc_assign_results(kwargs)
data.coords.update(results)
return data
def assign_attrs(self, *args, **kwargs):
"""Assign new attrs to this object.
Returns a new object equivalent to self.attrs.update(*args, **kwargs).
Parameters
----------
args : positional arguments passed into ``attrs.update``.
kwargs : keyword arguments passed into ``attrs.update``.
Returns
-------
assigned : same type as caller
A new object with the new attrs in addition to the existing data.
See also
--------
Dataset.assign
"""
out = self.copy(deep=False)
out.attrs.update(*args, **kwargs)
return out
def pipe(self, func, *args, **kwargs):
"""
Apply func(self, *args, **kwargs)
This method replicates the pandas method of the same name.
Parameters
----------
func : function
function to apply to this xarray object (Dataset/DataArray).
``args``, and ``kwargs`` are passed into ``func``.
Alternatively a ``(callable, data_keyword)`` tuple where
``data_keyword`` is a string indicating the keyword of
``callable`` that expects the xarray object.
args : positional arguments passed into ``func``.
kwargs : a dictionary of keyword arguments passed into ``func``.
Returns
-------
object : the return type of ``func``.
Notes
-----
Use ``.pipe`` when chaining together functions that expect
xarray or pandas objects, e.g., instead of writing
>>> f(g(h(ds), arg1=a), arg2=b, arg3=c)
You can write
>>> (ds.pipe(h)
... .pipe(g, arg1=a)
... .pipe(f, arg2=b, arg3=c)
... )
If you have a function that takes the data as (say) the second
argument, pass a tuple indicating which keyword expects the
data. For example, suppose ``f`` takes its data as ``arg2``:
>>> (ds.pipe(h)
... .pipe(g, arg1=a)
... .pipe((f, 'arg2'), arg1=a, arg3=c)
... )
See Also
--------
pandas.DataFrame.pipe
"""
if isinstance(func, tuple):
func, target = func
if target in kwargs:
msg = '%s is both the pipe target and a keyword argument' % target
raise ValueError(msg)
kwargs[target] = self
return func(*args, **kwargs)
else:
return func(self, *args, **kwargs)
def groupby(self, group, squeeze=True):
"""Returns a GroupBy object for performing grouped operations.
Parameters
----------
group : str, DataArray or IndexVariable
Array whose unique values should be used to group this array. If a
string, must be the name of a variable contained in this dataset.
squeeze : boolean, optional
If "group" is a dimension of any arrays in this dataset, `squeeze`
controls whether the subarrays have a dimension of length 1 along
that dimension or if the dimension is squeezed out.
Returns
-------
grouped : GroupBy
A `GroupBy` object patterned after `pandas.GroupBy` that can be
iterated over in the form of `(unique_value, grouped_array)` pairs.
Examples
--------
Calculate daily anomalies for daily data:
>>> da = xr.DataArray(np.linspace(0, 1826, num=1827),
... coords=[pd.date_range('1/1/2000', '31/12/2004',
... freq='D')],
... dims='time')
>>> da
<xarray.DataArray (time: 1827)>
array([0.000e+00, 1.000e+00, 2.000e+00, ..., 1.824e+03, 1.825e+03, 1.826e+03])
Coordinates:
* time (time) datetime64[ns] 2000-01-01 2000-01-02 2000-01-03 ...
>>> da.groupby('time.dayofyear') - da.groupby('time.dayofyear').mean('time')
<xarray.DataArray (time: 1827)>
array([-730.8, -730.8, -730.8, ..., 730.2, 730.2, 730.5])
Coordinates:
* time (time) datetime64[ns] 2000-01-01 2000-01-02 2000-01-03 ...
dayofyear (time) int64 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 ...
See Also
--------
core.groupby.DataArrayGroupBy
core.groupby.DatasetGroupBy
"""
return self._groupby_cls(self, group, squeeze=squeeze)
def groupby_bins(self, group, bins, right=True, labels=None, precision=3,
include_lowest=False, squeeze=True):
"""Returns a GroupBy object for performing grouped operations.
Rather than using all unique values of `group`, the values are discretized
first by applying `pandas.cut` [1]_ to `group`.
Parameters
----------
group : str, DataArray or IndexVariable
Array whose binned values should be used to group this array. If a
string, must be the name of a variable contained in this dataset.
bins : int or array of scalars
If bins is an int, it defines the number of equal-width bins in the
range of x. However, in this case, the range of x is extended by .1%
on each side to include the min or max values of x. If bins is a
sequence it defines the bin edges allowing for non-uniform bin
width. No extension of the range of x is done in this case.
right : boolean, optional
Indicates whether the bins include the rightmost edge or not. If
right == True (the default), then the bins [1,2,3,4] indicate
(1,2], (2,3], (3,4].
labels : array or boolean, default None
Used as labels for the resulting bins. Must be of the same length as
the resulting bins. If False, string bin labels are assigned by
`pandas.cut`.
precision : int
The precision at which to store and display the bins labels.
include_lowest : bool
Whether the first interval should be left-inclusive or not.
squeeze : boolean, optional
If "group" is a dimension of any arrays in this dataset, `squeeze`
controls whether the subarrays have a dimension of length 1 along
that dimension or if the dimension is squeezed out.
Returns
-------
grouped : GroupBy
A `GroupBy` object patterned after `pandas.GroupBy` that can be
iterated over in the form of `(unique_value, grouped_array)` pairs.
The name of the group has the added suffix `_bins` in order to
distinguish it from the original variable.
References
----------
.. [1] http://pandas.pydata.org/pandas-docs/stable/generated/pandas.cut.html
"""
return self._groupby_cls(self, group, squeeze=squeeze, bins=bins,
cut_kwargs={'right': right, 'labels': labels,
'precision': precision,
'include_lowest': include_lowest})
def rolling(self, min_periods=None, center=False, **windows):
"""
Rolling window object.
Parameters
----------
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA). The default, None, is equivalent to
setting min_periods equal to the size of the window.
center : boolean, default False
Set the labels at the center of the window.
**windows : dim=window
dim : str
Name of the dimension to create the rolling iterator
along (e.g., `time`).
window : int
Size of the moving window.
Returns
-------
Rolling object (core.rolling.DataArrayRolling for DataArray,
core.rolling.DatasetRolling for Dataset.)
Examples
--------
Create rolling seasonal average of monthly data e.g. DJF, JFM, ..., SON:
>>> da = xr.DataArray(np.linspace(0, 11, num=12),
... coords=[pd.date_range('15/12/1999',
... periods=12, freq=pd.DateOffset(months=1))],
... dims='time')
>>> da
<xarray.DataArray (time: 12)>
array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11.])
Coordinates:
* time (time) datetime64[ns] 1999-12-15 2000-01-15 2000-02-15 ...
>>> da.rolling(time=3, center=True).mean()
<xarray.DataArray (time: 12)>
array([nan, 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., nan])
Coordinates:
* time (time) datetime64[ns] 1999-12-15 2000-01-15 2000-02-15 ...
Remove the NaNs using ``dropna()``:
>>> da.rolling(time=3, center=True).mean().dropna('time')
<xarray.DataArray (time: 10)>
array([ 1., 2., 3., 4., 5., 6., 7., 8., 9., 10.])
Coordinates:
* time (time) datetime64[ns] 2000-01-15 2000-02-15 2000-03-15 ...
See Also
--------
core.rolling.DataArrayRolling
core.rolling.DatasetRolling
"""
return self._rolling_cls(self, min_periods=min_periods,
center=center, **windows)
def resample(self, freq=None, dim=None, how=None, skipna=None,
closed=None, label=None, base=0, keep_attrs=False, **indexer):
"""Returns a Resample object for performing resampling operations.
Handles both downsampling and upsampling. If any intervals contain no
values from the original object, they will be given the value ``NaN``.
Parameters
----------
skipna : bool, optional
Whether to skip missing values when aggregating in downsampling.
closed : 'left' or 'right', optional
Side of each interval to treat as closed.
label : 'left' or 'right', optional
Side of each interval to use for labeling.
base : int, optional
For frequencies that evenly subdivide 1 day, the "origin" of the
aggregated intervals. For example, for '24H' frequency, base could
range from 0 through 23.
keep_attrs : bool, optional
If True, the object's attributes (`attrs`) will be copied from
the original object to the new one. If False (default), the new
object will be returned without attributes.
**indexer : {dim: freq}
Dictionary with a key indicating the dimension name to resample
over and a value corresponding to the resampling frequency.
Returns
-------
resampled : same type as caller
This object resampled.
Examples
--------
Downsample monthly time-series data to seasonal data:
>>> da = xr.DataArray(np.linspace(0, 11, num=12),
... coords=[pd.date_range('15/12/1999',
... periods=12, freq=pd.DateOffset(months=1))],
... dims='time')
>>> da
<xarray.DataArray (time: 12)>
array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11.])
Coordinates:
* time (time) datetime64[ns] 1999-12-15 2000-01-15 2000-02-15 ...
>>> da.resample(time="Q-DEC").mean()
<xarray.DataArray (time: 4)>
array([ 1., 4., 7., 10.])
Coordinates:
* time (time) datetime64[ns] 2000-02-29 2000-05-31 2000-08-31 2000-11-30
Upsample monthly time-series data to daily data:
>>> da.resample(time='1D').interpolate('linear')
<xarray.DataArray (time: 337)>
array([ 0. , 0.032258, 0.064516, ..., 10.935484, 10.967742, 11. ])
Coordinates:
* time (time) datetime64[ns] 1999-12-15 1999-12-16 1999-12-17 ...
References
----------
.. [1] http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases
"""
from .dataarray import DataArray
from .resample import RESAMPLE_DIM
if dim is not None:
if how is None:
how = 'mean'
return self._resample_immediately(freq, dim, how, skipna, closed,
label, base, keep_attrs)
if (how is not None) and indexer:
raise TypeError("If passing an 'indexer' then 'dim' "
"and 'how' should not be used")
# More than one indexer is ambiguous, but we do in fact need one if
# "dim" was not provided, until the old API is fully deprecated
if len(indexer) != 1:
raise ValueError(
"Resampling only supported along single dimensions."
)
dim, freq = indexer.popitem()
if isinstance(dim, basestring):
dim_name = dim
dim = self[dim]
else:
raise TypeError("Dimension name should be a string; "
"was passed %r" % dim)
group = DataArray(dim, [(dim.dims, dim)], name=RESAMPLE_DIM)
grouper = pd.Grouper(freq=freq, closed=closed, label=label, base=base)
resampler = self._resample_cls(self, group=group, dim=dim_name,
grouper=grouper,
resample_dim=RESAMPLE_DIM)
return resampler
def _resample_immediately(self, freq, dim, how, skipna,
closed, label, base, keep_attrs):
"""Implement the original version of .resample() which immediately
executes the desired resampling operation. """
from .dataarray import DataArray
RESAMPLE_DIM = '__resample_dim__'
warnings.warn("\n.resample() has been modified to defer "
"calculations. Instead of passing 'dim' and "
"how=\"{how}\", instead consider using "
".resample({dim}=\"{freq}\").{how}('{dim}') ".format(
dim=dim, freq=freq, how=how),
DeprecationWarning, stacklevel=3)
if isinstance(dim, basestring):
dim = self[dim]
group = DataArray(dim, [(dim.dims, dim)], name=RESAMPLE_DIM)
grouper = pd.Grouper(freq=freq, how=how, closed=closed, label=label,
base=base)
gb = self._groupby_cls(self, group, grouper=grouper)
if isinstance(how, basestring):
f = getattr(gb, how)
if how in ['first', 'last']:
result = f(skipna=skipna, keep_attrs=keep_attrs)
elif how == 'count':
result = f(dim=dim.name, keep_attrs=keep_attrs)
else:
result = f(dim=dim.name, skipna=skipna, keep_attrs=keep_attrs)
else:
result = gb.reduce(how, dim=dim.name, keep_attrs=keep_attrs)
result = result.rename({RESAMPLE_DIM: dim.name})
return result
def where(self, cond, other=dtypes.NA, drop=False):
"""Filter elements from this object according to a condition.
This operation follows the normal broadcasting and alignment rules that
xarray uses for binary arithmetic.
Parameters
----------
cond : DataArray or Dataset with boolean dtype
Locations at which to preserve this object's values.
other : scalar, DataArray or Dataset, optional
Value to use for locations in this object where ``cond`` is False.
By default, these locations are filled with NA.
drop : boolean, optional
If True, coordinate labels that only correspond to False values of
the condition are dropped from the result. Mutually exclusive with
``other``.
Returns
-------
Same type as caller.
Examples
--------
>>> import numpy as np
>>> a = xr.DataArray(np.arange(25).reshape(5, 5), dims=('x', 'y'))
>>> a.where(a.x + a.y < 4)
<xarray.DataArray (x: 5, y: 5)>
array([[ 0., 1., 2., 3., nan],
[ 5., 6., 7., nan, nan],
[ 10., 11., nan, nan, nan],
[ 15., nan, nan, nan, nan],
[ nan, nan, nan, nan, nan]])
Dimensions without coordinates: x, y
>>> a.where(a.x + a.y < 5, -1)
<xarray.DataArray (x: 5, y: 5)>
array([[ 0, 1, 2, 3, 4],
[ 5, 6, 7, 8, -1],
[10, 11, 12, -1, -1],
[15, 16, -1, -1, -1],
[20, -1, -1, -1, -1]])
Dimensions without coordinates: x, y
>>> a.where(a.x + a.y < 4, drop=True)
<xarray.DataArray (x: 4, y: 4)>
array([[ 0., 1., 2., 3.],
[ 5., 6., 7., nan],
[ 10., 11., nan, nan],
[ 15., nan, nan, nan]])
Dimensions without coordinates: x, y
See also
--------
numpy.where : corresponding numpy function
where : equivalent function
"""
from .alignment import align
from .dataarray import DataArray
from .dataset import Dataset
if drop:
if other is not dtypes.NA:
raise ValueError('cannot set `other` if drop=True')
if not isinstance(cond, (Dataset, DataArray)):
raise TypeError("cond argument is %r but must be a %r or %r" %
(cond, Dataset, DataArray))
# align so we can use integer indexing
self, cond = align(self, cond)
# get cond with the minimal size needed for the Dataset
if isinstance(cond, Dataset):
clipcond = cond.to_array().any('variable')
else:
clipcond = cond
# clip the data corresponding to coordinate dims that are not used
nonzeros = zip(clipcond.dims, np.nonzero(clipcond.values))
indexers = {k: np.unique(v) for k, v in nonzeros}
self = self.isel(**indexers)
cond = cond.isel(**indexers)
return ops.where_method(self, cond, other)
def close(self):
"""Close any files linked to this object
"""
if self._file_obj is not None:
self._file_obj.close()
self._file_obj = None
def isin(self, test_elements):
"""Tests each value in the array for whether it is in the supplied list.
Parameters
----------
test_elements : array_like
The values against which to test each value of this object.
This argument is flattened if an array or array_like.
See numpy notes for behavior with non-array-like parameters.
Returns
-------
isin : same as object, bool
Has the same shape as this object.
Examples
--------
>>> array = xr.DataArray([1, 2, 3], dims='x')
>>> array.isin([1, 3])
<xarray.DataArray (x: 3)>
array([ True, False, True])
Dimensions without coordinates: x
See also
--------
numpy.isin
"""
from .computation import apply_ufunc
from .dataset import Dataset
from .dataarray import DataArray
from .variable import Variable
if isinstance(test_elements, Dataset):
raise TypeError(
'isin() argument must be convertible to an array: {}'
.format(test_elements))
elif isinstance(test_elements, (Variable, DataArray)):
# need to explicitly pull out data to support dask arrays as the
# second argument
test_elements = test_elements.data
return apply_ufunc(
duck_array_ops.isin,
self,
kwargs=dict(test_elements=test_elements),
dask='allowed',
)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def full_like(other, fill_value, dtype=None):
"""Return a new object with the same shape and type as a given object.
Parameters
----------
other : DataArray, Dataset, or Variable
The reference object whose shape and type will be copied.
fill_value : scalar
Value to fill the new object with before returning it.
dtype : dtype, optional
dtype of the new array. If omitted, it defaults to other.dtype.
Returns
-------
out : same as object
New object with the same shape and type as other, with the data
filled with fill_value. Coords will be copied from other.
If other is based on dask, the new one will be as well, and will be
split in the same chunks.
"""
from .dataarray import DataArray
from .dataset import Dataset
from .variable import Variable
if isinstance(other, Dataset):
data_vars = OrderedDict(
(k, _full_like_variable(v, fill_value, dtype))
for k, v in other.data_vars.items())
return Dataset(data_vars, coords=other.coords, attrs=other.attrs)
elif isinstance(other, DataArray):
return DataArray(
_full_like_variable(other.variable, fill_value, dtype),
dims=other.dims, coords=other.coords, attrs=other.attrs,
name=other.name)
elif isinstance(other, Variable):
return _full_like_variable(other, fill_value, dtype)
else:
raise TypeError("Expected DataArray, Dataset, or Variable")
def _full_like_variable(other, fill_value, dtype=None):
"""Inner function of full_like, where other must be a variable
"""
from .variable import Variable
if isinstance(other.data, dask_array_type):
import dask.array
if dtype is None:
dtype = other.dtype
data = dask.array.full(other.shape, fill_value, dtype=dtype,
chunks=other.data.chunks)
else:
data = np.full_like(other, fill_value, dtype=dtype)
return Variable(dims=other.dims, data=data, attrs=other.attrs)
def zeros_like(other, dtype=None):
"""Shorthand for full_like(other, 0, dtype)
"""
return full_like(other, 0, dtype)
def ones_like(other, dtype=None):
"""Shorthand for full_like(other, 1, dtype)
"""
return full_like(other, 1, dtype)
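# Illustrative sketch (not part of the original module): how full_like and its
# zeros_like/ones_like shorthands behave on a small in-memory DataArray. The
# relative import mirrors this module's own style; all names are assumptions.
def _example_full_like_usage():
    import numpy as np
    from .dataarray import DataArray
    da = DataArray(np.arange(6).reshape(2, 3), dims=('x', 'y'))
    filled = full_like(da, fill_value=-1)   # same dims/coords, data all -1
    zeros = zeros_like(da, dtype=float)     # same shape, float zeros
    ones = ones_like(da)                    # same shape, ones with da.dtype
    return filled, zeros, ones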
def is_np_datetime_like(dtype):
"""Check if a dtype is a subclass of the numpy datetime types
"""
return (np.issubdtype(dtype, np.datetime64) or
np.issubdtype(dtype, np.timedelta64))
def contains_cftime_datetimes(var):
"""Check if a variable contains cftime datetime objects"""
try:
from cftime import datetime as cftime_datetime
except ImportError:
return False
else:
if var.dtype == np.dtype('O') and var.data.size > 0:
sample = var.data.ravel()[0]
if isinstance(sample, dask_array_type):
sample = sample.compute()
if isinstance(sample, np.ndarray):
sample = sample.item()
return isinstance(sample, cftime_datetime)
else:
return False
def _contains_datetime_like_objects(var):
"""Check if a variable contains datetime like objects (either
np.datetime64, np.timedelta64, or cftime.datetime)"""
return is_np_datetime_like(var.dtype) or contains_cftime_datetimes(var)
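# Illustrative sketch (not part of the original module): the numpy datetime check
# above accepts both datetime64 and timedelta64 dtypes and rejects everything else.
def _example_is_np_datetime_like():
    import numpy as np
    assert is_np_datetime_like(np.dtype('datetime64[ns]'))
    assert is_np_datetime_like(np.dtype('timedelta64[ns]'))
    assert not is_np_datetime_like(np.dtype('float64'))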
| {
"repo_name": "jcmgray/xarray",
"path": "xarray/core/common.py",
"copies": "1",
"size": "36053",
"license": "apache-2.0",
"hash": 8784939485576623000,
"line_mean": 36.3219461698,
"line_max": 90,
"alpha_frac": 0.5509943694,
"autogenerated": false,
"ratio": 4.2696589294173375,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5320653298817337,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import warnings
from distutils.version import LooseVersion
import numpy as np
from . import dtypes
from .dask_array_ops import dask_rolling_wrapper
from .ops import (
bn, has_bottleneck, inject_bottleneck_rolling_methods,
inject_datasetrolling_methods)
from .pycompat import OrderedDict, dask_array_type, zip
def _get_new_dimname(dims, new_dim):
""" Get an new dimension name based on new_dim, that is not used in dims.
If the same name exists, we add an underscore(s) in the head.
Example1:
dims: ['a', 'b', 'c']
new_dim: ['_rolling']
-> ['_rolling']
Example2:
dims: ['a', 'b', 'c', '_rolling']
new_dim: ['_rolling']
-> ['__rolling']
"""
while new_dim in dims:
new_dim = '_' + new_dim
return new_dim
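# Illustrative sketch (not part of the original module): the underscore-prefixing
# behaviour described in the docstring above.
def _example_get_new_dimname():
    assert _get_new_dimname(['a', 'b', 'c'], '_rolling') == '_rolling'
    assert _get_new_dimname(['a', 'b', 'c', '_rolling'], '_rolling') == '__rolling'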
class Rolling(object):
"""A object that implements the moving window pattern.
See Also
--------
Dataset.groupby
DataArray.groupby
Dataset.rolling
DataArray.rolling
"""
_attributes = ['window', 'min_periods', 'center', 'dim']
def __init__(self, obj, min_periods=None, center=False, **windows):
"""
Moving window object.
Parameters
----------
obj : Dataset or DataArray
Object to window.
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA). The default, None, is equivalent to
setting min_periods equal to the size of the window.
center : boolean, default False
Set the labels at the center of the window.
**windows : dim=window
dim : str
Name of the dimension to create the rolling iterator
along (e.g., `time`).
window : int
Size of the moving window.
Returns
-------
rolling : type of input argument
"""
if (has_bottleneck and
(LooseVersion(bn.__version__) < LooseVersion('1.0'))):
warnings.warn('xarray requires bottleneck version 1.0 or '
'greater for rolling operations. Rolling '
'aggregation methods will use numpy instead '
'of bottleneck.')
if len(windows) != 1:
raise ValueError('exactly one dim/window should be provided')
dim, window = next(iter(windows.items()))
if window <= 0:
raise ValueError('window must be > 0')
self.obj = obj
# attributes
self.window = window
self.min_periods = min_periods
if min_periods is None:
self._min_periods = window
else:
if min_periods <= 0:
raise ValueError(
'min_periods must be greater than zero or None')
self._min_periods = min_periods
self.center = center
self.dim = dim
def __repr__(self):
"""provide a nice str repr of our rolling object"""
attrs = ["{k}->{v}".format(k=k, v=getattr(self, k))
for k in self._attributes
if getattr(self, k, None) is not None]
return "{klass} [{attrs}]".format(klass=self.__class__.__name__,
attrs=','.join(attrs))
def __len__(self):
return self.obj.sizes[self.dim]
class DataArrayRolling(Rolling):
def __init__(self, obj, min_periods=None, center=False, **windows):
"""
Moving window object for DataArray.
You should use DataArray.rolling() method to construct this object
instead of the class constructor.
Parameters
----------
obj : DataArray
Object to window.
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA). The default, None, is equivalent to
setting min_periods equal to the size of the window.
center : boolean, default False
Set the labels at the center of the window.
**windows : dim=window
dim : str
Name of the dimension to create the rolling iterator
along (e.g., `time`).
window : int
Size of the moving window.
Returns
-------
rolling : type of input argument
See Also
--------
DataArray.rolling
DataArray.groupby
Dataset.rolling
Dataset.groupby
"""
super(DataArrayRolling, self).__init__(obj, min_periods=min_periods,
center=center, **windows)
self.window_labels = self.obj[self.dim]
def __iter__(self):
stops = np.arange(1, len(self.window_labels) + 1)
starts = stops - int(self.window)
starts[:int(self.window)] = 0
for (label, start, stop) in zip(self.window_labels, starts, stops):
window = self.obj.isel(**{self.dim: slice(start, stop)})
counts = window.count(dim=self.dim)
window = window.where(counts >= self._min_periods)
yield (label, window)
def construct(self, window_dim, stride=1, fill_value=dtypes.NA):
"""
Convert this rolling object to xr.DataArray,
where the window dimension is stacked as a new dimension
Parameters
----------
window_dim: str
New name of the window dimension.
stride: integer, optional
Size of stride for the rolling window.
fill_value: optional. Default dtypes.NA
Filling value to match the dimension size.
Returns
-------
DataArray that is a view of the original array. The returned array is
not writeable.
Examples
--------
>>> da = DataArray(np.arange(8).reshape(2, 4), dims=('a', 'b'))
>>>
>>> rolling = da.rolling(a=3)
>>> rolling.construct('window_dim')
<xarray.DataArray (a: 2, b: 4, window_dim: 3)>
array([[[np.nan, np.nan, 0], [np.nan, 0, 1], [0, 1, 2], [1, 2, 3]],
[[np.nan, np.nan, 4], [np.nan, 4, 5], [4, 5, 6], [5, 6, 7]]])
Dimensions without coordinates: a, b, window_dim
>>>
>>> rolling = da.rolling(a=3, center=True)
>>> rolling.construct('window_dim')
<xarray.DataArray (a: 2, b: 4, window_dim: 3)>
array([[[np.nan, 0, 1], [0, 1, 2], [1, 2, 3], [2, 3, np.nan]],
[[np.nan, 4, 5], [4, 5, 6], [5, 6, 7], [6, 7, np.nan]]])
Dimensions without coordinates: a, b, window_dim
"""
from .dataarray import DataArray
window = self.obj.variable.rolling_window(self.dim, self.window,
window_dim, self.center,
fill_value=fill_value)
result = DataArray(window, dims=self.obj.dims + (window_dim,),
coords=self.obj.coords)
return result.isel(**{self.dim: slice(None, None, stride)})
def reduce(self, func, **kwargs):
"""Reduce the items in this group by applying `func` along some
dimension(s).
Parameters
----------
func : function
Function which can be called in the form
`func(x, **kwargs)` to return the result of collapsing an
np.ndarray over the rolling dimension.
**kwargs : dict
Additional keyword arguments passed on to `func`.
Returns
-------
reduced : DataArray
Array with summarized data.
"""
rolling_dim = _get_new_dimname(self.obj.dims, '_rolling_dim')
windows = self.construct(rolling_dim)
result = windows.reduce(func, dim=rolling_dim, **kwargs)
# Find valid windows based on count.
counts = self._counts()
return result.where(counts >= self._min_periods)
def _counts(self):
""" Number of non-nan entries in each rolling window. """
rolling_dim = _get_new_dimname(self.obj.dims, '_rolling_dim')
# We use False as the fill_value instead of np.nan, since a boolean
# array is faster to reduce than an object array.
# Using skipna=False is also faster since it does not need to
# copy the strided array.
counts = (self.obj.notnull()
.rolling(center=self.center, **{self.dim: self.window})
.construct(rolling_dim, fill_value=False)
.sum(dim=rolling_dim, skipna=False))
return counts
@classmethod
def _reduce_method(cls, func):
"""
Methods to return a wrapped function for any function `func` for
numpy methods.
"""
def wrapped_func(self, **kwargs):
return self.reduce(func, **kwargs)
return wrapped_func
@classmethod
def _bottleneck_reduce(cls, func):
"""
Methods to return a wrapped function for any function `func` for
bottleneck methods, except for `median`.
"""
def wrapped_func(self, **kwargs):
from .dataarray import DataArray
# bottleneck doesn't allow min_count to be 0, although it should
# work the same as if min_count = 1
if self.min_periods is not None and self.min_periods == 0:
min_count = 1
else:
min_count = self.min_periods
axis = self.obj.get_axis_num(self.dim)
padded = self.obj.variable
if self.center:
if (LooseVersion(np.__version__) < LooseVersion('1.13') and
self.obj.dtype.kind == 'b'):
# with numpy < 1.13 bottleneck cannot handle np.nan-Boolean
# mixed array correctly. We cast boolean array to float.
padded = padded.astype(float)
if isinstance(padded.data, dask_array_type):
# Workaround to make the padded chunk size larger than
# self.window - 1
shift = - (self.window + 1) // 2
offset = (self.window - 1) // 2
valid = (slice(None), ) * axis + (
slice(offset, offset + self.obj.shape[axis]), )
else:
shift = (-self.window // 2) + 1
valid = (slice(None), ) * axis + (slice(-shift, None), )
padded = padded.pad_with_fill_value(**{self.dim: (0, -shift)})
if isinstance(padded.data, dask_array_type):
values = dask_rolling_wrapper(func, padded,
window=self.window,
min_count=min_count,
axis=axis)
else:
values = func(padded.data, window=self.window,
min_count=min_count, axis=axis)
if self.center:
values = values[valid]
result = DataArray(values, self.obj.coords)
return result
return wrapped_func
class DatasetRolling(Rolling):
def __init__(self, obj, min_periods=None, center=False, **windows):
"""
Moving window object for Dataset.
You should use Dataset.rolling() method to construct this object
instead of the class constructor.
Parameters
----------
obj : Dataset
Object to window.
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA). The default, None, is equivalent to
setting min_periods equal to the size of the window.
center : boolean, default False
Set the labels at the center of the window.
**windows : dim=window
dim : str
Name of the dimension to create the rolling iterator
along (e.g., `time`).
window : int
Size of the moving window.
Returns
-------
rolling : type of input argument
See Also
--------
Dataset.rolling
DataArray.rolling
Dataset.groupby
DataArray.groupby
"""
super(DatasetRolling, self).__init__(obj,
min_periods, center, **windows)
if self.dim not in self.obj.dims:
raise KeyError(self.dim)
# Keep each Rolling object as an OrderedDict
self.rollings = OrderedDict()
for key, da in self.obj.data_vars.items():
# keep rolling objects only for the data variables that depend on self.dim
if self.dim in da.dims:
self.rollings[key] = DataArrayRolling(da, min_periods,
center, **windows)
def reduce(self, func, **kwargs):
"""Reduce the items in this group by applying `func` along some
dimension(s).
Parameters
----------
func : function
Function which can be called in the form
`func(x, **kwargs)` to return the result of collapsing an
np.ndarray over the rolling dimension.
**kwargs : dict
Additional keyword arguments passed on to `func`.
Returns
-------
reduced : Dataset
Dataset with summarized data.
"""
from .dataset import Dataset
reduced = OrderedDict()
for key, da in self.obj.data_vars.items():
if self.dim in da.dims:
reduced[key] = self.rollings[key].reduce(func, **kwargs)
else:
reduced[key] = self.obj[key]
return Dataset(reduced, coords=self.obj.coords)
def _counts(self):
from .dataset import Dataset
reduced = OrderedDict()
for key, da in self.obj.data_vars.items():
if self.dim in da.dims:
reduced[key] = self.rollings[key]._counts()
else:
reduced[key] = self.obj[key]
return Dataset(reduced, coords=self.obj.coords)
@classmethod
def _reduce_method(cls, func):
"""
Return a wrapped function for injecting numpy and bottleneck methods.
see ops.inject_datasetrolling_methods
"""
def wrapped_func(self, **kwargs):
from .dataset import Dataset
reduced = OrderedDict()
for key, da in self.obj.data_vars.items():
if self.dim in da.dims:
reduced[key] = getattr(self.rollings[key],
func.__name__)(**kwargs)
else:
reduced[key] = self.obj[key]
return Dataset(reduced, coords=self.obj.coords)
return wrapped_func
def construct(self, window_dim, stride=1, fill_value=dtypes.NA):
"""
Convert this rolling object to xr.Dataset,
where the window dimension is stacked as a new dimension
Parameters
----------
window_dim: str
New name of the window dimension.
stride: integer, optional
Size of stride for the rolling window.
fill_value: optional. Default dtypes.NA
Filling value to match the dimension size.
Returns
-------
Dataset with variables converted from rolling object.
"""
from .dataset import Dataset
dataset = OrderedDict()
for key, da in self.obj.data_vars.items():
if self.dim in da.dims:
dataset[key] = self.rollings[key].construct(
window_dim, fill_value=fill_value)
else:
dataset[key] = da
return Dataset(dataset, coords=self.obj.coords).isel(
**{self.dim: slice(None, None, stride)})
inject_bottleneck_rolling_methods(DataArrayRolling)
inject_datasetrolling_methods(DatasetRolling)
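# Illustrative sketch (not part of the original module): building a rolling object,
# stacking the window as a new dimension with construct(), and using one of the
# injected aggregation methods. The relative import mirrors this module's style.
def _example_dataarray_rolling():
    import numpy as np
    from .dataarray import DataArray
    da = DataArray(np.arange(8).reshape(2, 4), dims=('a', 'b'))
    windowed = da.rolling(b=3).construct('window_dim')   # shape (2, 4, 3)
    means = da.rolling(b=3, min_periods=1).mean()        # injected reduction
    return windowed, means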
| {
"repo_name": "jcmgray/xarray",
"path": "xarray/core/rolling.py",
"copies": "1",
"size": "16276",
"license": "apache-2.0",
"hash": 1268512750411181800,
"line_mean": 34.4596949891,
"line_max": 79,
"alpha_frac": 0.5381543377,
"autogenerated": false,
"ratio": 4.412035782054757,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5450190119754758,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import warnings
from functools import wraps
import pandas as pd
from pandas.core.window import Rolling as pd_Rolling
from ..base import tokenize
from ..utils import M, funcname, derived_from
from .core import _emulate
from .utils import make_meta
def overlap_chunk(func, prev_part, current_part, next_part, before, after,
args, kwargs):
if ((prev_part is not None and prev_part.shape[0] != before) or
(next_part is not None and next_part.shape[0] != after)):
raise NotImplementedError("Partition size is less than overlapping "
"window size. Try using ``df.repartition`` "
"to increase the partition size.")
parts = [p for p in (prev_part, current_part, next_part) if p is not None]
combined = pd.concat(parts)
out = func(combined, *args, **kwargs)
if prev_part is None:
before = None
if next_part is None:
return out.iloc[before:]
return out.iloc[before:-after]
def map_overlap(func, df, before, after, *args, **kwargs):
"""Apply a function to each partition, sharing rows with adjacent partitions.
Parameters
----------
func : function
Function applied to each partition.
df : dd.DataFrame, dd.Series
before : int
The number of rows to prepend to partition ``i`` from the end of
partition ``i - 1``.
after : int
The number of rows to append to partition ``i`` from the beginning
of partition ``i + 1``.
args, kwargs :
Arguments and keywords to pass to the function. The partition will
be the first argument, and these will be passed *after*.
See Also
--------
dd.DataFrame.map_overlap
"""
if not (isinstance(before, int) and before >= 0 and
isinstance(after, int) and after >= 0):
raise ValueError("before and after must be positive integers")
if 'token' in kwargs:
func_name = kwargs.pop('token')
token = tokenize(df, before, after, *args, **kwargs)
else:
func_name = 'overlap-' + funcname(func)
token = tokenize(func, df, before, after, *args, **kwargs)
if 'meta' in kwargs:
meta = kwargs.pop('meta')
else:
meta = _emulate(func, df, *args, **kwargs)
meta = make_meta(meta)
name = '{0}-{1}'.format(func_name, token)
name_a = 'overlap-prepend-' + tokenize(df, before)
name_b = 'overlap-append-' + tokenize(df, after)
df_name = df._name
dsk = df.dask.copy()
if before:
dsk.update({(name_a, i): (M.tail, (df_name, i), before)
for i in range(df.npartitions - 1)})
prevs = [None] + [(name_a, i) for i in range(df.npartitions - 1)]
else:
prevs = [None] * df.npartitions
if after:
dsk.update({(name_b, i): (M.head, (df_name, i), after)
for i in range(1, df.npartitions)})
nexts = [(name_b, i) for i in range(1, df.npartitions)] + [None]
else:
nexts = [None] * df.npartitions
for i, (prev, current, next) in enumerate(zip(prevs, df._keys(), nexts)):
dsk[(name, i)] = (overlap_chunk, func, prev, current, next, before,
after, args, kwargs)
return df._constructor(dsk, name, meta, df.divisions)
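# Illustrative sketch (not part of the original module): using map_overlap to run a
# pandas rolling sum across partition boundaries. A window of 3 only needs rows from
# the preceding partition, so before=2 and after=0. Assumes dask.dataframe is
# importable; the frame and column names are made up for the example.
def _example_map_overlap():
    import pandas as pd
    import dask.dataframe as dd
    pdf = pd.DataFrame({'x': range(10)})
    ddf = dd.from_pandas(pdf, npartitions=3)
    return map_overlap(lambda part: part.rolling(3).sum(), ddf, 2, 0)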
def wrap_rolling(func, method_name):
"""Create a chunked version of a pandas.rolling_* function"""
@wraps(func)
def rolling(arg, window, *args, **kwargs):
# pd.rolling_* functions are deprecated
warnings.warn(("DeprecationWarning: dd.rolling_{0} is deprecated and "
"will be removed in a future version, replace with "
"df.rolling(...).{0}(...)").format(method_name))
rolling_kwargs = {}
method_kwargs = {}
for k, v in kwargs.items():
if k in {'min_periods', 'center', 'win_type', 'axis', 'freq'}:
rolling_kwargs[k] = v
else:
method_kwargs[k] = v
rolling = arg.rolling(window, **rolling_kwargs)
return getattr(rolling, method_name)(*args, **method_kwargs)
return rolling
rolling_count = wrap_rolling(pd.rolling_count, 'count')
rolling_sum = wrap_rolling(pd.rolling_sum, 'sum')
rolling_mean = wrap_rolling(pd.rolling_mean, 'mean')
rolling_median = wrap_rolling(pd.rolling_median, 'median')
rolling_min = wrap_rolling(pd.rolling_min, 'min')
rolling_max = wrap_rolling(pd.rolling_max, 'max')
rolling_std = wrap_rolling(pd.rolling_std, 'std')
rolling_var = wrap_rolling(pd.rolling_var, 'var')
rolling_skew = wrap_rolling(pd.rolling_skew, 'skew')
rolling_kurt = wrap_rolling(pd.rolling_kurt, 'kurt')
rolling_quantile = wrap_rolling(pd.rolling_quantile, 'quantile')
rolling_apply = wrap_rolling(pd.rolling_apply, 'apply')
@wraps(pd.rolling_window)
def rolling_window(arg, window, **kwargs):
if kwargs.pop('mean', True):
return rolling_mean(arg, window, **kwargs)
return rolling_sum(arg, window, **kwargs)
def pandas_rolling_method(df, rolling_kwargs, name, *args, **kwargs):
rolling = df.rolling(**rolling_kwargs)
return getattr(rolling, name)(*args, **kwargs)
class Rolling(object):
"""Provides rolling window calculations."""
def __init__(self, obj, window=None, min_periods=None, freq=None,
center=False, win_type=None, axis=0):
if freq is not None:
msg = 'The deprecated freq argument is not supported.'
raise NotImplementedError(msg)
self.obj = obj # dataframe or series
self.window = window
self.min_periods = min_periods
self.center = center
self.win_type = win_type
self.axis = axis
# Allow pandas to raise if appropriate
obj._meta.rolling(**self._rolling_kwargs())
def _rolling_kwargs(self):
return {'window': self.window,
'min_periods': self.min_periods,
'center': self.center,
'win_type': self.win_type,
'axis': self.axis}
def _call_method(self, method_name, *args, **kwargs):
rolling_kwargs = self._rolling_kwargs()
meta = pandas_rolling_method(self.obj._meta_nonempty, rolling_kwargs,
method_name, *args, **kwargs)
if (self.axis in (1, 'columns') or self.window <= 1 or
self.obj.npartitions == 1):
# There's no overlap; just use map_partitions
return self.obj.map_partitions(pandas_rolling_method,
rolling_kwargs, method_name,
*args, token=method_name, meta=meta,
**kwargs)
# Convert window to overlap
if self.center:
before = self.window // 2
after = self.window - before - 1
else:
before = self.window - 1
after = 0
return map_overlap(pandas_rolling_method, self.obj, before, after,
rolling_kwargs, method_name, *args,
token=method_name, meta=meta, **kwargs)
@derived_from(pd_Rolling)
def count(self):
return self._call_method('count')
@derived_from(pd_Rolling)
def sum(self):
return self._call_method('sum')
@derived_from(pd_Rolling)
def mean(self):
return self._call_method('mean')
@derived_from(pd_Rolling)
def median(self):
return self._call_method('median')
@derived_from(pd_Rolling)
def min(self):
return self._call_method('min')
@derived_from(pd_Rolling)
def max(self):
return self._call_method('max')
@derived_from(pd_Rolling)
def std(self, ddof=1):
return self._call_method('std', ddof=ddof)
@derived_from(pd_Rolling)
def var(self, ddof=1):
return self._call_method('var', ddof=ddof)
@derived_from(pd_Rolling)
def skew(self):
return self._call_method('skew')
@derived_from(pd_Rolling)
def kurt(self):
return self._call_method('kurt')
@derived_from(pd_Rolling)
def quantile(self, quantile):
return self._call_method('quantile', quantile)
@derived_from(pd_Rolling)
def apply(self, func, args=(), kwargs={}):
return self._call_method('apply', func, args=args, kwargs=kwargs)
def __repr__(self):
return 'Rolling [{}]'.format(','.join(
'{}={}'.format(k, v)
for k, v in self._rolling_kwargs().items() if v is not None))
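# Illustrative sketch (not part of the original module): the window-to-overlap
# arithmetic used in Rolling._call_method above. A centered window of size w needs
# w // 2 rows from the left neighbour and w - (w // 2) - 1 from the right; an
# uncentered window needs w - 1 rows from the left only.
def _example_window_to_overlap(window, center):
    if center:
        before = window // 2
        after = window - before - 1
    else:
        before = window - 1
        after = 0
    return before, after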
| {
"repo_name": "gameduell/dask",
"path": "dask/dataframe/rolling.py",
"copies": "3",
"size": "8631",
"license": "bsd-3-clause",
"hash": -9126737527843209000,
"line_mean": 34.2285714286,
"line_max": 81,
"alpha_frac": 0.5871857259,
"autogenerated": false,
"ratio": 3.713855421686747,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5801041147586747,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import warnings
from operator import attrgetter
from hashlib import md5
from functools import partial
from toolz import merge, groupby, curry
from toolz.functoolz import Compose
from .compatibility import bind_method
from .context import _globals
from .utils import Dispatch, ignoring
__all__ = ("Base", "compute", "normalize_token", "tokenize", "visualize")
class Base(object):
"""Base class for dask collections"""
def visualize(self, filename='mydask', format=None, optimize_graph=False,
**kwargs):
return visualize(self, filename=filename, format=format,
optimize_graph=optimize_graph, **kwargs)
def _visualize(self, filename='mydask', format=None, optimize_graph=False):
warn = DeprecationWarning("``_visualize`` is deprecated, use "
"``visualize`` instead.")
warnings.warn(warn)
return self.visualize(filename=filename, format=format,
optimize_graph=optimize_graph)
def compute(self, **kwargs):
return compute(self, **kwargs)[0]
@classmethod
def _get(cls, dsk, keys, get=None, **kwargs):
get = get or _globals['get'] or cls._default_get
dsk2 = cls._optimize(dsk, keys)
return get(dsk2, keys, **kwargs)
@classmethod
def _bind_operator(cls, op):
""" bind operator to this class """
name = op.__name__
if name.endswith('_'):
# for and_ and or_
name = name[:-1]
elif name == 'inv':
name = 'invert'
meth = '__{0}__'.format(name)
if name in ('abs', 'invert', 'neg'):
bind_method(cls, meth, cls._get_unary_operator(op))
else:
bind_method(cls, meth, cls._get_binary_operator(op))
if name in ('eq', 'gt', 'ge', 'lt', 'le', 'ne'):
return
rmeth = '__r{0}__'.format(name)
bind_method(cls, rmeth, cls._get_binary_operator(op, inv=True))
@classmethod
def _get_unary_operator(cls, op):
""" Must return a method used by unary operator """
raise NotImplementedError
@classmethod
def _get_binary_operator(cls, op, inv=False):
""" Must return a method used by binary operator """
raise NotImplementedError
def compute(*args, **kwargs):
"""Compute several dask collections at once.
Examples
--------
>>> import dask.array as da
>>> a = da.arange(10, chunks=2).sum()
>>> b = da.arange(10, chunks=2).mean()
>>> compute(a, b)
(45, 4.5)
"""
groups = groupby(attrgetter('_optimize'), args)
get = kwargs.pop('get', None) or _globals['get']
if not get:
get = args[0]._default_get
if not all(a._default_get == get for a in args):
raise ValueError("Compute called on multiple collections with "
"differing default schedulers. Please specify a "
"scheduler `get` function using either "
"the `get` kwarg or globally with `set_options`.")
dsk = merge([opt(merge([v.dask for v in val]), [v._keys() for v in val])
for opt, val in groups.items()])
keys = [arg._keys() for arg in args]
results = get(dsk, keys, **kwargs)
return tuple(a._finalize(a, r) for a, r in zip(args, results))
def visualize(*args, **kwargs):
filename = kwargs.pop('filename', 'mydask')
optimize_graph = kwargs.pop('optimize_graph', False)
from dask.dot import dot_graph
if optimize_graph:
dsks = [arg._optimize(arg.dask, arg._keys()) for arg in args]
else:
dsks = [arg.dask for arg in args]
dsk = merge(dsks)
return dot_graph(dsk, filename=filename, **kwargs)
def normalize_function(func):
if isinstance(func, curry):
func = func._partial
if isinstance(func, Compose):
first = getattr(func, 'first', None)
funcs = reversed((first,) + func.funcs) if first else func.funcs
return tuple(normalize_function(f) for f in funcs)
elif isinstance(func, partial):
kws = tuple(sorted(func.keywords.items())) if func.keywords else ()
return (normalize_function(func.func), func.args, kws)
else:
return str(func)
normalize_token = Dispatch()
normalize_token.register((int, float, str, tuple, list), lambda a: a)
normalize_token.register(object,
lambda a: normalize_function(a) if callable(a) else a)
normalize_token.register(dict, lambda a: tuple(sorted(a.items())))
with ignoring(ImportError):
import pandas as pd
@partial(normalize_token.register, pd.Index)
def normalize_index(ind):
return [ind.name, normalize_token(ind.values)]
@partial(normalize_token.register, pd.Categorical)
def normalize_categorical(cat):
return [normalize_token(cat.codes),
normalize_token(cat.categories),
cat.ordered]
@partial(normalize_token.register, pd.Series)
def normalize_series(s):
return [s.name, s.dtype,
normalize_token(s._data.blocks[0].values),
normalize_token(s.index)]
@partial(normalize_token.register, pd.DataFrame)
def normalize_dataframe(df):
data = [block.values for block in df._data.blocks]
data += [df.columns, df.index]
return list(map(normalize_token, data))
with ignoring(ImportError):
import numpy as np
@partial(normalize_token.register, np.ndarray)
def normalize_array(x):
if not x.shape:
return (str(x), x.dtype)
if x.dtype.hasobject:
try:
data = md5('-'.join(x.flat)).hexdigest()
except TypeError:
data = md5(b'-'.join([str(item).encode() for item in x.flat])).hexdigest()
else:
try:
data = md5(x.ravel().view('i1').data).hexdigest()
except (BufferError, AttributeError, ValueError):
data = md5(x.copy().ravel().view('i1').data).hexdigest()
return (data, x.dtype, x.shape, x.strides)
def tokenize(*args, **kwargs):
""" Deterministic token
>>> tokenize([1, 2, '3'])
'9d71491b50023b06fc76928e6eddb952'
>>> tokenize('Hello') == tokenize('Hello')
True
"""
if kwargs:
args = args + (kwargs,)
return md5(str(tuple(map(normalize_token, args))).encode()).hexdigest()
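# Illustrative sketch (not part of the original module): normalize_function flattens
# curried and partial callables into hashable structures, which is what keeps
# tokenize deterministic for them. The commented return values show the general
# shape of the results, not exact strings.
def _example_normalize_function():
    from functools import partial
    plain = normalize_function(len)                   # str(len)
    part = normalize_function(partial(max, key=abs))  # (str(max), args, sorted kwargs)
    return plain, part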
| {
"repo_name": "pombredanne/dask",
"path": "dask/base.py",
"copies": "1",
"size": "6501",
"license": "bsd-3-clause",
"hash": -248236159776766340,
"line_mean": 32.3384615385,
"line_max": 90,
"alpha_frac": 0.5945239194,
"autogenerated": false,
"ratio": 3.8151408450704225,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4909664764470423,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import warnings
from os.path import basename
from collections import OrderedDict
from glue.core.coordinates import coordinates_from_header, WCSCoordinates
from glue.core.data import Component, Data
from glue.config import data_factory, qglue_parser
__all__ = ['is_fits', 'fits_reader', 'is_casalike', 'casalike_cube']
def is_fits(filename):
from astropy.io import fits
try:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
with fits.open(filename, ignore_missing_end=True):
return True
except IOError:
return False
@data_factory(
label='FITS file',
identifier=is_fits,
priority=100,
)
def fits_reader(source, auto_merge=False, exclude_exts=None, label=None):
"""
Read in all extensions from a FITS file.
Parameters
----------
source: str or HDUList
The pathname to the FITS file.
If an HDUList is passed in, simply use that.
auto_merge: bool
Merge image extensions that share the same shape when
only one of them has a defined WCS.
exclude_exts: [hdu, ] or [index, ]
List of HDUs to exclude from reading. Entries may be
HDU names or HDU indexes.
"""
from astropy.io import fits
from astropy.table import Table
exclude_exts = exclude_exts or []
if not isinstance(source, fits.hdu.hdulist.HDUList):
hdulist = fits.open(source, ignore_missing_end=True)
hdulist.verify('fix')
else:
hdulist = source
groups = OrderedDict()
extension_by_shape = OrderedDict()
if label is not None:
label_base = label
else:
hdulist_name = hdulist.filename()
if hdulist_name is None:
hdulist_name = "HDUList"
label_base = basename(hdulist_name).rpartition('.')[0]
if not label_base:
label_base = basename(hdulist_name)
# Create a new image Data.
def new_data():
label = '{0}[{1}]'.format(
label_base,
hdu_name
)
data = Data(label=label)
data.coords = coords
groups[hdu_name] = data
extension_by_shape[shape] = hdu_name
return data
for extnum, hdu in enumerate(hdulist):
hdu_name = hdu.name if hdu.name else "HDU{0}".format(extnum)
if (hdu.data is not None and
hdu.data.size > 0 and
hdu_name not in exclude_exts and
extnum not in exclude_exts):
if is_image_hdu(hdu):
shape = hdu.data.shape
coords = coordinates_from_header(hdu.header)
if not auto_merge or has_wcs(coords):
data = new_data()
else:
try:
data = groups[extension_by_shape[shape]]
except KeyError:
data = new_data()
data.add_component(component=hdu.data,
label=hdu_name)
elif is_table_hdu(hdu):
# Loop through columns and make component list
table = Table.read(hdu, format='fits')
label = '{0}[{1}]'.format(
label_base,
hdu_name
)
data = Data(label=label)
groups[hdu_name] = data
for column_name in table.columns:
column = table[column_name]
if column.ndim != 1:
warnings.warn("Dropping column '{0}' since it is not 1-dimensional".format(column_name))
continue
component = Component.autotyped(column, units=column.unit)
data.add_component(component=component,
label=column_name)
return [groups[idx] for idx in groups]
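# Illustrative sketch (not part of the original module): reading every extension of
# a FITS file into glue Data objects. 'observation.fits' and the 'MASK' extension
# name are hypothetical.
def _example_fits_reader():
    datasets = fits_reader('observation.fits', auto_merge=True,
                           exclude_exts=['MASK'], label='obs')
    return {d.label: d for d in datasets}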
# Utilities
def is_image_hdu(hdu):
from astropy.io.fits.hdu import PrimaryHDU, ImageHDU, CompImageHDU
return isinstance(hdu, (PrimaryHDU, ImageHDU, CompImageHDU))
def is_table_hdu(hdu):
from astropy.io.fits.hdu import TableHDU, BinTableHDU
return isinstance(hdu, (TableHDU, BinTableHDU))
def has_wcs(coords):
return (isinstance(coords, WCSCoordinates) and
any(axis['coordinate_type'] is not None
for axis in coords.wcs.get_axis_types()))
def is_casalike(filename, **kwargs):
"""
Check if a FITS file is a CASA like cube,
with (P, P, V, Stokes) layout
"""
from astropy.io import fits
if not is_fits(filename):
return False
with fits.open(filename, ignore_missing_end=True) as hdulist:
if len(hdulist) != 1:
return False
if hdulist[0].header['NAXIS'] != 4:
return False
from astropy.wcs import WCS
w = WCS(hdulist[0].header)
ax = [a.get('coordinate_type') for a in w.get_axis_types()]
return ax == ['celestial', 'celestial', 'spectral', 'stokes']
@data_factory(label='CASA PPV Cube', identifier=is_casalike, deprecated=True)
def casalike_cube(filename, **kwargs):
"""
This provides special support for 4D CASA FITS-like cubes,
which have 2 spatial axes, a spectral axis, and a stokes axis
in that order.
Each stokes cube is split out as a separate component
"""
from astropy.io import fits
result = Data()
if 'ignore_missing_end' not in kwargs:
kwargs['ignore_missing_end'] = True
with fits.open(filename, **kwargs) as hdulist:
array = hdulist[0].data
header = hdulist[0].header
result.coords = coordinates_from_header(header)
for i in range(array.shape[0]):
result.add_component(array[[i]], label='STOKES %i' % i)
return result
try:
from astropy.io.fits import HDUList
except ImportError:
pass
else:
# Put HDUList parser before list parser
@qglue_parser(HDUList, priority=100)
def _parse_data_hdulist(data, label):
from glue.core.data_factories.fits import fits_reader
return fits_reader(data, label=label)
| {
"repo_name": "saimn/glue",
"path": "glue/core/data_factories/fits.py",
"copies": "3",
"size": "6169",
"license": "bsd-3-clause",
"hash": -8495161455730323000,
"line_mean": 29.5396039604,
"line_max": 112,
"alpha_frac": 0.5882639001,
"autogenerated": false,
"ratio": 3.789312039312039,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00022066948194586575,
"num_lines": 202
} |
from __future__ import absolute_import, division, print_function
import warnings
import functools
import inspect
import datetime
import tempfile
import os
import shutil
import numpy as np
from contextlib import contextmanager
from multiprocessing.pool import ThreadPool
from multipledispatch import Dispatcher
from datashape import dshape, Record
from datashape.discovery import is_zero_time
from toolz import pluck, get, curry, keyfilter
from .compatibility import unicode
sample = Dispatcher('sample')
def iter_except(func, exception, first=None):
"""Call a `func` repeatedly until `exception` is raised. Optionally call
`first` first.
Parameters
----------
func : callable
Repeatedly call this until `exception` is raised.
exception : Exception
Stop calling `func` when this is raised.
first : callable, optional, default ``None``
Call this first if it isn't ``None``.
Examples
--------
>>> x = {'a': 1, 'b': 2}
>>> def iterate():
... yield 'a'
... yield 'b'
... yield 'c'
...
>>> keys = iterate()
>>> diter = iter_except(lambda: x[next(keys)], KeyError)
>>> list(diter)
[1, 2]
Notes
-----
* Taken from https://docs.python.org/2/library/itertools.html#recipes
"""
try:
if first is not None:
yield first()
while 1: # True isn't a reserved word in Python 2.x
yield func()
except exception:
pass
def ext(filename):
_, e = os.path.splitext(filename)
return e.lstrip(os.extsep)
def raises(err, lamda):
try:
lamda()
return False
except err:
return True
def expand_tuples(L):
"""
>>> expand_tuples([1, (2, 3)])
[(1, 2), (1, 3)]
>>> expand_tuples([1, 2])
[(1, 2)]
"""
if not L:
return [()]
elif not isinstance(L[0], tuple):
rest = expand_tuples(L[1:])
return [(L[0],) + t for t in rest]
else:
rest = expand_tuples(L[1:])
return [(item,) + t for t in rest for item in L[0]]
@contextmanager
def tmpfile(extension='', dir=None):
extension = '.' + extension.lstrip('.')
handle, filename = tempfile.mkstemp(extension, dir=dir)
os.close(handle)
os.remove(filename)
try:
yield filename
finally:
if os.path.exists(filename):
if os.path.isdir(filename):
shutil.rmtree(filename)
else:
try:
os.remove(filename)
except OSError:
# sometimes we can't remove a generated temp file
pass
def keywords(func):
""" Get the argument names of a function
>>> def f(x, y=2):
... pass
>>> keywords(f)
['x', 'y']
"""
if isinstance(func, type):
return keywords(func.__init__)
return inspect.getargspec(func).args
def cls_name(cls):
if 'builtin' in cls.__module__:
return cls.__name__
else:
return cls.__module__.split('.')[0] + '.' + cls.__name__
@contextmanager
def filetext(text, extension='', open=open, mode='w'):
with tmpfile(extension=extension) as filename:
f = open(filename, mode=mode)
try:
f.write(text)
finally:
try:
f.close()
except AttributeError:
pass
try:
yield filename
finally:
if os.path.exists(filename):
try:
os.remove(filename)
except OSError:
pass
@contextmanager
def filetexts(d, open=open):
""" Dumps a number of textfiles to disk
d - dict
a mapping from filename to text like {'a.csv': '1,1\n2,2'}
"""
for filename, text in d.items():
f = open(filename, 'wt')
try:
f.write(text)
finally:
try:
f.close()
except AttributeError:
pass
try:
yield list(d)
finally:
for filename in d:
if os.path.exists(filename):
try:
os.remove(filename)
except OSError:
pass
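# Illustrative sketch (not part of the original module): filetexts writes each
# mapping entry to disk, yields the filenames, and removes the files on exit.
def _example_filetexts():
    with filetexts({'a.csv': '1,1\n2,2', 'b.csv': '3,3\n4,4'}) as filenames:
        return sorted(filenames)  # ['a.csv', 'b.csv'] while the files exist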
def normalize_to_date(dt):
if isinstance(dt, datetime.datetime) and is_zero_time(dt.time()):
return dt.date()
else:
return dt
def assert_allclose(lhs, rhs):
for tb in map(zip, lhs, rhs):
for left, right in tb:
if isinstance(left, (np.floating, float)):
# account for nans
assert np.all(np.isclose(left, right, equal_nan=True))
continue
if isinstance(left, datetime.datetime):
left = normalize_to_date(left)
if isinstance(right, datetime.datetime):
right = normalize_to_date(right)
assert left == right
def records_to_tuples(ds, data):
""" Transform records into tuples
Examples
--------
>>> seq = [{'a': 1, 'b': 10}, {'a': 2, 'b': 20}]
>>> list(records_to_tuples('var * {a: int, b: int}', seq))
[(1, 10), (2, 20)]
>>> records_to_tuples('{a: int, b: int}', seq[0]) # single elements
(1, 10)
>>> records_to_tuples('var * int', [1, 2, 3]) # pass through on non-records
[1, 2, 3]
See Also
--------
tuples_to_records
"""
if isinstance(ds, (str, unicode)):
ds = dshape(ds)
if isinstance(ds.measure, Record) and len(ds.shape) == 1:
return pluck(ds.measure.names, data, default=None)
if isinstance(ds.measure, Record) and len(ds.shape) == 0:
return get(ds.measure.names, data)
if not isinstance(ds.measure, Record):
return data
raise NotImplementedError()
def tuples_to_records(ds, data):
""" Transform tuples into records
Examples
--------
>>> seq = [(1, 10), (2, 20)]
>>> list(tuples_to_records('var * {a: int, b: int}', seq)) # doctest: +SKIP
[{'a': 1, 'b': 10}, {'a': 2, 'b': 20}]
>>> tuples_to_records('{a: int, b: int}', seq[0]) # doctest: +SKIP
{'a': 1, 'b': 10}
>>> tuples_to_records('var * int', [1, 2, 3]) # pass through on non-records
[1, 2, 3]
See Also
--------
records_to_tuples
"""
if isinstance(ds, (str, unicode)):
ds = dshape(ds)
if isinstance(ds.measure, Record) and len(ds.shape) == 1:
names = ds.measure.names
return (dict(zip(names, tup)) for tup in data)
if isinstance(ds.measure, Record) and len(ds.shape) == 0:
names = ds.measure.names
return dict(zip(names, data))
if not isinstance(ds.measure, Record):
return data
raise NotImplementedError()
@contextmanager
def ignoring(*exceptions):
try:
yield
except exceptions:
pass
def into_path(*path):
""" Path to file in into directory
>>> into_path('backends', 'tests', 'myfile.csv') # doctest: +SKIP
'/home/user/odo/odo/backends/tests/myfile.csv'
"""
import odo
return os.path.join(os.path.dirname(odo.__file__), *path)
@curry
def pmap(f, iterable):
"""Map `f` over `iterable` in parallel using a ``ThreadPool``.
"""
p = ThreadPool()
try:
result = p.map(f, iterable)
finally:
p.terminate()
return result
@curry
def write(triple, writer):
"""Write a file using the input from `gentemp` using `writer` and return
its index and filename.
Parameters
----------
triple : tuple of int, str, str
The first element is the index in the set of chunks of a file, the
second element is the path to write to, the third element is the data
to write.
Returns
-------
i, filename : int, str
File's index and filename. This is used to return the index and
filename after splitting files.
Notes
-----
This could be adapted to write to an already open handle, which would
allow, e.g., multipart gzip uploads. Currently we open write a new file
every time.
"""
i, filename, data = triple
with writer(filename, mode='wb') as f:
f.write(data)
return i, filename
def gentemp(it, suffix=None, start=0):
"""Yield an index, a temp file, and data for each element in `it`.
Parameters
----------
it : Iterable
suffix : str or ``None``, optional
Suffix to add to each temporary file's name
start : int, optional
An integer indicating where to start the numbering of chunks in `it`.
"""
for i, data in enumerate(it, start=start): # aws needs parts to start at 1
with tmpfile('.into') as fn:
yield i, fn, data
@curry
def split(filename, nbytes, suffix=None, writer=open, start=0):
"""Split a file into chunks of size `nbytes` with each filename containing
a suffix specified by `suffix`. The file will be written with the ``write``
method of an instance of `writer`.
Parameters
----------
filename : str
The file to split
nbytes : int
Split `filename` into chunks of this size
suffix : str, optional
writer : callable, optional
Callable object to use to write the chunks of `filename`
"""
with open(filename, mode='rb') as f:
# use a bytes sentinel so iteration also stops at end-of-file on Python 3
byte_chunks = iter(curry(f.read, nbytes), b'')
return pmap(write(writer=writer),
gentemp(byte_chunks, suffix=suffix, start=start))
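# Illustrative sketch (not part of the original module): splitting a file into
# roughly 1 MB chunks, each written to its own temporary file in parallel.
# 'large.csv' is a hypothetical path; start=1 numbers chunks from 1 as S3 expects.
def _example_split():
    return split('large.csv', nbytes=1 << 20, start=1)  # [(1, '/tmp/...'), (2, ...), ...]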
def filter_kwargs(f, kwargs):
"""Return a dict of valid kwargs for `f` from a subset of `kwargs`
Examples
--------
>>> def f(a, b=1, c=2):
... return a + b + c
...
>>> raw_kwargs = dict(a=1, b=3, d=4)
>>> f(**raw_kwargs)
Traceback (most recent call last):
...
TypeError: f() got an unexpected keyword argument 'd'
>>> kwargs = filter_kwargs(f, raw_kwargs)
>>> f(**kwargs)
6
"""
return keyfilter(keywords(f).__contains__, kwargs)
@curry
def copydoc(from_, to):
"""Copies the docstring from one function to another.
Parameters
----------
from_ : any
The object to copy the docstring from.
to : any
The object to copy the docstring to.
Returns
-------
to : any
``to`` with the docstring from ``from_``
"""
to.__doc__ = from_.__doc__
return to
def deprecated(replacement=None):
"""A decorator which can be used to mark functions as deprecated.
replacement is a callable that will be called with the same args
as the decorated function.
"""
def outer(fun):
msg = "{} is deprecated".format(fun.__name__)
if replacement is not None:
msg += "; use {} instead".format(replacement)
if fun.__doc__ is None:
fun.__doc__ = msg
@functools.wraps(fun)
def inner(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
return fun(*args, **kwargs)
return inner
return outer
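# Illustrative sketch (not part of the original module): marking a hypothetical
# helper as deprecated and pointing callers at its replacement; calling it emits a
# DeprecationWarning built from the function name and the replacement string.
@deprecated(replacement='read_text')
def _example_old_reader(path):
    with open(path) as f:
        return f.read()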
def literal_compile(s):
"""Compile a sql expression with bind params inlined as literals.
Parameters
----------
s : Selectable
The expression to compile.
Returns
-------
cs : str
An equivalent sql string.
"""
return str(s.compile(compile_kwargs={'literal_binds': True}))
| {
"repo_name": "ContinuumIO/odo",
"path": "odo/utils.py",
"copies": "4",
"size": "11332",
"license": "bsd-3-clause",
"hash": 6774509659130769000,
"line_mean": 24.4651685393,
"line_max": 80,
"alpha_frac": 0.562919167,
"autogenerated": false,
"ratio": 3.9170411337711717,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6479960300771173,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import warnings
from datashape import dshape
import flask
from flask.testing import FlaskClient
from odo import resource
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
from toolz import assoc, keymap
from ..compatibility import u8
from ..expr import Expr
from ..dispatch import dispatch
from .server import DEFAULT_PORT
from .serialization import json
# These are a hack for testing
# It's convenient to use requests for live production but use
# flask for testing. Sadly they have different Response objects,
# hence the dispatched functions
__all__ = 'Client',
def content(response):
if isinstance(response, flask.Response):
return response.data
if isinstance(response, requests.Response):
return response.content
def ok(response):
if isinstance(response, flask.Response):
return 200 <= response.status_code <= 299
if isinstance(response, requests.Response):
return response.ok
def reason(response):
if isinstance(response, flask.Response):
return response.status
if isinstance(response, requests.Response):
return response.text
def _request(method, client, url, params=None, auth=None, **kwargs):
if not isinstance(requests, FlaskClient):
kwargs['verify'] = client.verify_ssl
kwargs['params'] = params
kwargs['auth'] = auth
with warnings.catch_warnings():
warnings.simplefilter('ignore', InsecureRequestWarning)
return method(
'{base}{url}'.format(
base=client.url,
url=url,
),
**kwargs
)
def get(*args, **kwargs):
return _request(requests.get, *args, **kwargs)
def post(*args, **kwargs):
return _request(requests.post, *args, **kwargs)
class Client(object):
""" Client for Blaze Server
Provides programmatic access to datasets living on Blaze Server
Parameters
----------
url : str
URL of a Blaze server
serial : SerializationFormat, optional
The serialization format object to use. Defaults to JSON.
A serialization format is an object that supports:
name, loads, and dumps.
verify_ssl : bool, optional
Verify the ssl certificate from the server.
This is enabled by default.
auth : tuple, optional
The username and password to use when connecting to the server.
If not provided, no auth header will be sent.
Examples
--------
>>> # This example matches with the docstring of ``Server``
>>> from blaze import data
>>> c = Client('localhost:6363')
>>> t = data(c) # doctest: +SKIP
See Also
--------
blaze.server.server.Server
"""
__slots__ = 'url', 'serial', 'verify_ssl', 'auth'
def __init__(self, url, serial=json, verify_ssl=True, auth=None, **kwargs):
url = url.strip('/')
if not url.startswith('http'):
url = 'http://' + url
self.url = url
self.serial = serial
self.verify_ssl = verify_ssl
self.auth = auth
@property
def dshape(self):
"""The datashape of the client"""
response = get(self, '/datashape', auth=self.auth)
if not ok(response):
raise ValueError("Bad Response: %s" % reason(response))
return dshape(content(response).decode('utf-8'))
def add(self, name, resource_uri, *args, **kwargs):
"""Add the given resource URI to the Blaze server.
Parameters
----------
name : str
The name to give the resource
resource_uri : str
The URI string describing the resource to add to the server, e.g
'sqlite:///path/to/file.db::table'
imports : list
A list of string names for any modules that must be imported on
the Blaze server before the resource can be added. This is identical
to the `imports` field in a Blaze server YAML file.
args : any, optional
Any additional positional arguments that can be passed to the
``blaze.resource`` constructor for this resource type
kwargs : any, optional
Any additional keyword arguments that can be passed to the
``blaze.resource`` constructor for this resource type
"""
payload = {name: {'source': resource_uri}}
imports = kwargs.pop('imports', None)
if imports is not None:
payload[name]['imports'] = imports
if args:
payload[name]['args'] = args
if kwargs:
payload[name]['kwargs'] = kwargs
response = post(self, '/add', auth=self.auth,
data=self.serial.dumps(payload),
headers={'Content-Type': 'application/vnd.blaze+' + self.serial.name})
# A special case for the "Not Found" error, since that means that this
# server doesn't support adding datasets, and the user should see a more
# helpful response
if response.status_code == 404:
raise ValueError("Server does not support dynamically adding datasets")
if not ok(response):
raise ValueError("Bad Response: %s" % reason(response))
@dispatch(Client)
def discover(c):
return c.dshape
def mimetype(serial):
"""Function to generate a blaze serialization format mimetype put into a
dictionary of headers for consumption by requests.
Examples
--------
>>> from blaze.server.serialization import msgpack
>>> mimetype(msgpack)
{'Content-Type': 'application/vnd.blaze+msgpack'}
"""
return {'Content-Type': 'application/vnd.blaze+%s' % serial.name}
@dispatch(Expr, Client)
def compute_down(expr,
ec,
profiler_output=None,
compute_kwargs=None,
odo_kwargs=None,
**kwargs):
"""Compute down for blaze clients.
Parameters
----------
expr : Expr
The expression to send to the server.
ec : Client
The blaze client to compute against.
namespace : dict[Symbol -> any], optional
The namespace to compute the expression in. This will be amended to
include that data for the server. By default this will just be the
client mapping to the server's data.
compute_kwargs : dict, optional
Extra kwargs to pass to compute on the server.
odo_kwargs : dict, optional
Extra kwargs to pass to odo on the server.
profile : bool, optional
Should blaze server run cProfile over the computation of the expression
and the serialization of the response.
profiler_output : file-like object, optional
A file like object to hold the profiling output from the server.
If this is not passed then the server will write the data to the
server's filesystem
"""
from .server import to_tree
kwargs = keymap(u8, kwargs)
tree = to_tree(expr)
serial = ec.serial
if profiler_output is not None:
kwargs[u'profile'] = True
kwargs[u'profiler_output'] = ':response'
kwargs[u'compute_kwargs'] = keymap(u8, compute_kwargs or {})
kwargs[u'odo_kwargs'] = keymap(u8, odo_kwargs or {})
r = post(
ec,
'/compute',
data=serial.dumps(assoc(kwargs, u'expr', tree)),
auth=ec.auth,
headers=mimetype(serial),
)
if not ok(r):
raise ValueError("Bad response: %s" % reason(r))
response = serial.loads(content(r))
if profiler_output is not None:
profiler_output.write(response[u'profiler_output'])
return serial.data_loads(response[u'data'])
@resource.register('blaze://.+')
def resource_blaze(uri, leaf=None, **kwargs):
if leaf is not None:
raise ValueError('The syntax blaze://...::{leaf} is no longer '
'supported as of version 0.8.1.\n'
'You can access {leaf!r} using this syntax:\n'
'data({uri})[{leaf!r}]'
.format(leaf=leaf, uri=uri))
uri = uri[len('blaze://'):]
sp = uri.split('/')
tld, rest = sp[0], sp[1:]
if ':' not in tld:
tld += ':%d' % DEFAULT_PORT
uri = '/'.join([tld] + list(rest))
return Client(uri)
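# Illustrative sketch (not part of the original module): how a blaze:// URI resolves
# to a Client, including the default port when none is given. The hostname is made
# up; the exact port comes from DEFAULT_PORT.
def _example_resource_blaze():
    c = resource_blaze('blaze://example.com/accounts')
    return c.url  # 'http://example.com:%d/accounts' % DEFAULT_PORT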
| {
"repo_name": "ContinuumIO/blaze",
"path": "blaze/server/client.py",
"copies": "2",
"size": "8386",
"license": "bsd-3-clause",
"hash": 4445558518802110000,
"line_mean": 30.8859315589,
"line_max": 94,
"alpha_frac": 0.6148342476,
"autogenerated": false,
"ratio": 4.246075949367088,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0001791727593875549,
"num_lines": 263
} |
from __future__ import (absolute_import, division, print_function)
import warnings
from odm2api import serviceBase
from odm2api.models import (
ActionAnnotations, ActionDirectives, ActionExtensionPropertyValues, Actions,
Affiliations, Annotations, AuthorLists, CVActionType, CVAggregationStatistic,
CVAnnotationType, CVCensorCode, CVDataQualityType, CVDataSetType, CVDirectiveType,
CVElevationDatum, CVEquipmentType, CVMediumType, CVMethodType, CVOrganizationType,
CVPropertyDataType, CVQualityCode, CVRelationshipType, CVResultType, CVSamplingFeatureGeoType,
CVSamplingFeatureType, CVSiteType, CVSpatialOffsetType, CVSpeciation, CVSpecimenType,
CVStatus, CVTaxonomicClassifierType, CVUnitsType, CVVariableName, CVVariableType,
CalibrationActions, CalibrationReferenceEquipment, CalibrationStandards,
CategoricalResultValueAnnotations, CategoricalResultValues, CitationExtensionPropertyValues,
CitationExternalIdentifiers, DataLoggerFileColumns, DataLoggerFiles, DataLoggerProgramFiles,
DataQuality, DataSetCitations, DataSets, DataSetsResults, DerivationEquations, Directives, Equipment,
EquipmentActions, EquipmentAnnotations, EquipmentModels, EquipmentUsed, ExtensionProperties,
ExternalIdentifierSystems, FeatureActions, InstrumentOutputVariables, MaintenanceActions,
MeasurementResultValueAnnotations, MeasurementResultValues, MethodAnnotations,
MethodCitations, MethodExtensionPropertyValues, MethodExternalIdentifiers,
Methods, Models, Organizations, People, PersonExternalIdentifiers,
PointCoverageResultValueAnnotations, PointCoverageResultValues, ProcessingLevels,
ProfileResultValueAnnotations, ProfileResultValues, ReferenceMaterialExternalIdentifiers,
ReferenceMaterialValues, ReferenceMaterials, RelatedActions, RelatedAnnotations,
RelatedCitations, RelatedDataSets, RelatedEquipment, RelatedFeatures, RelatedModels,
RelatedResults, ResultAnnotations, ResultDerivationEquations, ResultExtensionPropertyValues,
ResultNormalizationValues, Results, ResultsDataQuality, SamplingFeatureAnnotations,
SamplingFeatureExtensionPropertyValues, SamplingFeatureExternalIdentifiers,
SamplingFeatures, SectionResultValueAnnotations, SectionResults, Simulations,
SpatialReferenceExternalIdentifiers, SpatialReferences, SpecimenBatchPositions,
SpectraResultValueAnnotations, SpectraResultValues, TaxonomicClassifierExternalIdentifiers,
TaxonomicClassifiers, TimeSeriesResultValueAnnotations, TimeSeriesResultValues,
TrajectoryResultValueAnnotations, TrajectoryResultValues,
TransectResultValueAnnotations, TransectResultValues, Units, VariableExtensionPropertyValues,
VariableExternalIdentifiers, Variables,
)
import pandas as pd
from sqlalchemy import distinct, exists
from sqlalchemy.orm import contains_eager
__author__ = 'sreeder'
class DetailedResult:
def __init__(self, action, result,
sc, sn,
method, variable,
processingLevel,
unit):
# result.result_id etc.
self.ResultID = result.ResultID
self.SamplingFeatureCode = sc
self.MethodCode = method.MethodCode
self.VariableCode = variable.VariableCode
self.ProcessingLevelCode = processingLevel.ProcessingLevelCode
self.UnitsName = unit.UnitsName
self.SamplingFeatureName = sn
self.MethodName = method.MethodName
self.VariableNameCV = variable.VariableNameCV
self.ProcessingLevelDefinition = processingLevel.Definition
self.ValueCount = result.ValueCount
self.BeginDateTime = action.BeginDateTime
self.EndDateTime = action.EndDateTime
self.ResultObj = result
class DetailedAffiliation:
def __init__(self, affiliation, person, org):
self.AffiliationID = affiliation.AffiliationID
self.Name = person.PersonFirstName + ' ' + person.PersonLastName
self.Organization = '(' + org.OrganizationCode + ') ' + org.OrganizationName
class SamplingFeatureDataSet():
datasets = {}
related_features = {}
def __init__(self, samplingfeature, datasetresults, relatedfeatures):
sf = samplingfeature
self.SamplingFeatureID = sf.SamplingFeatureID
self.SamplingFeatureUUID = sf.SamplingFeatureUUID
self.SamplingFeatureTypeCV = sf.SamplingFeatureTypeCV
self.SamplingFeatureCode = sf.SamplingFeatureCode
self.SamplingFeatureName = sf.SamplingFeatureName
self.SamplingFeatureDescription = sf.SamplingFeatureDescription
self.SamplingFeatureGeotypeCV = sf.SamplingFeatureGeotypeCV
self.Elevation_m = sf.Elevation_m
self.ElevationDatumCV = sf.ElevationDatumCV
self.FeatureGeometryWKT = sf.FeatureGeometryWKT
self.assignDatasets(datasetresults)
self.assignRelatedFeatures(relatedfeatures)
print(self.datasets)
def assignDatasets(self, datasetresults):
self.datasets = {}
if datasetresults:
for dsr in datasetresults:
if dsr.DataSetObj not in self.datasets:
# if the dataset is not in the dictionary, add it and the first result
self.datasets[dsr.DataSetObj] = []
res = dsr.ResultObj
# res.FeatureActionObj = None
self.datasets[dsr.DataSetObj].append(res)
else:
# if the dataset is in the dictionary, append the result object to the list
res = dsr.ResultObj
# res.FeatureActionObj = None
self.datasets[dsr.DataSetObj].append(res)
def assignRelatedFeatures(self, relatedfeatures):
self.related_features = {}
if relatedfeatures:
for related in relatedfeatures:
if related.SamplingFeatureTypeCV == 'Site':
self.related_features = related
class ReadODM2(serviceBase):
def _get_columns(self, model):
"""Internal helper function to get a dictionary of a model column properties.
Args:
model (object): Sqlalchemy object, Ex. ODM2 model.
Returns:
dict: Dictionary of column properties Ex. {'resultid': 'ResultID'}
"""
from sqlalchemy.orm.properties import ColumnProperty
columns = [(prop.key.lower(), prop.key) for prop in model.__mapper__.iterate_properties if
isinstance(prop, ColumnProperty)]
return dict(columns)
def _check_kwargs(self, args, kwargs):
"""Internal helper function to check for unused keyword arguments
Args:
args (list): List of expected, valid arguments.
kwargs (dict): Dictionary of keyword arguments from user
Returns:
None
"""
# Materialize as a list so the truthiness check below works on Python 3 (filter() is lazy and always truthy).
invkwd = [kwd for kwd in kwargs.keys() if kwd not in args]
if invkwd:
warnings.warn('Got unexpected keyword argument(s) {}'.format(','.join(invkwd)), stacklevel=2)
# Exists functions
def resultExists(self, result):
"""
Check to see if a Result Object exists
* Pass Result Object - return a boolean value of whether the given object exists
"""
try:
ret = self._session.query(exists().where(Results.ResultTypeCV == result.ResultTypeCV)
.where(Results.VariableID == result.VariableID)
.where(Results.UnitsID == result.UnitsID)
.where(Results.ProcessingLevelID == result.ProcessingLevelID)
.where(Results.SampledMediumCV == result.SampledMediumCV)
)
return ret.scalar()
except Exception as e:
print('Error running Query: {}'.format(e))
return None
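# Usage sketch (not part of the original module; assumes a configured SESSION_FACTORY
# as in the docstring Examples elsewhere in this class):
#     READ = ReadODM2(SESSION_FACTORY)
#     some_result = READ.getResults(ids=[1])[0]
#     READ.resultExists(some_result)  # True if a matching Result row exists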
# Annotations
def getAnnotations(self, annottype=None, codes=None, ids=None, **kwargs):
"""
* Pass Nothing - return a list of all objects
* Pass AnnotationTypeCV - return a list of all objects of the given type
* Pass a list of codes - return a list of objects, one for each of the given codes
* Pass a list of ids -return a list of objects, one for each of the given ids
"""
# TODO What keywords do I use for type.
a = Annotations
self._check_kwargs(['type'], kwargs)
if 'type' in kwargs:
warnings.warn("The parameter 'type' is deprecated. Please use the annottype parameter instead.",
DeprecationWarning, stacklevel=2)
annottype = kwargs['type']
if annottype:
if annottype == 'action':
a = ActionAnnotations
elif annottype == 'categoricalresultvalue':
a = CategoricalResultValueAnnotations
elif annottype == 'equipmentannotation':
a = EquipmentAnnotations
elif annottype == 'measurementresultvalue':
a = MeasurementResultValueAnnotations
elif annottype == 'method':
a = MethodAnnotations
elif annottype == 'pointcoverageresultvalue':
a = PointCoverageResultValueAnnotations
elif annottype == 'profileresultvalue':
a = ProfileResultValueAnnotations
elif annottype == 'result':
a = ResultAnnotations
elif annottype == 'samplingfeature':
a = SamplingFeatureAnnotations
elif annottype == 'sectionresultvalue':
a = SectionResultValueAnnotations
elif annottype == 'spectraresultvalue':
a = SpectraResultValueAnnotations
elif annottype == 'timeseriesresultvalue':
a = TimeSeriesResultValueAnnotations
elif annottype == 'trajectoryresultvalue':
a = TrajectoryResultValueAnnotations
elif annottype == 'transectresultvalue':
a = TransectResultValueAnnotations
try:
query = self._session.query(a)
if codes:
query = query.filter(Annotations.AnnotationCode.in_(codes))
if ids:
query = query.filter(Annotations.AnnotationID.in_(ids))
return query.all()
except Exception as e:
print('Error running Query: {}'.format(e))
return None
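# Usage sketch (not part of the original module; code/id values are placeholders):
#     READ = ReadODM2(SESSION_FACTORY)
#     READ.getAnnotations()                              # all Annotations
#     READ.getAnnotations(annottype='samplingfeature')   # SamplingFeatureAnnotations
#     READ.getAnnotations(codes=['QC'], ids=[1, 2])      # filter by code and/or id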
# CV
def getCVs(self, cvtype, **kwargs):
"""
getCVs(self, type):
* Pass CVType - return a list of all objects of the given type
"""
self._check_kwargs(['type'], kwargs)
if 'type' in kwargs:
warnings.warn("The parameter 'type' is deprecated. Please use the cvtype parameter instead.",
DeprecationWarning, stacklevel=2)
cvtype = kwargs['type']
if cvtype == 'actiontype':
CV = CVActionType
elif cvtype == 'aggregationstatistic':
CV = CVAggregationStatistic
elif cvtype == 'annotationtype':
CV = CVAnnotationType
elif cvtype == 'censorcode':
CV = CVCensorCode
elif cvtype == 'dataqualitytype':
CV = CVDataQualityType
elif cvtype == 'dataset type':
CV = CVDataSetType
elif cvtype == 'Directive Type':
CV = CVDirectiveType
elif cvtype == 'Elevation Datum':
CV = CVElevationDatum
elif cvtype == 'Equipment Type':
CV = CVEquipmentType
elif cvtype == 'Medium':
CV = CVMediumType
elif cvtype == 'Method Type':
CV = CVMethodType
elif cvtype == 'Organization Type':
CV = CVOrganizationType
elif cvtype == 'Property Data Type':
CV = CVPropertyDataType
elif cvtype == 'Quality Code':
CV = CVQualityCode
elif cvtype == 'Relationship Type':
CV = CVRelationshipType
elif cvtype == 'Result Type':
CV = CVResultType
elif cvtype == 'Sampling Feature Geo-type':
CV = CVSamplingFeatureGeoType
elif cvtype == 'Sampling Feature Type':
CV = CVSamplingFeatureType
elif cvtype == 'Site Type':
CV = CVSiteType
elif cvtype == 'Spatial Offset Type':
CV = CVSpatialOffsetType
elif cvtype == 'Speciation':
CV = CVSpeciation
elif cvtype == 'Specimen Type':
CV = CVSpecimenType
elif cvtype == 'Status':
CV = CVStatus
elif cvtype == 'Taxonomic Classifier Type':
CV = CVTaxonomicClassifierType
elif cvtype == 'Units Type':
CV = CVUnitsType
elif cvtype == 'Variable Name':
CV = CVVariableName
elif cvtype == 'Variable Type':
CV = CVVariableType
else:
return None
try:
return self._session.query(CV).all()
except Exception as e:
print('Error running Query: {}'.format(e))
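# Usage sketch (not part of the original module): the cvtype strings must match the
# branches above exactly, e.g.
#     READ = ReadODM2(SESSION_FACTORY)
#     READ.getCVs('censorcode')       # -> list of CVCensorCode objects
#     READ.getCVs('Variable Name')    # -> list of CVVariableName objects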
# Core
def getDetailedAffiliationInfo(self):
"""
* Pass Nothing - Return a list of all Affiliations with detailed information,
including Affiliation, People and Organization
"""
q = self._session.query(Affiliations, People, Organizations) \
.filter(Affiliations.PersonID == People.PersonID) \
.filter(Affiliations.OrganizationID == Organizations.OrganizationID)
affiliationList = []
for a, p, o in q.all():
detailedAffiliation = DetailedAffiliation(a, p, o)
affiliationList.append(detailedAffiliation)
return affiliationList
def getDetailedResultInfo(self, resultTypeCV=None, resultID=None, sfID=None):
# TODO: can this be done by just getting the result object and drilling down?
# What is the performance comparison?
"""
Get detailed information for all selected Results including , unit info, site info,
method info , ProcessingLevel info.
* Pass nothing - return a list of all objects
* Pass resultTypeCV - All objects of given type
* Pass a result ID - single object with the given result ID
* Pass a SamplingFeatureID - All objects associated with the given sampling feature.
"""
q = self._session.query(
Actions,
Results,
SamplingFeatures.SamplingFeatureCode,
SamplingFeatures.SamplingFeatureName,
Methods,
Variables,
ProcessingLevels,
Units).filter(Results.VariableID == Variables.VariableID) \
.filter(Results.UnitsID == Units.UnitsID) \
.filter(Results.FeatureActionID == FeatureActions.FeatureActionID) \
.filter(FeatureActions.SamplingFeatureID == SamplingFeatures.SamplingFeatureID) \
.filter(FeatureActions.ActionID == Actions.ActionID) \
.filter(Actions.MethodID == Methods.MethodID) \
.filter(Results.ProcessingLevelID == ProcessingLevels.ProcessingLevelID) \
.order_by(Results.ResultID)
# Only filter by result type when one is provided; otherwise return all result types,
# as documented above.
if resultTypeCV:
q = q.filter(Results.ResultTypeCV == resultTypeCV)
resultList = []
if sfID:
q = q.filter(SamplingFeatures.SamplingFeatureID == sfID)
if resultID:
q = q.filter(Results.ResultID == resultID)
for a, r, sc, sn, m, v, p, u in q.all():
detailedResult = DetailedResult(
a, r, sc, sn, m, v, p, u
)
resultList.append(detailedResult)
return resultList
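# Usage sketch (not part of the original module; the sampling feature id is a placeholder):
#     READ = ReadODM2(SESSION_FACTORY)
#     details = READ.getDetailedResultInfo(resultTypeCV='Time series coverage', sfID=5)
#     for d in details:
#         print(d.ResultID, d.VariableCode, d.UnitsName, d.BeginDateTime)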
# Taxonomic Classifiers
def getTaxonomicClassifiers(self):
"""
getTaxonomicClassifiers(self):
* Pass nothing - return a list of all objects
"""
return self._session.query(TaxonomicClassifiers).all()
# Variable
def getVariables(self, ids=None, codes=None, sitecode=None, results=False):
"""
* Pass nothing - returns the full list of variable objects
* Pass a list of VariableIDs - returns a variable object for each given id
* Pass a list of VariableCodes - returns a variable object for each given code
* Pass a SiteCode - returns a list of Variable objects that are collected at the given site
* Pass results=True - returns only the variables that have results associated with them
"""
if sitecode:
try:
variables = [
x[0] for x in
self._session.query(distinct(Results.VariableID))
.filter(Results.FeatureActionID == FeatureActions.FeatureActionID)
.filter(FeatureActions.SamplingFeatureID == SamplingFeatures.SamplingFeatureID)
.filter(SamplingFeatures.SamplingFeatureCode == sitecode).all()
]
if ids:
ids = list(set(ids).intersection(variables))
else:
ids = variables
except Exception as e:
print('Error running Query: {}'.format(e))
pass
if results:
try:
variables = [x[0] for x in self._session.query(distinct(Results.VariableID)).all()]
if ids:
ids = list(set(ids).intersection(variables))
else:
ids = variables
except Exception as e:
print('Error running Query: {}'.format(e))
pass
query = self._session.query(Variables)
if ids:
query = query.filter(Variables.VariableID.in_(ids))
if codes:
query = query.filter(Variables.VariableCode.in_(codes))
try:
return query.all()
except Exception as e:
print('Error running Query: {}'.format(e))
return None
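# Usage sketch (not part of the original module; the codes below are placeholders):
#     READ = ReadODM2(SESSION_FACTORY)
#     READ.getVariables(codes=['USU36'])       # by variable code
#     READ.getVariables(sitecode='SITE_A')     # variables measured at a site
#     READ.getVariables(results=True)          # only variables that have results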
# Method
def getMethods(self, ids=None, codes=None, methodtype=None, **kwargs):
"""
* Pass nothing - returns full list of method objects
* Pass a list of MethodIDs - returns a single method object for each given id
* Pass a list of MethodCode - returns a single method object for each given code
* Pass a MethodType - returns a list of method objects of the given MethodType
"""
self._check_kwargs(['type'], kwargs)
if 'type' in kwargs:
warnings.warn("The parameter 'type' is deprecated. Please use the medtype parameter instead.",
DeprecationWarning, stacklevel=2)
methodtype = kwargs['type']
q = self._session.query(Methods)
if ids:
q = q.filter(Methods.MethodID.in_(ids))
if codes:
q = q.filter(Methods.MethodCode.in_(codes))
if methodtype:
q = q.filter_by(MethodTypeCV=methodtype)
try:
return q.all()
except Exception as e:
print('Error running Query: {}'.format(e))
return None
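# Usage sketch (not part of the original module; the code and type values are placeholders):
#     READ = ReadODM2(SESSION_FACTORY)
#     READ.getMethods(codes=['FieldObs'])
#     READ.getMethods(methodtype='Observation')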
# ProcessingLevel
def getProcessingLevels(self, ids=None, codes=None):
"""
Retrieve a list of Processing Levels
If no arguments are passed to the function, or their values are None,
all Processing Levels objects in the database will be returned.
Args:
ids (list, optional): List of Processing Levels IDs.
codes (list, optional): List of Processing Levels Codes.
Returns:
list: List of ProcessingLevels Objects
Examples:
>>> READ = ReadODM2(SESSION_FACTORY)
>>> READ.getProcessingLevels(ids=[1, 3])
>>> READ.getProcessingLevels(codes=['L1', 'L3'])
"""
q = self._session.query(ProcessingLevels)
if ids:
q = q.filter(ProcessingLevels.ProcessingLevelID.in_(ids))
if codes:
q = q.filter(ProcessingLevels.ProcessingLevelCode.in_(codes))
try:
return q.all()
except Exception as e:
print('Error running Query: {}'.format(e))
return None
# Sampling Feature
def getSamplingFeatures(self, ids=None, codes=None, uuids=None,
sftype=None, wkt=None, results=False, **kwargs):
"""Retrieve a list of Sampling Feature objects.
If no arguments are passed to the function, or their values are None,
all Sampling Feature objects in the database will be returned.
Args:
ids (list, optional): List of SamplingFeatureIDs.
codes (list, optional): List of SamplingFeature Codes.
uuids (list, optional): List of UUIDs string.
sftype (str, optional): Type of Sampling Feature from
`controlled vocabulary name <http://vocabulary.odm2.org/samplingfeaturetype/>`_.
wkt (str, optional): SamplingFeature Well Known Text.
results (bool, optional): Whether or not you want to return only the
sampling features that have results associated with them.
Returns:
list: List of Sampling Feature objects
Examples:
>>> READ = ReadODM2(SESSION_FACTORY)
>>> READ.getSamplingFeatures(ids=[39, 40])
>>> READ.getSamplingFeatures(codes=['HOME', 'FIELD'])
>>> READ.getSamplingFeatures(uuids=['a6f114f1-5416-4606-ae10-23be32dbc202',
... '5396fdf3-ceb3-46b6-aaf9-454a37278bb4'])
>>> READ.getSamplingFeatures(type='Site')
>>> READ.getSamplingFeatures(wkt='POINT (30 10)')
>>> READ.getSamplingFeatures(results=True)
>>> READ.getSamplingFeatures(type='Site', results=True)
"""
self._check_kwargs(['type'], kwargs)
if 'type' in kwargs:
warnings.warn("The parameter 'type' is deprecated. Please use the sftype parameter instead.",
DeprecationWarning, stacklevel=2)
sftype = kwargs['type']
if results:
try:
fas = [x[0] for x in self._session.query(distinct(Results.FeatureActionID)).all()]
except Exception as e:
print('Error running Query: {}'.format(e))
return None
sf = [x[0] for x in self._session.query(distinct(FeatureActions.SamplingFeatureID)).filter(FeatureActions.FeatureActionID.in_(fas)).all()] # noqa
if ids:
ids = list(set(ids).intersection(sf))
else:
ids = sf
q = self._session.query(SamplingFeatures)
if sftype:
q = q.filter_by(SamplingFeatureTypeCV=sftype)
if ids:
q = q.filter(SamplingFeatures.SamplingFeatureID.in_(ids))
if codes:
q = q.filter(SamplingFeatures.SamplingFeatureCode.in_(codes))
if uuids:
q = q.filter(SamplingFeatures.SamplingFeatureUUID.in_(uuids))
if wkt:
q = q.filter_by(FeatureGeometryWKT=wkt)
try:
return q.all()
except Exception as e:
print('Error running Query: {}'.format(e))
return None
def getRelatedSamplingFeatures(self, sfid=None, rfid=None, relationshiptype=None):
# TODO: add functionality to filter by code
"""
* Pass a SamplingFeatureID - get a list of sampling feature objects
related to the input sampling feature
* Pass a RelatedFeatureID - get a list of Sampling features objects through the related feature
* Pass a RelationshipTypeCV - get a list of sampling feature objects with the given type
"""
sf = self._session.query(distinct(SamplingFeatures.SamplingFeatureID)) \
.select_from(RelatedFeatures)
if sfid:
sf = sf.join(RelatedFeatures.RelatedFeatureObj).filter(RelatedFeatures.SamplingFeatureID == sfid)
if rfid:
sf = sf.join(RelatedFeatures.SamplingFeatureObj).filter(RelatedFeatures.RelatedFeatureID == rfid)
if relationshiptype:
sf = sf.filter(RelatedFeatures.RelationshipTypeCV == relationshiptype)
try:
sfids = [x[0] for x in sf.all()]
if len(sfids) > 0:
sflist = self.getSamplingFeatures(ids=sfids)
return sflist
except Exception as e:
print('Error running Query: {}'.format(e))
return None
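# Usage sketch (not part of the original module; ids and the relationship string are illustrative):
#     READ = ReadODM2(SESSION_FACTORY)
#     READ.getRelatedSamplingFeatures(rfid=1)      # features related to feature 1
#     READ.getRelatedSamplingFeatures(sfid=42, relationshiptype='Was Collected at')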
# Action
def getActions(self, ids=None, acttype=None, sfid=None, **kwargs):
"""
* Pass nothing - returns a list of all Actions
* Pass a list of Action ids - returns a list of Action objects
* Pass a ActionTypeCV - returns a list of Action objects of that type
* Pass a SamplingFeature ID - returns a list of Action objects
associated with that Sampling feature ID, Found through featureAction table
"""
self._check_kwargs(['type'], kwargs)
if 'type' in kwargs:
warnings.warn("The parameter 'type' is deprecated. Please use the acttype parameter instead.",
DeprecationWarning, stacklevel=2)
acttype = kwargs['type']
a = Actions
if acttype == 'equipment':
a = EquipmentActions
elif acttype == 'calibration':
a = CalibrationActions
elif acttype == 'maintenance':
a = MaintenanceActions
q = self._session.query(a)
if ids:
q = q.filter(a.ActionID.in_(ids))
if sfid:
q = q.join(FeatureActions).filter(FeatureActions.SamplingFeatureID == sfid)
try:
return q.all()
except Exception as e:
print('Error running Query: {}'.format(e))
return None
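# Usage sketch (not part of the original module; ids are placeholders):
#     READ = ReadODM2(SESSION_FACTORY)
#     READ.getActions(acttype='equipment')   # EquipmentActions
#     READ.getActions(sfid=3)                # actions at a sampling feature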
def getRelatedActions(self, actionid=None):
"""
* Pass an ActionID - get a list of Action objects related to the input
action along with the relationship type
"""
q = self._session.query(Actions).select_from(RelatedActions).join(RelatedActions.RelatedActionObj)
if actionid:
q = q.filter(RelatedActions.ActionID == actionid)
try:
return q.all()
except Exception as e:
print('Error running Query: {}'.format(e))
return None
# Unit
def getUnits(self, ids=None, name=None, unittype=None, **kwargs):
"""
* Pass nothing - returns a list of all units objects
* Pass a list of UnitsIDs - returns a units object for each given id
* Pass a UnitsName - returns the units object(s) with that name
* Pass a UnitsType - returns a list of all objects of the given type
"""
self._check_kwargs(['type'], kwargs)
if 'type' in kwargs:
warnings.warn("The parameter 'type' is deprecated. Please use the unittype parameter instead.",
DeprecationWarning, stacklevel=2)
unittype = kwargs['type']
q = self._session.query(Units)
if ids:
q = q.filter(Units.UnitsID.in_(ids))
if name:
q = q.filter(Units.UnitsName.ilike(name))
if unittype:
q = q.filter(Units.UnitsTypeCV.ilike(unittype))
try:
return q.all()
except Exception as e:
print('Error running Query: {}'.format(e))
return None
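# Usage sketch (not part of the original module; the name/type values are placeholders):
#     READ = ReadODM2(SESSION_FACTORY)
#     READ.getUnits(name='degree celsius')
#     READ.getUnits(unittype='Temperature')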
# Organization
def getOrganizations(self, ids=None, codes=None):
"""
* Pass nothing - returns a list of all organization objects
* Pass a list of OrganizationIDs - returns an organization object for each given id
* Pass a list of OrganizationCodes - returns an organization object for each given code
"""
q = self._session.query(Organizations)
if ids:
q = q.filter(Organizations.OrganizationID.in_(ids))
if codes:
q = q.filter(Organizations.OrganizationCode.in_(codes))
try:
return q.all()
except Exception as e:
print('Error running Query: {}'.format(e))
return None
# Person
def getPeople(self, ids=None, firstname=None, lastname=None):
"""
* Pass nothing - returns a list of all People objects
* Pass a list of PersonIDs - returns a People object for each given id
* Pass a First Name - returns a list of People objects with that first name
* Pass a Last Name - returns a list of People objects with that last name
"""
q = self._session.query(People)
if ids:
q = q.filter(People.PersonID.in_(ids))
if firstname:
q = q.filter(People.PersonFirstName.ilike(firstname))
if lastname:
q = q.filter(People.PersonLastName.ilike(lastname))
try:
return q.all()
except Exception as e:
print('Error running Query: {}'.format(e))
return None
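# Usage sketch (not part of the original module; names and ids are placeholders):
#     READ = ReadODM2(SESSION_FACTORY)
#     READ.getPeople(firstname='John')
#     READ.getPeople(ids=[1, 2])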
def getAffiliations(self, ids=None, personfirst=None, personlast=None, orgcode=None):
"""Retrieve a list of Affiliation objects.
If no arguments are passed to the function, or their values are None,
all Affiliation objects in the database will be returned.
Args:
ids (list, optional): List of AffiliationIDs.
personfirst (str, optional): Person First Name.
personlast (str, optional): Person Last Name.
orgcode (str, optional): Organization Code.
Returns:
list: List of Affiliation objects
Examples:
>>> ReadODM2.getAffiliations(ids=[39,40])
>>> ReadODM2.getAffiliations(personfirst='John',
... personlast='Smith')
>>> ReadODM2.getAffiliations(orgcode='Acme')
"""
q = self._session.query(Affiliations)
if ids:
q = q.filter(Affiliations.AffiliationID.in_(ids))
if orgcode:
q = q.join(Affiliations.OrganizationObj).filter(Organizations.OrganizationCode.ilike(orgcode))
if personfirst:
q = q.join(Affiliations.PersonObj).filter(People.PersonFirstName.ilike(personfirst))
if personlast:
q = q.join(Affiliations.PersonObj).filter(People.PersonLastName.ilike(personlast))
try:
return q.all()
except Exception as e:
print('Error running Query: {}'.format(e))
return None
# Results
def getResults(self, ids=None, restype=None, uuids=None, actionid=None, simulationid=None,
variableid=None, siteid=None, sfids=None, sfuuids=None, sfcodes=None, **kwargs):
# TODO what if user sends in both type and actionid vs just actionid
"""Retrieve a list of Result objects.
If no arguments are passed to the function, or their values are None,
all Result objects in the database will be returned.
Args:
ids (list, optional): List of ResultIDs.
restype (str, optional): Type of Result from
`controlled vocabulary name <http://vocabulary.odm2.org/resulttype/>`_.
uuids (list, optional): List of UUIDs string.
actionid (int, optional): ActionID.
simulationid (int, optional): SimulationID.
variableid (int, optional): VariableID.
siteid (int, optional): SiteID. Goes through the related features table and finds
all results recorded at the given site.
sfids(list, optional): List of Sampling Feature IDs integer.
sfuuids(list, optional): List of Sampling Feature UUIDs string.
sfcodes (list, optional): List of Sampling Feature codes string.
Returns:
list: List of Result objects
Examples:
>>> ReadODM2.getResults(ids=[39,40])
>>> ReadODM2.getResults(restype='Time series coverage')
>>> ReadODM2.getResults(sfids=[65])
>>> ReadODM2.getResults(uuids=['a6f114f1-5416-4606-ae10-23be32dbc202',
... '5396fdf3-ceb3-46b6-aaf9-454a37278bb4'])
>>> ReadODM2.getResults(simulationid=50)
>>> ReadODM2.getResults(siteid=6)
>>> ReadODM2.getResults(variableid=7)
>>> ReadODM2.getResults(actionid=20)
"""
query = self._session.query(Results)
self._check_kwargs(['type', 'sfid'], kwargs)
if 'type' in kwargs:
warnings.warn("The parameter 'type' is deprecated. Please use the restype parameter instead.",
DeprecationWarning, stacklevel=2)
restype = kwargs['type']
if restype:
query = query.filter_by(ResultTypeCV=restype)
if variableid:
query = query.filter_by(VariableID=variableid)
if ids:
query = query.filter(Results.ResultID.in_(ids))
if uuids:
query = query.filter(Results.ResultUUID.in_(uuids))
if simulationid:
query = query.join(FeatureActions) \
.join(Actions) \
.join(Simulations) \
.filter_by(SimulationID=simulationid)
if actionid:
query = query.join(FeatureActions).filter_by(ActionID=actionid)
if 'sfid' in kwargs:
warnings.warn("The parameter 'sfid' is deprecated. " +
"Please use the sfids parameter instead and send in a list.", # noqa
DeprecationWarning, stacklevel=2)
if kwargs['sfid']:
query = query.join(FeatureActions).filter_by(SamplingFeatureID=kwargs['sfid'])
if sfids or sfcodes or sfuuids:
sf_list = self.getSamplingFeatures(ids=sfids, codes=sfcodes, uuids=sfuuids)
sfids = []
for sf in sf_list:
sfids.append(sf.SamplingFeatureID)
query = query.join(FeatureActions).filter(FeatureActions.SamplingFeatureID.in_(sfids))
if siteid:
sfids = [x[0] for x in self._session.query(
distinct(SamplingFeatures.SamplingFeatureID))
.select_from(RelatedFeatures)
.join(RelatedFeatures.SamplingFeatureObj)
.filter(RelatedFeatures.RelatedFeatureID == siteid)
.all()
]
# TODO does this code do the same thing as the code above?
# sf_list = self.getRelatedSamplingFeatures(rfid=siteid)
# sfids = []
# for sf in sf_list:
# sfids.append(sf.SamplingFeatureID)
query = query.join(FeatureActions).filter(FeatureActions.SamplingFeatureID.in_(sfids))
try:
return query.all()
except Exception as e:
print('Error running Query: {}'.format(e))
return None
# Datasets
def getDataSets(self, ids=None, codes=None, uuids=None, dstype=None):
"""
Retrieve a list of Datasets
Args:
ids (list, optional): List of DataSetsIDs.
codes (list, optional): List of DataSet Codes.
uuids (list, optional): List of Dataset UUIDs string.
dstype (str, optional): Type of Dataset from
`controlled vocabulary name <http://vocabulary.odm2.org/datasettype/>`_.
Returns:
list: List of DataSets Objects
Examples:
>>> READ = ReadODM2(SESSION_FACTORY)
>>> READ.getDataSets(ids=[39, 40])
>>> READ.getDataSets(codes=['HOME', 'FIELD'])
>>> READ.getDataSets(uuids=['a6f114f1-5416-4606-ae10-23be32dbc202',
... '5396fdf3-ceb3-46b6-aaf9-454a37278bb4'])
>>> READ.getDataSets(dstype='singleTimeSeries')
"""
q = self._session.query(DataSets)
if ids:
q = q.filter(DataSets.DataSetID.in_(ids))
if codes:
q = q.filter(DataSets.DataSetCode.in_(codes))
if uuids:
q = q.filter(DataSets.DataSetUUID.in_(uuids))
if dstype:
q = q.filter(DataSets.DataSetTypeCV == dstype)
try:
return q.all()
except Exception as e:
print('Error running Query {}'.format(e))
return None
# Datasets
def getDataSetsResults(self, ids=None, codes=None, uuids=None, dstype=None):
"""
Retrieve a detailed list of Datasets along with detailed metadata about the datasets
and the results contained within them
**Must specify either DataSetID OR DataSetUUID OR DataSetCode**
Args:
ids (list, optional): List of DataSetsIDs.
codes (list, optional): List of DataSet Codes.
uuids (list, optional): List of Dataset UUIDs string.
dstype (str, optional): Type of Dataset from
`controlled vocabulary name <http://vocabulary.odm2.org/datasettype/>`_.
Returns:
list: List of DataSetsResults Objects
Examples:
>>> READ = ReadODM2(SESSION_FACTORY)
>>> READ.getDataSetsResults(ids=[39, 40])
>>> READ.getDataSetsResults(codes=['HOME', 'FIELD'])
>>> READ.getDataSetsResults(uuids=['a6f114f1-5416-4606-ae10-23be32dbc202',
... '5396fdf3-ceb3-46b6-aaf9-454a37278bb4'])
>>> READ.getDataSetsResults(dstype='singleTimeSeries')
"""
# make sure one of the three arguments has been sent in
if all(v is None for v in [ids, codes, uuids]):
raise ValueError('Expected DataSetID OR DataSetUUID OR DataSetCode argument')
q = self._session.query(DataSetsResults) \
.join(DataSets)
if ids:
q = q.filter(DataSets.DataSetID.in_(ids))
if codes:
q = q.filter(DataSets.DataSetCode.in_(codes))
if uuids:
q = q.filter(DataSets.DataSetUUID.in_(uuids))
if dstype:
q = q.filter(DataSets.DataSetTypeCV == dstype)
try:
return q.all()
except Exception as e:
print('Error running Query {}'.format(e))
return None
def getDataSetsValues(self, ids=None, codes=None, uuids=None, dstype=None, lowercols=True):
"""
Retrieve a list of datavalues associated with the given dataset info
**Must specify either DataSetID OR DataSetUUID OR DataSetCode**
Args:
ids (list, optional): List of DataSetsIDs.
codes (list, optional): List of DataSet Codes.
uuids (list, optional): List of Dataset UUIDs string.
dstype (str, optional): Type of Dataset from
`controlled vocabulary name <http://vocabulary.odm2.org/datasettype/>`_.
lowercols (bool, optional): Make column names to be lowercase.
Default to True.
**Please start upgrading your code to rely on CamelCase column names;
in a near-future release the default will be changed to False,
and later the parameter may be removed.**
Returns:
list: List of Result Values Objects
Examples:
>>> READ = ReadODM2(SESSION_FACTORY)
>>> READ.getDataSetsValues(ids=[39, 40])
>>> READ.getDataSetsValues(codes=['HOME', 'FIELD'])
>>> READ.getDataSetsValues(uuids=['a6f114f1-5416-4606-ae10-23be32dbc202',
... '5396fdf3-ceb3-46b6-aaf9-454a37278bb4'])
>>> READ.getDataSetsValues(dstype='singleTimeSeries', lowercols=False)
"""
dsr = self.getDataSetsResults(ids, codes, uuids, dstype)
resids = []
for ds in dsr:
resids.append(ds.ResultID)
try:
return self.getResultValues(resultids=resids, lowercols=lowercols)
except Exception as e:
print('Error running Query {}'.format(e))
return None
def getSamplingFeatureDatasets(self, ids=None, codes=None, uuids=None, dstype=None, sftype=None):
"""
Retrieve a list of Datasets associated with the given sampling feature data.
**Must specify either samplingFeatureID OR samplingFeatureUUID OR samplingFeatureCode**
Args:
ids (list, optional): List of SamplingFeatureIDs.
codes (list, optional): List of SamplingFeature Codes.
uuids (list, optional): List of UUIDs string.
dstype (str, optional): Type of Dataset from
`controlled vocabulary name <http://vocabulary.odm2.org/datasettype/>`_.
sftype (str, optional): Type of SamplingFeature from
`controlled vocabulary name <http://vocabulary.odm2.org/samplingfeaturetype/>`_.
Returns:
list: List of DataSetsResults Objects associated with the given sampling feature
Examples:
>>> READ = ReadODM2(SESSION_FACTORY)
>>> READ.getSamplingFeatureDatasets(ids=[39, 40])
>>> READ.getSamplingFeatureDatasets(codes=['HOME', 'FIELD'])
>>> READ.getSamplingFeatureDatasets(uuids=['a6f114f1-5416-4606-ae10-23be32dbc202',
... '5396fdf3-ceb3-46b6-aaf9-454a37278bb4'])
>>> READ.getSamplingFeatureDatasets(dstype='singleTimeSeries')
>>> READ.getSamplingFeatureDatasets(sftype='Specimen')
"""
# make sure one of the three arguments has been sent in
if all(v is None for v in [ids, codes, uuids, sftype]):
raise ValueError(
'Expected samplingFeatureID OR samplingFeatureUUID '
'OR samplingFeatureCode OR samplingFeatureType '
'argument')
sf_query = self._session.query(SamplingFeatures)
if sftype:
sf_query = sf_query.filter(SamplingFeatures.SamplingFeatureTypeCV == sftype)
if ids:
sf_query = sf_query.filter(SamplingFeatures.SamplingFeatureID.in_(ids))
if codes:
sf_query = sf_query.filter(SamplingFeatures.SamplingFeatureCode.in_(codes))
if uuids:
sf_query = sf_query.filter(SamplingFeatures.SamplingFeatureUUID.in_(uuids))
sf_list = []
for sf in sf_query.all():
sf_list.append(sf)
try:
sfds = []
for sf in sf_list:
# Eager loading the data.
q = self._session.query(DataSetsResults)\
.join(DataSetsResults.ResultObj)\
.join(Results.FeatureActionObj)\
.filter(FeatureActions.SamplingFeatureID == sf.SamplingFeatureID)\
.options(contains_eager(DataSetsResults.ResultObj)
.contains_eager(Results.FeatureActionObj)
.load_only(FeatureActions.SamplingFeatureID))
if dstype:
q = q.filter_by(DatasetTypeCV=dstype)
vals = q.all()
related = self.getRelatedSamplingFeatures(sf.SamplingFeatureID)
sfds.append(SamplingFeatureDataSet(sf, vals, related))
except Exception as e:
print('Error running Query: {}'.format(e))
return None
return sfds
# Data Quality
def getDataQuality(self):
"""
* Pass nothing - return a list of all objects
"""
return self._session.query(DataQuality).all()
# TODO DataQuality Schema Queries
def getReferenceMaterials(self):
"""
* Pass nothing - return a list of all objects
"""
return self._session.query(ReferenceMaterials).all()
def getReferenceMaterialValues(self):
"""
* Pass nothing - return a list of all objects
"""
return self._session.query(ReferenceMaterialValues).all()
def getResultNormalizationValues(self):
"""
* Pass nothing - return a list of all objects
"""
return self._session.query(ResultNormalizationValues).all()
def getResultsDataQuality(self):
"""
* Pass nothing - return a list of all objects
"""
return self._session.query(ResultsDataQuality).all()
# TODO Equipment Schema Queries
# Equipment
def getEquipment(self, codes=None, equiptype=None, sfid=None, actionid=None, **kwargs):
"""
* Pass nothing - returns a list of all Equipment objects
* Pass a list of EquipmentCodes- return a list of all Equipment objects that match each of the codes
* Pass an EquipmentType - currently ignored (see the note in the function body)
* Pass a SamplingFeatureID - returns a list of Equipment objects used at that sampling feature
* Pass an ActionID - returns a list of Equipment objects used for that action
"""
self._check_kwargs(['type'], kwargs)
if 'type' in kwargs:
warnings.warn("The parameter 'type' is deprecated. Please use the equiptype parameter instead.",
DeprecationWarning, stacklevel=2)
equiptype = kwargs['type']
# NOTE: Equiptype currently unused!
if equiptype:
pass
e = self._session.query(Equipment)
if sfid:
e = e.join(EquipmentUsed) \
.join(Actions) \
.join(FeatureActions) \
.filter(FeatureActions.SamplingFeatureID == sfid)
if codes:
e = e.filter(Equipment.EquipmentCode.in_(codes))
if actionid:
e = e.join(EquipmentUsed).join(Actions) \
.filter(Actions.ActionID == actionid)
return e.all()
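# Usage sketch (not part of the original module; values are placeholders):
#     READ = ReadODM2(SESSION_FACTORY)
#     READ.getEquipment(codes=['YSI-01'])
#     READ.getEquipment(sfid=7)        # equipment used at a sampling feature
#     READ.getEquipment(actionid=20)   # equipment used for a specific action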
def CalibrationReferenceEquipment(self):
"""
* Pass nothing - return a list of all objects
"""
return self._session.query(CalibrationReferenceEquipment).all()
def CalibrationStandards(self):
"""
* Pass nothing - return a list of all objects
"""
return self._session.query(CalibrationStandards).all()
def DataloggerFileColumns(self):
"""
* Pass nothing - return a list of all objects
"""
return self._session.query(DataLoggerFileColumns).all()
def DataLoggerFiles(self):
"""
* Pass nothing - return a list of all objects
"""
return self._session.query(DataLoggerFiles).all()
def DataloggerProgramFiles(self):
"""
* Pass Nothing - return a list of all objects
"""
return self._session.query(DataLoggerProgramFiles).all()
def EquipmentModels(self):
"""
* Pass Nothing - return a list of all objects
"""
return self._session.query(EquipmentModels).all()
def EquipmentUsed(self):
"""
* Pass Nothing - return a list of all objects
"""
return self._session.query(EquipmentUsed).all()
def InstrumentOutputVariables(self, modelid=None, variableid=None):
"""
* Pass Nothing - return a list of all objects
* Pass ModelID
* Pass VariableID
"""
i = self._session.query(InstrumentOutputVariables)
if modelid:
i = i.filter_by(ModelID=modelid)
if variableid:
i = i.filter_by(VariableID=variableid)
return i.all()
def RelatedEquipment(self, code=None):
"""
* Pass nothing - return a list of all objects
* Pass code- return a single object with the given code
"""
r = self._session.query(RelatedEquipment)
if code:
r = r.filter_by(EquipmentCode=code)
return r.all()
# Extension Properties
def getExtensionProperties(self, exptype=None, **kwargs):
"""
* Pass nothing - return a list of all objects
* Pass type- return a list of all objects of the given type
"""
# Todo what values to use for extensionproperties type
self._check_kwargs(['type'], kwargs)
if 'type' in kwargs:
warnings.warn("The parameter 'type' is deprecated. Please use the exptype parameter instead.",
DeprecationWarning, stacklevel=2)
exptype = kwargs['type']
e = ExtensionProperties
if exptype == 'action':
e = ActionExtensionPropertyValues
elif exptype == 'citation':
e = CitationExtensionPropertyValues
elif exptype == 'method':
e = MethodExtensionPropertyValues
elif exptype == 'result':
e = ResultExtensionPropertyValues
elif exptype == 'samplingfeature':
e = SamplingFeatureExtensionPropertyValues
elif exptype == 'variable':
e = VariableExtensionPropertyValues
try:
return self._session.query(e).all()
except Exception as e:
print('Error running Query: {}'.format(e))
return None
# External Identifiers
def getExternalIdentifiers(self, eitype=None, **kwargs):
"""
* Pass nothing - return a list of all objects
* Pass type- return a list of all objects of the given type
"""
self._check_kwargs(['type'], kwargs)
if 'type' in kwargs:
warnings.warn("The parameter 'type' is deprecated. Please use the eitype parameter instead.",
DeprecationWarning, stacklevel=2)
eitype = kwargs['type']
e = ExternalIdentifierSystems
if eitype == 'citation':
e = CitationExternalIdentifiers
elif eitype == 'method':
e = MethodExternalIdentifiers
elif eitype == 'person':
e = PersonExternalIdentifiers
elif eitype == 'referencematerial':
e = ReferenceMaterialExternalIdentifiers
elif eitype == 'samplingfeature':
e = SamplingFeatureExternalIdentifiers
elif eitype == 'spatialreference':
e = SpatialReferenceExternalIdentifiers
elif eitype == 'taxonomicclassifier':
e = TaxonomicClassifierExternalIdentifiers
elif eitype == 'variable':
e = VariableExternalIdentifiers
try:
return self._session.query(e).all()
except Exception as e:
print('Error running Query: {}'.format(e))
return None
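# Usage sketch (not part of the original module):
#     READ = ReadODM2(SESSION_FACTORY)
#     READ.getExternalIdentifiers()                  # ExternalIdentifierSystems
#     READ.getExternalIdentifiers(eitype='person')   # PersonExternalIdentifiers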
# TODO functions for Lab Analyses
# Lab Analyses
def getDirectives(self):
"""
getDirectives(self)
* Pass nothing - return a list of all objects
"""
return self._session.query(Directives).all()
def getActionDirectives(self):
"""
getActionDirectives(self)
* Pass nothing - return a list of all objects
"""
return self._session.query(ActionDirectives).all()
def getSpecimenBatchPositions(self):
"""
getSpecimenBatchPositions(self)
* Pass nothing - return a list of all objects
"""
return self._session.query(SpecimenBatchPositions).all()
# TODO functions for Provenance
# Provenance
def getAuthorLists(self):
"""
getAuthorLists(self)
* Pass nothing - return a list of all objects
"""
return self._session.query(AuthorLists).all()
def getDatasetCitations(self):
"""
getDatasetCitations(self)
* Pass nothing - return a list of all objects
"""
return self._session.query(DataSetCitations).all()
def getDerivationEquations(self):
"""
getDerivationEquations(self)
* Pass nothing - return a list of all objects
"""
return self._session.query(DerivationEquations).all()
def getMethodCitations(self):
"""
getMethodCitations(self)
* Pass nothing - return a list of all objects
"""
return self._session.query(MethodCitations).all()
def getRelatedAnnotations(self):
"""
getRelatedAnnotations(self)
* Pass nothing - return a list of all objects
"""
return self._session.query(RelatedAnnotations).all()
def getRelatedCitations(self):
"""
getRelatedCitations(self)
* Pass nothing - return a list of all objects
"""
return self._session.query(RelatedCitations).all()
def getRelatedDatasets(self):
"""
getRelatedDatasets(self)
* Pass nothing - return a list of all objects
"""
return self._session.query(RelatedDataSets).all()
def getRelatedResults(self):
"""
getRelatedResults(self)
* Pass nothing - return a list of all objects
"""
return self._session.query(RelatedResults).all()
def getResultDerivationEquations(self):
"""
getResultDerivationEquations(self)
* Pass nothing - return a list of all objects
"""
return self._session.query(ResultDerivationEquations).all()
def getResultValues(self, resultids, starttime=None, endtime=None, lowercols=True):
"""
Retrieve result values associated with the given result.
**The resultids must be associated with the same result type**
Args:
resultids (list): List of SamplingFeatureIDs.
starttime (object, optional): Start time to filter by as datetime object.
endtime (object, optional): End time to filter by as datetime object.
lowercols (bool, optional): Make column names to be lowercase.
Default to True.
**Please start upgrading your code to rely on CamelCase column names;
in a near-future release the default will be changed to False,
and later the parameter may be removed.**
Returns:
DataFrame: Pandas dataframe of result values.
Examples:
>>> READ = ReadODM2(SESSION_FACTORY)
>>> READ.getResultValues(resultids=[10, 11])
>>> READ.getResultValues(resultids=[100, 20, 34], starttime=datetime.today())
>>> READ.getResultValues(resultids=[1, 2, 3, 4],
... starttime=datetime(2000, 1, 1),
... endtime=datetime(2003, 2, 1), lowercols=False)
"""
restype = self._session.query(Results).filter_by(ResultID=resultids[0]).first().ResultTypeCV
ResultValues = TimeSeriesResultValues
if 'categorical' in restype.lower():
ResultValues = CategoricalResultValues
elif 'measurement' in restype.lower():
ResultValues = MeasurementResultValues
elif 'point' in restype.lower():
ResultValues = PointCoverageResultValues
elif 'profile' in restype.lower():
ResultValues = ProfileResultValues
elif 'section' in restype.lower():
ResultValues = SectionResults
elif 'spectra' in restype.lower():
ResultValues = SpectraResultValues
elif 'time' in restype.lower():
ResultValues = TimeSeriesResultValues
elif 'trajectory' in restype.lower():
ResultValues = TrajectoryResultValues
elif 'transect' in restype.lower():
ResultValues = TransectResultValues
q = self._session.query(ResultValues).filter(ResultValues.ResultID.in_(resultids))
if starttime:
q = q.filter(ResultValues.ValueDateTime >= starttime)
if endtime:
q = q.filter(ResultValues.ValueDateTime <= endtime)
try:
# F841 local variable 'vals' is assigned to but never used
# vals = q.order_by(ResultType.ValueDateTime)
query = q.statement.compile(dialect=self._session_factory.engine.dialect)
df = pd.read_sql_query(
sql=query,
con=self._session_factory.engine,
params=query.params
)
if not lowercols:
df.columns = [self._get_columns(ResultValues)[c] for c in df.columns]
else:
warnings.warn(
"In a near-future release, " + # noqa
"the parameter 'lowercols' default will be changed to False, " +
"and later the parameter may be removed.", # noqa
DeprecationWarning, stacklevel=2)
return df
except Exception as e:
print('Error running Query: {}'.format(e))
return None
# SamplingFeatures
# Site
def getSpatialReferences(self, srsCodes=None):
"""
getSpatialReferences(self, srsCodes=None)
* Pass nothing - return a list of all Spatial References
* Pass in a list of SRS Codes-
"""
q = self._session.query(SpatialReferences)
if srsCodes:
q = q.filter(SpatialReferences.SRSCode.in_(srsCodes))
try:
return q.all()
except Exception as e:
print('Error running Query: {}'.format(e))
return None
# Simulation
def getSimulations(self, name=None, actionid=None):
"""
getSimulations(self, name=None, actionid=None)
* Pass nothing - get a list of all Simulation objects
* Pass a SimulationName - get a single simulation object
* Pass an ActionID - get a single simulation object
"""
s = self._session.query(Simulations)
if name:
s = s.filter(Simulations.SimulationName.ilike(name))
if actionid:
s = s.filter_by(ActionID=actionid)
try:
return s.all()
except Exception as e:
print('Error running Query: {}'.format(e))
return None
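# Usage sketch (not part of the original module; the name is a placeholder):
#     READ = ReadODM2(SESSION_FACTORY)
#     READ.getSimulations(name='SWMM run 1')
#     READ.getSimulations(actionid=12)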
def getModels(self, codes=None):
"""
getModels(self, codes=None)
* Pass nothing - return a list of all Model Objects
* Pass a list of ModelCodes - get a list of Model objects with the given ModelCodes
"""
m = self._session.query(Models)
if codes:
m = m.filter(Models.ModelCode.in_(codes))
try:
return m.all()
except Exception as e:
print('Error running Query: {}'.format(e))
return None
def getRelatedModels(self, modid=None, code=None, **kwargs):
"""
getRelatedModels(self, id=None, code=None)
* Pass a ModelID - get a list of Model objects related to the Model with the given ModelID
* Pass a ModelCode - get a list of Model objects related to the Model with the given ModelCode
"""
self._check_kwargs(['id'], kwargs)
if 'id' in kwargs:
warnings.warn("The parameter 'id' is deprecated. Please use the modid parameter instead.",
DeprecationWarning, stacklevel=2)
modid = kwargs['id']
m = self._session.query(Models).select_from(RelatedModels).join(RelatedModels.ModelObj)
if modid:
m = m.filter(RelatedModels.ModelID == modid)
if code:
m = m.filter(Models.ModelCode == code)
try:
return m.all()
except Exception as e:
print('Error running Query: {}'.format(e))
return None
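# Usage sketch (not part of the original module; values are placeholders):
#     READ = ReadODM2(SESSION_FACTORY)
#     READ.getRelatedModels(modid=1)
#     READ.getRelatedModels(code='SWMM')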
| {
"repo_name": "ODM2/ODM2PythonAPI",
"path": "odm2api/services/readService.py",
"copies": "1",
"size": "59584",
"license": "bsd-3-clause",
"hash": -4594977475727984600,
"line_mean": 38.6433799069,
"line_max": 158,
"alpha_frac": 0.5968380773,
"autogenerated": false,
"ratio": 4.4211619796690655,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5518000056969066,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import warnings
import datashape
from datashape import String, DataShape, Option, bool_
from odo.utils import copydoc
from .expressions import schema_method_list, ElemWise
from .arithmetic import Interp, Repeat, _mkbin, repeat, interp, _add, _radd
from ..compatibility import basestring, _inttypes, builtins
from ..deprecation import deprecated
__all__ = ['Like',
'like',
'Pad',
'Replace',
'SliceReplace',
# prevent 'len' from ending up in the global namespace
#'len',
'upper',
'lower',
'cat',
'isalnum',
'isalpha',
'isdecimal',
'isdigit',
'islower',
'isnumeric',
'isspace',
'istitle',
'isupper',
'StrCat',
'find',
'StrFind',
'StrSlice',
'slice',
'slice_replace',
'replace',
'capitalize',
'strip',
'lstrip',
'rstrip',
'pad',
'UnaryStringFunction']
def _validate(var, name, type, typename):
if not isinstance(var, type):
raise TypeError('"%s" argument must be a %s'%(name, typename))
def _validate_optional(var, name, type, typename):
if var is not None and not isinstance(var, type):
raise TypeError('"%s" argument must be a %s'%(name, typename))
class Like(ElemWise):
""" Filter expression by string comparison
>>> from blaze import symbol, like, compute
>>> t = symbol('t', 'var * {name: string, city: string}')
>>> expr = t[t.name.like('Alice*')]
>>> data = [('Alice Smith', 'New York'),
... ('Bob Jones', 'Chicago'),
... ('Alice Walker', 'LA')]
>>> list(compute(expr, data))
[('Alice Smith', 'New York'), ('Alice Walker', 'LA')]
"""
_arguments = '_child', 'pattern'
def _dshape(self):
shape, schema = self._child.dshape.shape, self._child.schema
schema = Option(bool_) if isinstance(schema.measure, Option) else bool_
return DataShape(*(shape + (schema,)))
@copydoc(Like)
def like(child, pattern):
if not isinstance(pattern, basestring):
raise TypeError('pattern argument must be a string')
return Like(child, pattern)
class UnaryStringFunction(ElemWise):
"""String function that only takes a single argument.
"""
_arguments = '_child',
class len(UnaryStringFunction):
schema = datashape.int64
class upper(UnaryStringFunction):
@property
def schema(self):
return self._child.schema
class lower(UnaryStringFunction):
@property
def schema(self):
return self._child.schema
class PredicateFunction(UnaryStringFunction):
@property
def schema(self):
return bool_ if self._child.schema == datashape.string else Option(bool_)
class isalnum(PredicateFunction): pass
class isalpha(PredicateFunction): pass
class isdecimal(PredicateFunction): pass
class isdigit(PredicateFunction): pass
class islower(PredicateFunction): pass
class isnumeric(PredicateFunction): pass
class isspace(PredicateFunction): pass
class istitle(PredicateFunction): pass
class isupper(PredicateFunction): pass
class StrFind(ElemWise):
"""
Find literal substring in string column.
"""
_arguments = '_child', 'sub'
schema = Option(datashape.int64)
@copydoc(StrFind)
def find(col, sub):
if not isinstance(sub, basestring):
raise TypeError("'sub' argument must be a String")
return StrFind(col, sub)
class Replace(ElemWise):
_arguments = '_child', 'old', 'new', 'max'
@property
def schema(self):
return self._child.schema
def replace(col, old, new, max=None):
_validate(old, 'old', basestring, 'string')
_validate(new, 'new', basestring, 'string')
_validate_optional(max, 'max', int, 'integer')
return Replace(col, old, new, max)
class Pad(ElemWise):
_arguments = '_child', 'width', 'side', 'fillchar'
@property
def schema(self):
return self._child.schema
def pad(col, width, side=None, fillchar=None):
_validate(width, 'width', int, 'integer')
if side not in (None, 'left', 'right'):
raise TypeError('"side" argument must be either "left" or "right"')
_validate_optional(fillchar, 'fillchar', basestring, 'string')
return Pad(col, width, side, fillchar)
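# Usage sketch (not part of the original module): pad is normally reached via the
# `.str` accessor on a string field of a blaze symbol, e.g. for a symbol `s` with a
# string `name` field:
#     s.name.str.pad(10, side='left', fillchar='*')
# which builds a Pad expression equivalent to pad(s.name, 10, 'left', '*').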
class capitalize(UnaryStringFunction):
@property
def schema(self):
return self._child.schema
class strip(UnaryStringFunction):
@property
def schema(self):
return self._child.schema
class lstrip(UnaryStringFunction):
@property
def schema(self):
return self._child.schema
class rstrip(UnaryStringFunction):
@property
def schema(self):
return self._child.schema
class StrSlice(ElemWise):
_arguments = '_child', 'slice'
@property
def schema(self):
return self._child.schema
class SliceReplace(ElemWise):
_arguments = '_child', 'start', 'stop', 'repl'
@property
def schema(self):
return self._child.schema
def slice_replace(col, start=None, stop=None, repl=None):
_validate_optional(start, 'start', int, 'integer')
_validate_optional(stop, 'stop', int, 'integer')
_validate_optional(repl, 'repl', basestring, 'string')
return SliceReplace(col, start, stop, repl)
@copydoc(StrSlice)
def slice(col, idx):
if not isinstance(idx, (builtins.slice, _inttypes)):
raise TypeError("idx argument must be a slice or integer, given {}".format(slc))
return StrSlice(col, (idx.start, idx.stop, idx.step) if isinstance(idx, builtins.slice) else idx)
class StrCat(ElemWise):
"""
Concatenate two string columns together with optional 'sep' argument.
>>> import pandas as pd
>>> from blaze import symbol, compute, dshape
>>> ds = dshape('3 * {name: ?string, comment: ?string, num: int32}')
>>> s = symbol('s', dshape=ds)
>>> data = [('al', 'good', 0), ('suri', 'not good', 1), ('jinka', 'ok', 2)]
>>> df = pd.DataFrame(data, columns=['name', 'comment', 'num'])
>>> compute(s.name.str.cat(s.comment, sep=' -- '), df)
0 al -- good
1 suri -- not good
2 jinka -- ok
Name: name, dtype: object
For rows with null entries, it returns null. This is consistent with
default pandas behavior with kwarg: na_rep=None.
>>> data = [(None, None, 0), ('suri', 'not good', 1), ('jinka', None, 2)]
>>> df = pd.DataFrame(data, columns=['name', 'comment', 'num'])
>>> compute(s.name.str.cat(s.comment, sep=' -- '), df)
0 NaN
1 suri -- not good
2 NaN
Name: name, dtype: object
"""
_arguments = 'lhs', 'rhs', 'sep'
_input_attributes = 'lhs', 'rhs'
def _dshape(self):
'''
since pandas supports concat for string columns, do the same for blaze
'''
shape = self.lhs.dshape.shape
if isinstance(self.lhs.schema.measure, Option):
schema = self.lhs.schema
elif isinstance(self.rhs.schema.measure, Option):
schema = self.rhs.schema
else:
_, lhs_encoding = self.lhs.schema.measure.parameters
_, rhs_encoding = self.rhs.schema.measure.parameters
assert lhs_encoding == rhs_encoding
# convert fixed length string to variable length string
schema = DataShape(String(None, lhs_encoding))
return DataShape(*(shape + (schema,)))
@copydoc(StrCat)
def cat(lhs, rhs, sep=None):
"""
returns lhs + sep + rhs
Raises:
Invoking on a non string column raises a TypeError
If kwarg 'sep' is not a string, raises a TypeError
"""
# pandas supports concat for string columns only, do the same for blaze
if not isstring(rhs.dshape):
raise TypeError("can only concat string columns")
_validate_optional(sep, 'sep', basestring, 'string')
return StrCat(lhs, rhs, sep=sep)
def isstring(ds):
measure = ds.measure
return isinstance(getattr(measure, 'ty', measure), String)
_mod, _rmod = _mkbin('mod', Interp)
_mul, _rmul = _mkbin('mul', Repeat)
class str_ns(object):
def __init__(self, field):
self.field = field
def upper(self): return upper(self.field)
def lower(self): return lower(self.field)
def len(self): return len(self.field)
def like(self, pattern): return like(self.field, pattern)
def cat(self, other, sep=None): return cat(self.field, other, sep=sep)
def find(self, sub): return find(self.field, sub)
def isalnum(self): return isalnum(self.field)
def isalpha(self): return isalpha(self.field)
def isdecimal(self): return isdecimal(self.field)
def isdigit(self): return isdigit(self.field)
def islower(self): return islower(self.field)
def isnumeric(self): return isnumeric(self.field)
def isspace(self): return isspace(self.field)
def istitle(self): return istitle(self.field)
def isupper(self): return isupper(self.field)
def replace(self, old, new, max=None): return replace(self.field, old, new, max)
def capitalize(self): return capitalize(self.field)
def pad(self, width, side=None, fillchar=None): return pad(self.field, width, side, fillchar)
def strip(self): return strip(self.field)
def lstrip(self): return lstrip(self.field)
def rstrip(self): return rstrip(self.field)
def __getitem__(self, idx): return slice(self.field, idx)
def slice_replace(self, start=None, stop=None, repl=None):
return slice_replace(self.field, start, stop, repl)
class str(object):
__name__ = 'str'
def __get__(self, obj, type):
return str_ns(obj) if obj is not None else self
@deprecated('0.11', replacement='len()')
def str_len(*args, **kwds): return len(*args, **kwds)
@deprecated('0.11', replacement='upper()')
def str_upper(*args, **kwds): return upper(*args, **kwds)
@deprecated('0.11', replacement='lower()')
def str_lower(*args, **kwds): return lower(*args, **kwds)
@deprecated('0.11', replacement='cat(lhs, rhs, sep=None)')
def str_cat(*args, **kwds): return cat(*args, **kwds)
schema_method_list.extend([(isstring,
set([_add,
_radd,
_mod,
_rmod,
_mul,
_rmul,
str(),
repeat,
interp,
like,
str_len, # deprecated
str_upper, # deprecated
str_lower, # deprecated
str_cat]))]) # deprecated
| {
"repo_name": "ContinuumIO/blaze",
"path": "blaze/expr/strings.py",
"copies": "3",
"size": "10912",
"license": "bsd-3-clause",
"hash": -7644352640154388000,
"line_mean": 29.1436464088,
"line_max": 101,
"alpha_frac": 0.5998900293,
"autogenerated": false,
"ratio": 3.7941585535465925,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5894048582846593,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import warnings
import numpy as np
import pandas as pd
import pkg_resources
from ..core.pycompat import basestring
from ..core.utils import is_scalar
ROBUST_PERCENTILE = 2.0
def _load_default_cmap(fname='default_colormap.csv'):
"""
Returns viridis color map
"""
from matplotlib.colors import LinearSegmentedColormap
# Not sure what the first arg here should be
f = pkg_resources.resource_stream(__name__, fname)
cm_data = pd.read_csv(f, header=None).values
f.close()
return LinearSegmentedColormap.from_list('viridis', cm_data)
def import_seaborn():
'''import seaborn and handle deprecation of apionly module'''
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
try:
import seaborn.apionly as sns
if (w and issubclass(w[-1].category, UserWarning) and
("seaborn.apionly module" in str(w[-1].message))):
raise ImportError
except ImportError:
import seaborn as sns
finally:
warnings.resetwarnings()
return sns
_registered = False
def register_pandas_datetime_converter_if_needed():
# based on https://github.com/pandas-dev/pandas/pull/17710
global _registered
if not _registered:
try:
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
except ImportError:
# register_matplotlib_converters new in pandas 0.22
from pandas.tseries import converter
converter.register()
_registered = True
def import_matplotlib_pyplot():
"""Import pyplot as register appropriate converters."""
register_pandas_datetime_converter_if_needed()
import matplotlib.pyplot as plt
return plt
def _determine_extend(calc_data, vmin, vmax):
extend_min = calc_data.min() < vmin
extend_max = calc_data.max() > vmax
if extend_min and extend_max:
extend = 'both'
elif extend_min:
extend = 'min'
elif extend_max:
extend = 'max'
else:
extend = 'neither'
return extend
def _build_discrete_cmap(cmap, levels, extend, filled):
"""
Build a discrete colormap and normalization of the data.
"""
import matplotlib as mpl
if not filled:
# non-filled contour plots
extend = 'max'
if extend == 'both':
ext_n = 2
elif extend in ['min', 'max']:
ext_n = 1
else:
ext_n = 0
n_colors = len(levels) + ext_n - 1
pal = _color_palette(cmap, n_colors)
new_cmap, cnorm = mpl.colors.from_levels_and_colors(
levels, pal, extend=extend)
# copy the old cmap name, for easier testing
new_cmap.name = getattr(cmap, 'name', cmap)
return new_cmap, cnorm
def _color_palette(cmap, n_colors):
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
colors_i = np.linspace(0, 1., n_colors)
if isinstance(cmap, (list, tuple)):
# we have a list of colors
cmap = ListedColormap(cmap, N=n_colors)
pal = cmap(colors_i)
elif isinstance(cmap, basestring):
# we have some sort of named palette
try:
# is this a matplotlib cmap?
cmap = plt.get_cmap(cmap)
pal = cmap(colors_i)
except ValueError:
# ValueError happens when mpl doesn't like a colormap, try seaborn
try:
from seaborn.apionly import color_palette
pal = color_palette(cmap, n_colors=n_colors)
except (ValueError, ImportError):
# or maybe we just got a single color as a string
cmap = ListedColormap([cmap], N=n_colors)
pal = cmap(colors_i)
else:
# cmap better be a LinearSegmentedColormap (e.g. viridis)
pal = cmap(colors_i)
return pal
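# Usage sketch (not part of the original module): _color_palette accepts a named
# matplotlib colormap, a seaborn palette name, a single color string, or an explicit
# list of colors, e.g.
#     _color_palette('RdBu_r', 5)                  # 5 colors sampled from a matplotlib cmap
#     _color_palette(['#ff0000', '#0000ff'], 2)    # explicit list of colors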
# _determine_cmap_params is adapted from Seaborn:
# https://github.com/mwaskom/seaborn/blob/v0.6/seaborn/matrix.py#L158
# Used under the terms of Seaborn's license, see licenses/SEABORN_LICENSE.
def _determine_cmap_params(plot_data, vmin=None, vmax=None, cmap=None,
center=None, robust=False, extend=None,
levels=None, filled=True, norm=None):
"""
Use some heuristics to set good defaults for colorbar and range.
Parameters
==========
plot_data: Numpy array
Doesn't handle xarray objects
Returns
=======
cmap_params : dict
Use depends on the type of the plotting function
"""
import matplotlib as mpl
calc_data = np.ravel(plot_data[np.isfinite(plot_data)])
# Handle all-NaN input data gracefully
if calc_data.size == 0:
# Arbitrary default for when all values are NaN
calc_data = np.array(0.0)
# Setting center=False prevents a divergent cmap
possibly_divergent = center is not False
# Set center to 0 so math below makes sense but remember its state
center_is_none = False
if center is None:
center = 0
center_is_none = True
# Setting both vmin and vmax prevents a divergent cmap
if (vmin is not None) and (vmax is not None):
possibly_divergent = False
# Setting vmin or vmax implies linspaced levels
user_minmax = (vmin is not None) or (vmax is not None)
# vlim might be computed below
vlim = None
if vmin is None:
if robust:
vmin = np.percentile(calc_data, ROBUST_PERCENTILE)
else:
vmin = calc_data.min()
elif possibly_divergent:
vlim = abs(vmin - center)
if vmax is None:
if robust:
vmax = np.percentile(calc_data, 100 - ROBUST_PERCENTILE)
else:
vmax = calc_data.max()
elif possibly_divergent:
vlim = abs(vmax - center)
if possibly_divergent:
# kwargs not specific about divergent or not: infer defaults from data
divergent = ((vmin < 0) and (vmax > 0)) or not center_is_none
else:
divergent = False
# A divergent map should be symmetric around the center value
if divergent:
if vlim is None:
vlim = max(abs(vmin - center), abs(vmax - center))
vmin, vmax = -vlim, vlim
# Now add in the centering value and set the limits
vmin += center
vmax += center
# Choose default colormaps if not provided
if cmap is None:
if divergent:
cmap = "RdBu_r"
else:
cmap = "viridis"
# Allow viridis before matplotlib 1.5
if cmap == "viridis":
cmap = _load_default_cmap()
# Handle discrete levels
if levels is not None:
if is_scalar(levels):
if user_minmax or levels == 1:
levels = np.linspace(vmin, vmax, levels)
else:
# N in MaxNLocator refers to bins, not ticks
ticker = mpl.ticker.MaxNLocator(levels - 1)
levels = ticker.tick_values(vmin, vmax)
vmin, vmax = levels[0], levels[-1]
if extend is None:
extend = _determine_extend(calc_data, vmin, vmax)
if levels is not None:
cmap, norm = _build_discrete_cmap(cmap, levels, extend, filled)
return dict(vmin=vmin, vmax=vmax, cmap=cmap, extend=extend,
levels=levels, norm=norm)
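# --- Illustrative sketch (added for this write-up; not part of the original
# module). Data straddling zero with no explicit limits is treated as
# divergent: the limits become symmetric and the default colormap switches to
# "RdBu_r". Only the defaults exercised here are shown; helpers such as
# ROBUST_PERCENTILE live elsewhere in the full module.
def _example_determine_cmap_params():
    import numpy as np
    params = _determine_cmap_params(np.array([-1.0, 0.5, 2.0]))
    assert params['vmin'] == -params['vmax']          # symmetric around zero
    assert params['cmap'] == 'RdBu_r'                 # divergent default
    assert params['extend'] == 'neither'              # nothing outside the limits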
def _infer_xy_labels_3d(darray, x, y, rgb):
"""
Determine x and y labels for showing RGB images.
Attempts to infer which dimension is RGB/RGBA by size and order of dims.
"""
assert rgb is None or rgb != x
assert rgb is None or rgb != y
# Start by detecting and reporting invalid combinations of arguments
assert darray.ndim == 3
not_none = [a for a in (x, y, rgb) if a is not None]
if len(set(not_none)) < len(not_none):
raise ValueError(
'Dimension names must be None or unique strings, but imshow was '
'passed x=%r, y=%r, and rgb=%r.' % (x, y, rgb))
for label in not_none:
if label not in darray.dims:
raise ValueError('%r is not a dimension' % (label,))
# Then calculate rgb dimension if certain and check validity
could_be_color = [label for label in darray.dims
if darray[label].size in (3, 4) and label not in (x, y)]
if rgb is None and not could_be_color:
raise ValueError(
'A 3-dimensional array was passed to imshow(), but there is no '
'dimension that could be color. At least one dimension must be '
'of size 3 (RGB) or 4 (RGBA), and not given as x or y.')
if rgb is None and len(could_be_color) == 1:
rgb = could_be_color[0]
if rgb is not None and darray[rgb].size not in (3, 4):
raise ValueError('Cannot interpret dim %r of size %s as RGB or RGBA.'
% (rgb, darray[rgb].size))
# If rgb dimension is still unknown, there must be two or three dimensions
# in could_be_color. We therefore warn, and use a heuristic to break ties.
if rgb is None:
assert len(could_be_color) in (2, 3)
rgb = could_be_color[-1]
warnings.warn(
'Several dimensions of this array could be colors. Xarray '
'will use the last possible dimension (%r) to match '
'matplotlib.pyplot.imshow. You can pass names of x, y, '
'and/or rgb dimensions to override this guess.' % rgb)
assert rgb is not None
# Finally, we pick out the red slice and delegate to the 2D version:
return _infer_xy_labels(darray.isel(**{rgb: 0}), x, y)
def _infer_xy_labels(darray, x, y, imshow=False, rgb=None):
"""
Determine x and y labels. For use in _plot2d
darray must be a 2 dimensional data array, or 3d for imshow only.
"""
assert x is None or x != y
if imshow and darray.ndim == 3:
return _infer_xy_labels_3d(darray, x, y, rgb)
if x is None and y is None:
if darray.ndim != 2:
raise ValueError('DataArray must be 2d')
y, x = darray.dims
elif x is None:
if y not in darray.dims:
raise ValueError('y must be a dimension name if x is not supplied')
x = darray.dims[0] if y == darray.dims[1] else darray.dims[1]
elif y is None:
if x not in darray.dims:
raise ValueError('x must be a dimension name if y is not supplied')
y = darray.dims[0] if x == darray.dims[1] else darray.dims[1]
elif any(k not in darray.coords and k not in darray.dims for k in (x, y)):
raise ValueError('x and y must be coordinate variables')
return x, y
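# --- Illustrative sketch (added for this write-up; not part of the original
# module). A hedged example of the label inference on a tiny DataArray; it
# imports xarray only inside the helper, so nothing runs at import time.
def _example_infer_xy_labels():
    import numpy as np
    import xarray as xr
    da = xr.DataArray(np.zeros((2, 3)), dims=('lat', 'lon'))
    # With nothing supplied, the last dimension becomes x and the first y.
    assert _infer_xy_labels(da, None, None) == ('lon', 'lat')
    # Supplying one label infers the other from the remaining dimension.
    assert _infer_xy_labels(da, x='lon', y=None) == ('lon', 'lat')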
def get_axis(figsize, size, aspect, ax):
import matplotlib as mpl
import matplotlib.pyplot as plt
if figsize is not None:
if ax is not None:
raise ValueError('cannot provide both `figsize` and '
'`ax` arguments')
if size is not None:
raise ValueError('cannot provide both `figsize` and '
'`size` arguments')
_, ax = plt.subplots(figsize=figsize)
elif size is not None:
if ax is not None:
raise ValueError('cannot provide both `size` and `ax` arguments')
if aspect is None:
width, height = mpl.rcParams['figure.figsize']
aspect = width / height
figsize = (size * aspect, size)
_, ax = plt.subplots(figsize=figsize)
elif aspect is not None:
raise ValueError('cannot provide `aspect` argument without `size`')
if ax is None:
ax = plt.gca()
return ax
| {
"repo_name": "jcmgray/xarray",
"path": "xarray/plot/utils.py",
"copies": "1",
"size": "11666",
"license": "apache-2.0",
"hash": -669678987555741200,
"line_mean": 31.7696629213,
"line_max": 79,
"alpha_frac": 0.6083490485,
"autogenerated": false,
"ratio": 3.8349769888231426,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49433260373231425,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import warnings
import numpy as np
import pandas as pd
from .core import DataFrame, Series, Index, aca, map_partitions, no_default
from .shuffle import shuffle
from .utils import make_meta, insert_meta_param_description
from ..utils import derived_from, M
def _maybe_slice(grouped, columns):
"""
Slice columns if grouped is pd.DataFrameGroupBy
"""
if isinstance(grouped, pd.core.groupby.DataFrameGroupBy):
if columns is not None:
columns = columns if isinstance(columns, str) else list(columns)
return grouped[columns]
return grouped
def _groupby_slice_apply(df, grouper, key, func):
g = df.groupby(grouper)
if key:
g = g[key]
return g.apply(func)
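# --- Illustrative sketch (added for this write-up; not part of the original
# module). The helper simply groups, optionally slices to one column, and
# applies the user function, exactly as pandas would inside each partition.
def _example_groupby_slice_apply():
    import pandas as pd
    df = pd.DataFrame({'k': [0, 0, 1], 'v': [1.0, 2.0, 3.0]})
    out = _groupby_slice_apply(df, 'k', 'v', lambda s: s.sum())
    assert out.to_dict() == {0: 3.0, 1: 3.0}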
def _groupby_get_group(df, by_key, get_key, columns):
# SeriesGroupBy may pass df which includes group key
grouped = df.groupby(by_key)
if get_key in grouped.groups:
if isinstance(df, pd.DataFrame):
grouped = grouped[columns]
return grouped.get_group(get_key)
else:
# to create empty DataFrame/Series, which has the same
# dtype as the original
if isinstance(df, pd.DataFrame):
# may be SeriesGroupBy
df = df[columns]
return df.iloc[0:0]
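# --- Illustrative sketch (added for this write-up; not part of the original
# module). When the requested key is absent from a partition the helper
# returns an empty frame with the same columns and dtypes, so per-partition
# results can be concatenated safely.
def _example_groupby_get_group():
    import pandas as pd
    df = pd.DataFrame({'k': [0, 0, 1], 'v': [1, 2, 3]})
    present = _groupby_get_group(df, df['k'], 0, ['v'])
    assert list(present['v']) == [1, 2]
    missing = _groupby_get_group(df, df['k'], 9, ['v'])
    assert missing.empty and list(missing.columns) == ['v']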
###############################################################
# Aggregation
###############################################################
def _groupby_aggregate(df, aggfunc=None, levels=None):
return aggfunc(df.groupby(level=levels))
def _apply_chunk(df, index, func, columns):
if isinstance(df, pd.Series):
return func(df.groupby(index))
else:
columns = columns if isinstance(columns, str) else list(columns)
return func(df.groupby(index)[columns])
def _var_chunk(df, index):
if isinstance(df, pd.Series):
df = df.to_frame()
x = df.groupby(index).sum()
x2 = (df**2).rename(columns=lambda c: c + '-x2')
cols = [c + '-x2' for c in x.columns]
x2 = pd.concat([df, x2], axis=1).groupby(index)[cols].sum()
n = (df.groupby(index).count()
.rename(columns=lambda c: c + '-count'))
result = pd.concat([x, x2, n], axis=1)
return result
def _var_agg(g, ddof):
g = g.groupby(level=0).sum()
nc = len(g.columns)
x = g[g.columns[:nc//3]]
x2 = g[g.columns[nc//3:2*nc//3]].rename(columns=lambda c: c[:-3])
n = g[g.columns[-nc//3:]].rename(columns=lambda c: c[:-6])
result = x2 - x**2 / n
div = (n - ddof)
div[div < 0] = 0
result /= div
result[(n - ddof) == 0] = np.nan
assert isinstance(result, pd.DataFrame)
return result
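# --- Illustrative sketch (added for this write-up; not part of the original
# module). Each chunk ships per-group sums, sums of squares and counts; the
# aggregate recombines them into the usual ddof-corrected variance, matching
# what pandas computes on the full data.
def _example_var_chunk_and_agg():
    import pandas as pd
    full = pd.DataFrame({'a': [1.0, 2.0, 3.0, 4.0]})
    key = pd.Series([0, 0, 1, 1])
    chunks = [_var_chunk(full.iloc[:2], key.iloc[:2]),
              _var_chunk(full.iloc[2:], key.iloc[2:])]
    result = _var_agg(pd.concat(chunks), ddof=1)
    expected = full.groupby(key).var(ddof=1)
    pd.testing.assert_frame_equal(result, expected)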
###############################################################
# nunique
###############################################################
def _nunique_df_chunk(df, index):
# we call set_index here to force a possibly duplicate index
# for our reduce step
grouped = (df.groupby(index).apply(pd.DataFrame.drop_duplicates))
grouped.index = grouped.index.get_level_values(level=0)
return grouped
def _nunique_df_aggregate(df, name):
return df.groupby(level=0)[name].nunique()
def _nunique_series_chunk(df, index):
assert isinstance(df, pd.Series)
if isinstance(index, np.ndarray):
assert len(index) == len(df)
index = pd.Series(index, index=df.index)
grouped = pd.concat([df, index], axis=1).drop_duplicates()
return grouped
def _nunique_series_aggregate(df):
return df.groupby(df.columns[1])[df.columns[0]].nunique()
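# --- Illustrative sketch (added for this write-up; not part of the original
# module). The chunk step keeps one row per distinct (value, key) pair and the
# aggregate counts distinct values per key, giving a distributed nunique.
def _example_series_nunique_chunks():
    import numpy as np
    import pandas as pd
    s = pd.Series([1, 1, 2, 7], name='x')
    key = np.array([0, 0, 0, 1])
    chunk = _nunique_series_chunk(s, key)
    out = _nunique_series_aggregate(chunk)
    assert out.to_dict() == {0: 2, 1: 1}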
class _GroupBy(object):
""" Superclass for DataFrameGroupBy and SeriesGroupBy
Parameters
----------
obj: DataFrame or Series
DataFrame or Series to be grouped
index: str, list or Series
The key for grouping
kwargs: dict
Other keywords passed to groupby
"""
def __init__(self, df, index=None, slice=None, **kwargs):
assert isinstance(df, (DataFrame, Series))
self.obj = df
# grouping key passed via groupby method
if (isinstance(index, (DataFrame, Series, Index)) and
isinstance(df, DataFrame)):
if (isinstance(index, Series) and index.name in df.columns and
index._name == df[index.name]._name):
index = index.name
elif (isinstance(index, DataFrame) and
set(index.columns).issubset(df.columns) and
index._name == df[index.columns]._name):
index = list(index.columns)
self.index = index
# slicing key applied to _GroupBy instance
self._slice = slice
self.kwargs = kwargs
if isinstance(index, Series) and df.divisions != index.divisions:
msg = ("The Series and index of the groupby"
" must have the same divisions.")
raise NotImplementedError(msg)
if self._is_grouped_by_sliced_column(self.obj, index):
# check whether given Series is taken from given df and unchanged.
# If any operations are performed, _name will be changed to
# e.g. "elemwise-xxxx"
# if group key (index) is a Series sliced from DataFrame,
# emulation must be performed as the same.
# otherwise, group key is regarded as a separate column
self._meta = self.obj._meta.groupby(self.obj._meta[index.name])
elif isinstance(self.index, Series):
self._meta = self.obj._meta.groupby(self.index._meta)
else:
self._meta = self.obj._meta.groupby(self.index)
def _is_grouped_by_sliced_column(self, df, index):
"""
Return whether index is a Series sliced from df
"""
if isinstance(df, Series):
return False
if (isinstance(index, Series) and index._name in df.columns and
index._name == df[index.name]._name):
return True
if (isinstance(index, DataFrame) and
set(index.columns).issubset(df.columns) and
index._name == df[index.columns]._name):
index = list(index.columns)
return True
return False
@property
def _meta_nonempty(self):
"""
Return a pd.DataFrameGroupBy / pd.SeriesGroupBy which contains sample data.
"""
sample = self.obj._meta_nonempty
if isinstance(self.index, Series):
if self._is_grouped_by_sliced_column(self.obj, self.index):
grouped = sample.groupby(sample[self.index.name])
else:
grouped = sample.groupby(self.index._meta_nonempty)
else:
grouped = sample.groupby(self.index)
return _maybe_slice(grouped, self._slice)
def _aca_agg(self, token, func, aggfunc=None):
if aggfunc is None:
aggfunc = func
meta = func(self._meta)
columns = meta.name if isinstance(meta, pd.Series) else meta.columns
token = self._token_prefix + token
if isinstance(self.index, (tuple, list)) and len(self.index) > 1:
levels = list(range(len(self.index)))
else:
levels = 0
return aca([self.obj, self.index, func, columns],
chunk=_apply_chunk, aggregate=_groupby_aggregate,
meta=meta, token=token,
aggregate_kwargs=dict(aggfunc=aggfunc, levels=levels))
@derived_from(pd.core.groupby.GroupBy)
def sum(self):
return self._aca_agg(token='sum', func=M.sum)
@derived_from(pd.core.groupby.GroupBy)
def min(self):
return self._aca_agg(token='min', func=M.min)
@derived_from(pd.core.groupby.GroupBy)
def max(self):
return self._aca_agg(token='max', func=M.max)
@derived_from(pd.core.groupby.GroupBy)
def count(self):
return self._aca_agg(token='count', func=M.count,
aggfunc=M.sum)
@derived_from(pd.core.groupby.GroupBy)
def mean(self):
return self.sum() / self.count()
@derived_from(pd.core.groupby.GroupBy)
def var(self, ddof=1):
from functools import partial
meta = self.obj._meta
if isinstance(meta, pd.Series):
meta = meta.to_frame()
meta = meta.groupby(self.index).var(ddof=1)
result = aca([self.obj, self.index], chunk=_var_chunk,
aggregate=partial(_var_agg, ddof=ddof), meta=meta,
token=self._token_prefix + 'var')
if isinstance(self.obj, Series):
result = result[result.columns[0]]
if self._slice:
result = result[self._slice]
return result
@derived_from(pd.core.groupby.GroupBy)
def std(self, ddof=1):
v = self.var(ddof)
result = map_partitions(np.sqrt, v, meta=v)
return result
@derived_from(pd.core.groupby.GroupBy)
def get_group(self, key):
token = self._token_prefix + 'get_group'
meta = self._meta.obj
if isinstance(meta, pd.DataFrame) and self._slice is not None:
meta = meta[self._slice]
columns = meta.columns if isinstance(meta, pd.DataFrame) else meta.name
return map_partitions(_groupby_get_group, self.obj, self.index, key,
columns, meta=meta, token=token)
@insert_meta_param_description(pad=12)
def apply(self, func, meta=no_default, columns=no_default):
""" Parallel version of pandas GroupBy.apply
This mimics the pandas version except for the following:
1. The user should provide output metadata.
2. If the grouper does not align with the index then this causes a full
shuffle. The order of rows within each group may not be preserved.
Parameters
----------
func: function
Function to apply
$META
columns: list, scalar or None
Deprecated, use `meta` instead. If list is given, the result is a
DataFrame which columns is specified list. Otherwise, the result is
a Series which name is given scalar or None (no name). If name
keyword is not given, dask tries to infer the result type using its
beginning of data. This inference may take some time and lead to
unexpected result
Returns
-------
applied : Series or DataFrame depending on columns keyword
"""
if columns is not no_default:
warnings.warn("`columns` is deprecated, please use `meta` instead")
if meta is no_default and isinstance(columns, (pd.DataFrame, pd.Series)):
meta = columns
if meta is no_default:
msg = ("`meta` is not specified, inferred from partial data. "
"Please provide `meta` if the result is unexpected.\n"
" Before: .apply(func)\n"
" After: .apply(func, meta={'x': 'f8', 'y': 'f8'}) for dataframe result\n"
" or: .apply(func, meta=('x', 'f8')) for series result")
warnings.warn(msg)
try:
meta = self._meta_nonempty.apply(func)
except Exception:
raise ValueError("Metadata inference failed, please provide "
"`meta` keyword")
else:
meta = make_meta(meta)
df = self.obj
if isinstance(self.index, DataFrame): # add index columns to dataframe
df2 = df.assign(**{'_index_' + c: self.index[c]
for c in self.index.columns})
index = self.index
elif isinstance(self.index, Series):
df2 = df.assign(_index=self.index)
index = self.index
else:
df2 = df
index = df[self.index]
df3 = shuffle(df2, index, **self.kwargs) # shuffle dataframe and index
if isinstance(self.index, DataFrame): # extract index from dataframe
cols = ['_index_' + c for c in self.index.columns]
index2 = df3[cols]
df4 = df3.drop(cols, axis=1, dtype=meta.columns.dtype if
isinstance(meta, pd.DataFrame) else None)
elif isinstance(self.index, Series):
index2 = df3['_index']
index2.name = self.index.name
df4 = df3.drop('_index', axis=1, dtype=meta.columns.dtype if
isinstance(meta, pd.DataFrame) else None)
else:
df4 = df3
index2 = self.index
# Perform embarrassingly parallel groupby-apply
df5 = map_partitions(_groupby_slice_apply, df4, index2,
self._slice, func, meta=meta)
return df5
class DataFrameGroupBy(_GroupBy):
_token_prefix = 'dataframe-groupby-'
def __init__(self, df, index=None, slice=None, **kwargs):
if not kwargs.get('as_index', True):
msg = ("The keyword argument `as_index=False` is not supported in "
"dask.dataframe.groupby")
raise NotImplementedError(msg)
super(DataFrameGroupBy, self).__init__(df, index=index,
slice=slice, **kwargs)
@property
def column_info(self):
warnings.warn('column_info is deprecated')
return self.obj.columns
def __getitem__(self, key):
if isinstance(key, list):
g = DataFrameGroupBy(self.obj, index=self.index,
slice=key, **self.kwargs)
else:
g = SeriesGroupBy(self.obj, index=self.index,
slice=key, **self.kwargs)
# error is raised from pandas
g._meta = g._meta[key]
return g
def __dir__(self):
return sorted(set(dir(type(self)) + list(self.__dict__) +
list(filter(pd.compat.isidentifier, self.obj.columns))))
def __getattr__(self, key):
try:
return self[key]
except KeyError as e:
raise AttributeError(e)
class SeriesGroupBy(_GroupBy):
_token_prefix = 'series-groupby-'
def __init__(self, df, index, slice=None, **kwargs):
# raise pandas-compat error message
if isinstance(df, Series):
# When obj is Series, index must be Series
if not isinstance(index, Series):
if isinstance(index, list):
if len(index) == 0:
raise ValueError("No group keys passed!")
msg = "Grouper for '{0}' not 1-dimensional"
raise ValueError(msg.format(index[0]))
# raise error from pandas
df._meta.groupby(index)
super(SeriesGroupBy, self).__init__(df, index=index,
slice=slice, **kwargs)
@property
def column_info(self):
warnings.warn('column_info is deprecated')
return self._slice
def nunique(self):
name = self._meta.obj.name
meta = pd.Series([], dtype='int64',
index=pd.Index([], dtype=self._meta.obj.dtype),
name=name)
if isinstance(self.obj, DataFrame):
return aca([self.obj, self.index],
chunk=_nunique_df_chunk,
aggregate=_nunique_df_aggregate,
meta=meta, token='series-groupby-nunique',
aggregate_kwargs={'name': name})
else:
return aca([self.obj, self.index],
chunk=_nunique_series_chunk,
aggregate=_nunique_series_aggregate,
meta=meta, token='series-groupby-nunique')
| {
"repo_name": "cowlicks/dask",
"path": "dask/dataframe/groupby.py",
"copies": "1",
"size": "15815",
"license": "bsd-3-clause",
"hash": -371227985008019400,
"line_mean": 33.9116997792,
"line_max": 95,
"alpha_frac": 0.5638318052,
"autogenerated": false,
"ratio": 4.064507838601902,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0003819833631414533,
"num_lines": 453
} |
from __future__ import absolute_import, division, print_function
import warnings
import numpy as np
import pandas as pd
from dask.dataframe.core import (DataFrame, Series, Index,
aca, map_partitions, no_default)
from dask.utils import derived_from
def _maybe_slice(grouped, columns):
"""
Slice columns if grouped is pd.DataFrameGroupBy
"""
if isinstance(grouped, pd.core.groupby.DataFrameGroupBy):
if columns is not None:
return grouped[columns]
return grouped
def _groupby_apply_level0(df, key, func):
grouped = df.groupby(level=0)
grouped = _maybe_slice(grouped, key)
return grouped.apply(func)
def _groupby_apply_index(df, ind, key, func):
grouped = df.groupby(ind)
grouped = _maybe_slice(grouped, key)
return grouped.apply(func)
def _groupby_get_group(df, by_key, get_key, columns):
# SeriesGroupBy may pass df which includes group key
grouped = df.groupby(by_key)
if get_key in grouped.groups:
if isinstance(df, pd.DataFrame):
grouped = grouped[columns]
return grouped.get_group(get_key)
else:
# to create empty DataFrame/Series, which has the same
# dtype as the original
if isinstance(df, pd.DataFrame):
# may be SeriesGroupBy
df = df[columns]
return df.iloc[0:0]
###############################################################
# Aggregation
###############################################################
def _apply_chunk(df, index, func, columns):
if isinstance(df, pd.Series):
return func(df.groupby(index))
else:
return func(df.groupby(index)[columns])
def _sum(g):
return g.sum()
def _min(g):
return g.min()
def _max(g):
return g.max()
def _count(g):
return g.count()
def _var_chunk(df, index):
if isinstance(df, pd.Series):
df = df.to_frame()
x = df.groupby(index).sum()
x2 = (df**2).rename(columns=lambda c: c + '-x2')
cols = [c + '-x2' for c in x.columns]
x2 = pd.concat([df, x2], axis=1).groupby(index)[cols].sum()
n = (df.groupby(index).count()
.rename(columns=lambda c: c + '-count'))
result = pd.concat([x, x2, n], axis=1)
return result
def _var_agg(g, ddof):
g = g.groupby(level=0).sum()
nc = len(g.columns)
x = g[g.columns[:nc//3]]
x2 = g[g.columns[nc//3:2*nc//3]].rename(columns=lambda c: c[:-3])
n = g[g.columns[-nc//3:]].rename(columns=lambda c: c[:-6])
result = x2 - x**2 / n
div = (n - ddof)
div[div < 0] = 0
result /= div
result[(n - ddof) == 0] = np.nan
assert isinstance(result, pd.DataFrame)
return result
###############################################################
# nunique
###############################################################
def _nunique_df_chunk(df, index):
# we call set_index here to force a possibly duplicate index
# for our reduce step
grouped = (df.groupby(index).apply(pd.DataFrame.drop_duplicates))
grouped.index = grouped.index.get_level_values(level=0)
return grouped
def _nunique_series_chunk(df, index):
assert isinstance(df, pd.Series)
if isinstance(index, np.ndarray):
assert len(index) == len(df)
index = pd.Series(index, index=df.index)
grouped = pd.concat([df, index], axis=1).drop_duplicates()
return grouped
class _GroupBy(object):
""" Superclass for DataFrameGroupBy and SeriesGroupBy
Parameters
----------
obj: DataFrame or Series
DataFrame or Series to be grouped
index: str, list or Series
The key for grouping
kwargs: dict
Other keywords passed to groupby
"""
def __init__(self, df, index=None, slice=None, **kwargs):
assert isinstance(df, (DataFrame, Series))
self.obj = df
# grouping key passed via groupby method
if (isinstance(index, (DataFrame, Series, Index)) and
isinstance(df, DataFrame)):
if (isinstance(index, Series) and index.name in df.columns and
index._name == df[index.name]._name):
index = index.name
elif (isinstance(index, DataFrame) and
set(index.columns).issubset(df.columns) and
index._name == df[index.columns]._name):
index = list(index.columns)
self.index = index
# slicing key applied to _GroupBy instance
self._slice = slice
self.kwargs = kwargs
if isinstance(index, Series) and df.divisions != index.divisions:
msg = ("The Series and index of the groupby"
" must have the same divisions.")
raise NotImplementedError(msg)
if self._is_grouped_by_sliced_column(self.obj, index):
# check whether given Series is taken from given df and unchanged.
# If any operations are performed, _name will be changed to
# e.g. "elemwise-xxxx"
# if group key (index) is a Series sliced from DataFrame,
# emulation must be performed as the same.
# otherwise, group key is regarded as a separate column
self._pd = self.obj._pd.groupby(self.obj._pd[index.name])
elif isinstance(self.index, Series):
self._pd = self.obj._pd.groupby(self.index._pd)
else:
self._pd = self.obj._pd.groupby(self.index)
def _is_grouped_by_sliced_column(self, df, index):
"""
Return whether index is a Series sliced from df
"""
if isinstance(df, Series):
return False
if (isinstance(index, Series) and index._name in df.columns and
index._name == df[index.name]._name):
return True
if (isinstance(index, DataFrame) and
set(index.columns).issubset(df.columns) and
index._name == df[index.columns]._name):
index = list(index.columns)
return True
return False
def _head(self):
"""
Return a pd.DataFrameGroupBy / pd.SeriesGroupBy which contains head data.
"""
head = self.obj.head()
if isinstance(self.index, Series):
if self._is_grouped_by_sliced_column(self.obj, self.index):
grouped = head.groupby(head[self.index.name])
else:
grouped = head.groupby(self.index.head())
else:
grouped = head.groupby(self.index)
grouped = _maybe_slice(grouped, self._slice)
return grouped
def _aca_agg(self, token, func, aggfunc=None):
if aggfunc is None:
aggfunc = func
dummy = func(self._pd)
columns = dummy.name if isinstance(dummy, pd.Series) else dummy.columns
token = self._token_prefix + token
if isinstance(self.index, (tuple, list)) and len(self.index) > 1:
levels = list(range(len(self.index)))
else:
levels = 0
agg = lambda df: aggfunc(df.groupby(level=levels))
return aca([self.obj, self.index, func, columns],
chunk=_apply_chunk, aggregate=agg,
columns=dummy, token=token)
@derived_from(pd.core.groupby.GroupBy)
def sum(self):
return self._aca_agg(token='sum', func=_sum)
@derived_from(pd.core.groupby.GroupBy)
def min(self):
return self._aca_agg(token='min', func=_min)
@derived_from(pd.core.groupby.GroupBy)
def max(self):
return self._aca_agg(token='max', func=_max)
@derived_from(pd.core.groupby.GroupBy)
def count(self):
return self._aca_agg(token='count', func=_count,
aggfunc=_sum)
@derived_from(pd.core.groupby.GroupBy)
def mean(self):
return self.sum() / self.count()
@derived_from(pd.core.groupby.GroupBy)
def var(self, ddof=1):
from functools import partial
meta = self.obj._pd
if isinstance(meta, pd.Series):
meta = meta.to_frame()
meta = meta.groupby(self.index).var(ddof=1)
result = aca([self.obj, self.index], _var_chunk,
partial(_var_agg, ddof=ddof), meta,
token=self._token_prefix + 'var')
if isinstance(self.obj, Series):
result = result[result.columns[0]]
if self._slice:
result = result[self._slice]
return result
@derived_from(pd.core.groupby.GroupBy)
def std(self, ddof=1):
v = self.var(ddof)
result = map_partitions(np.sqrt, v, v)
return result
@derived_from(pd.core.groupby.GroupBy)
def get_group(self, key):
token = self._token_prefix + 'get_group'
dummy = self._pd.obj
if isinstance(dummy, pd.DataFrame) and self._slice is not None:
dummy = dummy[self._slice]
columns = dummy.columns if isinstance(dummy, pd.DataFrame) else dummy.name
return map_partitions(_groupby_get_group, dummy, self.obj,
self.index, key, columns, token=token)
def apply(self, func, columns=no_default):
""" Parallel version of pandas GroupBy.apply
This mimics the pandas version except for the following:
1. The user should provide output columns.
2. If the grouper does not align with the index then this causes a full
shuffle. The order of rows within each group may not be preserved.
Parameters
----------
func: function
Function to apply
columns: list, scalar or None
If list is given, the result is a DataFrame which columns is
specified list. Otherwise, the result is a Series which name is
given scalar or None (no name). If name keyword is not given, dask
tries to infer the result type using its beginning of data. This
inference may take some time and lead to unexpected result
Returns
-------
applied : Series or DataFrame depending on columns keyword
"""
if columns is no_default:
msg = ("columns is not specified, inferred from partial data. "
"Please provide columns if the result is unexpected.\n"
" Before: .apply(func)\n"
" After: .apply(func, columns=['x', 'y']) for dataframe result\n"
" or: .apply(func, columns='x') for series result")
warnings.warn(msg)
dummy = self._head().apply(func)
columns = dummy.columns if isinstance(dummy, pd.DataFrame) else dummy.name
else:
dummy = columns
columns = self._slice
if isinstance(self.index, Series):
if self.index._name == self.obj.index._name:
df = self.obj
else:
df = self.obj.set_index(self.index, drop=False,
**self.kwargs)
return map_partitions(_groupby_apply_level0, dummy,
df, columns, func)
else:
from .shuffle import shuffle
df = shuffle(self.obj, self.index, **self.kwargs)
return map_partitions(_groupby_apply_index, dummy,
df, self.index, columns, func)
class DataFrameGroupBy(_GroupBy):
_token_prefix = 'dataframe-groupby-'
def __init__(self, df, index=None, slice=None, **kwargs):
if not kwargs.get('as_index', True):
msg = ("The keyword argument `as_index=False` is not supported in "
"dask.dataframe.groupby")
raise NotImplementedError(msg)
super(DataFrameGroupBy, self).__init__(df, index=index,
slice=slice, **kwargs)
@property
def column_info(self):
warnings.warn('column_info is deprecated')
return self.obj.columns
def __getitem__(self, key):
if isinstance(key, list):
g = DataFrameGroupBy(self.obj, index=self.index,
slice=key, **self.kwargs)
else:
g = SeriesGroupBy(self.obj, index=self.index,
slice=key, **self.kwargs)
# error is raised from pandas
g._pd = g._pd[key]
return g
def __dir__(self):
return sorted(set(dir(type(self)) + list(self.__dict__) +
list(filter(pd.compat.isidentifier, self.obj.columns))))
def __getattr__(self, key):
try:
return self[key]
except KeyError as e:
raise AttributeError(e)
class SeriesGroupBy(_GroupBy):
_token_prefix = 'series-groupby-'
def __init__(self, df, index, slice=None, **kwargs):
# raise pandas-compat error message
if isinstance(df, Series):
# When obj is Series, index must be Series
if not isinstance(index, Series):
if isinstance(index, list):
if len(index) == 0:
raise ValueError("No group keys passed!")
msg = "Grouper for '{0}' not 1-dimensional"
raise ValueError(msg.format(index[0]))
# raise error from pandas
df._pd.groupby(index)
super(SeriesGroupBy, self).__init__(df, index=index,
slice=slice, **kwargs)
@property
def column_info(self):
warnings.warn('column_info is deprecated')
return self._slice
def nunique(self):
name = self._pd.obj.name
if isinstance(self.obj, DataFrame):
def agg(df):
return df.groupby(level=0)[name].nunique()
return aca([self.obj, self.index],
chunk=_nunique_df_chunk, aggregate=agg,
columns=name, token='series-groupby-nunique')
else:
def agg(df):
return df.groupby(df.columns[1])[df.columns[0]].nunique()
return aca([self.obj, self.index],
chunk=_nunique_series_chunk, aggregate=agg,
columns=name, token='series-groupby-nunique')
| {
"repo_name": "mikegraham/dask",
"path": "dask/dataframe/groupby.py",
"copies": "1",
"size": "14261",
"license": "bsd-3-clause",
"hash": -5082556406472813000,
"line_mean": 31.5593607306,
"line_max": 86,
"alpha_frac": 0.5601991445,
"autogenerated": false,
"ratio": 4.0571834992887625,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5117382643788763,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import warnings
import numpy as np
import pandas as pd
from . import npcompat
def _validate_axis(data, axis):
ndim = data.ndim
if not -ndim <= axis < ndim:
raise IndexError('axis %r out of bounds [-%r, %r)'
% (axis, ndim, ndim))
if axis < 0:
axis += ndim
return axis
def _select_along_axis(values, idx, axis):
other_ind = np.ix_(*[np.arange(s) for s in idx.shape])
sl = other_ind[:axis] + (idx,) + other_ind[axis:]
return values[sl]
def nanfirst(values, axis):
axis = _validate_axis(values, axis)
idx_first = np.argmax(~pd.isnull(values), axis=axis)
return _select_along_axis(values, idx_first, axis)
def nanlast(values, axis):
axis = _validate_axis(values, axis)
rev = (slice(None),) * axis + (slice(None, None, -1),)
idx_last = -1 - np.argmax(~pd.isnull(values)[rev], axis=axis)
return _select_along_axis(values, idx_last, axis)
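# --- Illustrative sketch (added for this write-up; not part of the original
# module). nanfirst / nanlast pick the first and last non-NaN entry along the
# requested axis.
def _example_nanfirst_nanlast():
    import numpy as np
    values = np.array([[np.nan, 2.0, 3.0],
                       [np.nan, np.nan, 5.0]])
    assert np.array_equal(nanfirst(values, axis=1), [2.0, 5.0])
    assert np.array_equal(nanlast(values, axis=1), [3.0, 5.0])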
def inverse_permutation(indices):
"""Return indices for an inverse permutation.
Parameters
----------
indices : 1D np.ndarray with dtype=int
Integer positions to assign elements to.
Returns
-------
inverse_permutation : 1D np.ndarray with dtype=int
Integer indices to take from the original array to create the
permutation.
"""
# use intp instead of int64 because of windows :(
inverse_permutation = np.empty(len(indices), dtype=np.intp)
inverse_permutation[indices] = np.arange(len(indices), dtype=np.intp)
return inverse_permutation
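# --- Illustrative sketch (added for this write-up; not part of the original
# module). Composing a permutation with its inverse restores the original
# order.
def _example_inverse_permutation():
    import numpy as np
    indices = np.array([2, 0, 1])
    inverse = inverse_permutation(indices)
    assert np.array_equal(inverse, [1, 2, 0])
    arr = np.array(['a', 'b', 'c'])
    assert np.array_equal(arr[indices][inverse], arr)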
def _ensure_bool_is_ndarray(result, *args):
# numpy will sometimes return a scalar value from binary comparisons if it
# can't handle the comparison instead of broadcasting, e.g.,
# In [10]: 1 == np.array(['a', 'b'])
# Out[10]: False
# This function ensures that the result is the appropriate shape in these
# cases
if isinstance(result, bool):
shape = np.broadcast(*args).shape
constructor = np.ones if result else np.zeros
result = constructor(shape, dtype=bool)
return result
def array_eq(self, other):
with warnings.catch_warnings():
warnings.filterwarnings('ignore', r'elementwise comparison failed')
return _ensure_bool_is_ndarray(self == other, self, other)
def array_ne(self, other):
with warnings.catch_warnings():
warnings.filterwarnings('ignore', r'elementwise comparison failed')
return _ensure_bool_is_ndarray(self != other, self, other)
def _is_contiguous(positions):
"""Given a non-empty list, does it consist of contiguous integers?"""
previous = positions[0]
for current in positions[1:]:
if current != previous + 1:
return False
previous = current
return True
def _advanced_indexer_subspaces(key):
"""Indices of the advanced indexes subspaces for mixed indexing and vindex.
"""
if not isinstance(key, tuple):
key = (key,)
advanced_index_positions = [i for i, k in enumerate(key)
if not isinstance(k, slice)]
if (not advanced_index_positions or
not _is_contiguous(advanced_index_positions)):
# Nothing to reorder: dimensions on the indexing result are already
# ordered like vindex. See NumPy's rule for "Combining advanced and
# basic indexing":
# https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#combining-advanced-and-basic-indexing
return (), ()
non_slices = [k for k in key if not isinstance(k, slice)]
ndim = len(np.broadcast(*non_slices).shape)
mixed_positions = advanced_index_positions[0] + np.arange(ndim)
vindex_positions = np.arange(ndim)
return mixed_positions, vindex_positions
class NumpyVIndexAdapter(object):
"""Object that implements indexing like vindex on a np.ndarray.
This is a pure Python implementation of (some of) the logic in this NumPy
proposal: https://github.com/numpy/numpy/pull/6256
"""
def __init__(self, array):
self._array = array
def __getitem__(self, key):
mixed_positions, vindex_positions = _advanced_indexer_subspaces(key)
return np.moveaxis(self._array[key], mixed_positions, vindex_positions)
def __setitem__(self, key, value):
"""Value must have dimensionality matching the key."""
mixed_positions, vindex_positions = _advanced_indexer_subspaces(key)
self._array[key] = np.moveaxis(value, vindex_positions,
mixed_positions)
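# --- Illustrative sketch (added for this write-up; not part of the original
# module). With pointwise (vindex-style) indexing the dimension created by the
# broadcast integer arrays is moved to the front of the result.
def _example_numpy_vindex_adapter():
    import numpy as np
    arr = np.arange(3 * 4 * 5).reshape(3, 4, 5)
    vindex = NumpyVIndexAdapter(arr)
    result = vindex[:, [0, 1], [0, 1]]
    assert result.shape == (2, 3)     # broadcast dim first, then the sliced dim
    assert np.array_equal(result[:, 0], [arr[0, 0, 0], arr[0, 1, 1]])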
def rolling_window(a, axis, window, center, fill_value):
""" rolling window with padding. """
pads = [(0, 0) for s in a.shape]
if center:
start = int(window / 2) # 10 -> 5, 9 -> 4
end = window - 1 - start
pads[axis] = (start, end)
else:
pads[axis] = (window - 1, 0)
a = np.pad(a, pads, mode='constant', constant_values=fill_value)
return _rolling_window(a, window, axis)
def _rolling_window(a, window, axis=-1):
"""
Make an ndarray with a rolling window along axis.
Parameters
----------
a : array_like
Array to add rolling window to
axis: int
axis position along which rolling window will be applied.
window : int
Size of rolling window
Returns
-------
Array that is a view of the original array with an added dimension
of size `window`.
Examples
--------
>>> x=np.arange(10).reshape((2,5))
>>> np.rolling_window(x, 3, axis=-1)
array([[[0, 1, 2], [1, 2, 3], [2, 3, 4]],
[[5, 6, 7], [6, 7, 8], [7, 8, 9]]])
Calculate rolling mean of last dimension:
>>> np.mean(np.rolling_window(x, 3, axis=-1), -1)
array([[ 1., 2., 3.],
[ 6., 7., 8.]])
This function is taken from https://github.com/numpy/numpy/pull/31
but slightly modified to accept axis option.
"""
axis = _validate_axis(a, axis)
a = np.swapaxes(a, axis, -1)
if window < 1:
raise ValueError(
"`window` must be at least 1. Given : {}".format(window))
if window > a.shape[-1]:
raise ValueError("`window` is too long. Given : {}".format(window))
shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
strides = a.strides + (a.strides[-1],)
rolling = npcompat.as_strided(a, shape=shape, strides=strides,
writeable=False)
return np.swapaxes(rolling, -2, axis)
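# --- Illustrative sketch (added for this write-up; not part of the original
# module). With center=False the array is NaN-padded on the left so that every
# position gets a trailing window of the requested length.
def _example_rolling_window_padding():
    import numpy as np
    windows = rolling_window(np.arange(4.0), axis=0, window=3,
                             center=False, fill_value=np.nan)
    assert windows.shape == (4, 3)
    assert np.isnan(windows[0, :2]).all() and windows[0, 2] == 0.0
    assert np.array_equal(windows[-1], [1.0, 2.0, 3.0])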
| {
"repo_name": "jcmgray/xarray",
"path": "xarray/core/nputils.py",
"copies": "1",
"size": "6524",
"license": "apache-2.0",
"hash": -4987010971066444000,
"line_mean": 31.783919598,
"line_max": 111,
"alpha_frac": 0.6201716738,
"autogenerated": false,
"ratio": 3.736540664375716,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9856712338175716,
"avg_score": 0,
"num_lines": 199
} |
from __future__ import absolute_import, division, print_function
import warnings
import pandas as pd
import numpy as np
from pandas.tseries.resample import Resampler as pd_Resampler
from ..core import DataFrame, Series
from ...base import tokenize
from ...utils import derived_from
def getnanos(rule):
try:
return getattr(rule, 'nanos', None)
except ValueError:
return None
def _resample(obj, rule, how, **kwargs):
resampler = Resampler(obj, rule, **kwargs)
if how is not None:
w = FutureWarning(("how in .resample() is deprecated "
"the new syntax is .resample(...)"
".{0}()").format(how))
warnings.warn(w)
return getattr(resampler, how)()
return resampler
def _resample_series(series, start, end, reindex_closed, rule,
resample_kwargs, how, fill_value):
out = getattr(series.resample(rule, **resample_kwargs), how)()
return out.reindex(pd.date_range(start, end, freq=rule,
closed=reindex_closed),
fill_value=fill_value)
def _resample_bin_and_out_divs(divisions, rule, closed='left', label='left'):
rule = pd.tseries.frequencies.to_offset(rule)
g = pd.TimeGrouper(rule, how='count', closed=closed, label=label)
# Determine bins to apply `how` to. Disregard labeling scheme.
divs = pd.Series(range(len(divisions)), index=divisions)
temp = divs.resample(rule, closed=closed, label='left').count()
tempdivs = temp.loc[temp > 0].index
# Cleanup closed == 'right' and label == 'right'
res = pd.offsets.Nano() if hasattr(rule, 'delta') else pd.offsets.Day()
if g.closed == 'right':
newdivs = tempdivs + res
else:
newdivs = tempdivs
if g.label == 'right':
outdivs = tempdivs + rule
else:
outdivs = tempdivs
newdivs = newdivs.tolist()
outdivs = outdivs.tolist()
# Adjust ends
if newdivs[0] < divisions[0]:
newdivs[0] = divisions[0]
if newdivs[-1] < divisions[-1]:
if len(newdivs) < len(divs):
setter = lambda a, val: a.append(val)
else:
setter = lambda a, val: a.__setitem__(-1, val)
setter(newdivs, divisions[-1])
if outdivs[-1] > divisions[-1]:
setter(outdivs, outdivs[-1])
elif outdivs[-1] < divisions[-1]:
setter(outdivs, temp.index[-1])
return tuple(map(pd.Timestamp, newdivs)), tuple(map(pd.Timestamp, outdivs))
class Resampler(object):
def __init__(self, obj, rule, **kwargs):
if not obj.known_divisions:
msg = ("Can only resample dataframes with known divisions\n"
"See dask.pydata.io/en/latest/dataframe-partitions.html\n"
"for more information.")
raise ValueError(msg)
self.obj = obj
rule = pd.tseries.frequencies.to_offset(rule)
day_nanos = pd.tseries.frequencies.Day().nanos
if getnanos(rule) and day_nanos % rule.nanos:
raise NotImplementedError('Resampling frequency %s that does'
' not evenly divide a day is not '
'implemented' % rule)
self._rule = rule
self._kwargs = kwargs
def _agg(self, how, meta=None, fill_value=np.nan):
rule = self._rule
kwargs = self._kwargs
name = 'resample-' + tokenize(self.obj, rule, kwargs, how)
# Create a grouper to determine closed and label conventions
newdivs, outdivs = _resample_bin_and_out_divs(self.obj.divisions, rule,
**kwargs)
# Repartition divs into bins. These won't match labels after mapping
partitioned = self.obj.repartition(newdivs, force=True)
keys = partitioned._keys()
dsk = partitioned.dask
args = zip(keys, outdivs, outdivs[1:], ['left'] * (len(keys) - 1) + [None])
for i, (k, s, e, c) in enumerate(args):
dsk[(name, i)] = (_resample_series, k, s, e, c,
rule, kwargs, how, fill_value)
# Infer output metadata
meta_r = self.obj._meta_nonempty.resample(self._rule, **self._kwargs)
meta = getattr(meta_r, how)()
if isinstance(meta, pd.DataFrame):
return DataFrame(dsk, name, meta, outdivs)
return Series(dsk, name, meta, outdivs)
@derived_from(pd_Resampler)
def count(self):
return self._agg('count', fill_value=0)
@derived_from(pd_Resampler)
def first(self):
return self._agg('first')
@derived_from(pd_Resampler)
def last(self):
return self._agg('last')
@derived_from(pd_Resampler)
def mean(self):
return self._agg('mean')
@derived_from(pd_Resampler)
def min(self):
return self._agg('min')
@derived_from(pd_Resampler)
def median(self):
return self._agg('median')
@derived_from(pd_Resampler)
def max(self):
return self._agg('max')
@derived_from(pd_Resampler)
def ohlc(self):
return self._agg('ohlc')
@derived_from(pd_Resampler)
def prod(self):
return self._agg('prod')
@derived_from(pd_Resampler)
def sem(self):
return self._agg('sem')
@derived_from(pd_Resampler)
def std(self):
return self._agg('std')
@derived_from(pd_Resampler)
def sum(self):
return self._agg('sum')
@derived_from(pd_Resampler)
def var(self):
return self._agg('var')
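# --- Illustrative sketch (added for this write-up; not part of the original
# module). A hedged example of the public dask.dataframe API this class backs,
# assuming dask and pandas are installed: hourly data with known divisions is
# resampled into 6-hourly means.
def _example_dask_resample_usage():
    import pandas as pd
    import dask.dataframe as dd
    index = pd.date_range('2000-01-01', periods=48, freq='h')
    pdf = pd.DataFrame({'x': range(48)}, index=index)
    ddf = dd.from_pandas(pdf, npartitions=4)      # sorted index -> known divisions
    result = ddf.resample('6h').mean().compute()
    assert len(result) == 8
    assert result['x'].iloc[0] == 2.5             # mean of hours 0..5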
| {
"repo_name": "gameduell/dask",
"path": "dask/dataframe/tseries/resample.py",
"copies": "2",
"size": "5597",
"license": "bsd-3-clause",
"hash": -232735422896459360,
"line_mean": 30.8011363636,
"line_max": 83,
"alpha_frac": 0.5819188851,
"autogenerated": false,
"ratio": 3.558169103623649,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0002775790593587204,
"num_lines": 176
} |
from __future__ import absolute_import, division, print_function
import warnings
import pandas as pd
import numpy as np
from ..core import DataFrame, Series
from ...base import tokenize
def getnanos(rule):
try:
return getattr(rule, 'nanos', None)
except ValueError:
return None
def _resample(obj, rule, how, **kwargs):
resampler = Resampler(obj, rule, **kwargs)
if how is not None:
w = FutureWarning(("how in .resample() is deprecated "
"the new syntax is .resample(...)"
".{0}()").format(how))
warnings.warn(w)
return getattr(resampler, how)()
return resampler
def _resample_series(series, start, end, reindex_closed, rule,
resample_kwargs, how, fill_value):
out = getattr(series.resample(rule, **resample_kwargs), how)()
return out.reindex(pd.date_range(start, end, freq=rule,
closed=reindex_closed),
fill_value=fill_value)
def _resample_bin_and_out_divs(divisions, rule, closed='left', label='left'):
rule = pd.datetools.to_offset(rule)
g = pd.TimeGrouper(rule, how='count', closed=closed, label=label)
# Determine bins to apply `how` to. Disregard labeling scheme.
divs = pd.Series(range(len(divisions)), index=divisions)
temp = divs.resample(rule, how='count', closed=closed, label='left')
tempdivs = temp.loc[temp > 0].index
# Cleanup closed == 'right' and label == 'right'
res = pd.offsets.Nano() if hasattr(rule, 'delta') else pd.offsets.Day()
if g.closed == 'right':
newdivs = tempdivs + res
else:
newdivs = tempdivs
if g.label == 'right':
outdivs = tempdivs + rule
else:
outdivs = tempdivs
newdivs = newdivs.tolist()
outdivs = outdivs.tolist()
# Adjust ends
if newdivs[0] < divisions[0]:
newdivs[0] = divisions[0]
if newdivs[-1] < divisions[-1]:
if len(newdivs) < len(divs):
setter = lambda a, val: a.append(val)
else:
setter = lambda a, val: a.__setitem__(-1, val)
setter(newdivs, divisions[-1])
if outdivs[-1] > divisions[-1]:
setter(outdivs, outdivs[-1])
elif outdivs[-1] < divisions[-1]:
setter(outdivs, temp.index[-1])
return tuple(map(pd.Timestamp, newdivs)), tuple(map(pd.Timestamp, outdivs))
class Resampler(object):
def __init__(self, obj, rule, **kwargs):
if not obj.known_divisions:
raise ValueError("Can only resample dataframes with known divisions"
"\nSee dask.pydata.io/en/latest/dataframe-partitions.html"
"\nfor more information.")
self.obj = obj
rule = pd.datetools.to_offset(rule)
day_nanos = pd.datetools.Day().nanos
if getnanos(rule) and day_nanos % rule.nanos:
raise NotImplementedError('Resampling frequency %s that does'
' not evenly divide a day is not '
'implemented' % rule)
self._rule = rule
self._kwargs = kwargs
def _agg(self, how, meta=None, fill_value=np.nan):
rule = self._rule
kwargs = self._kwargs
name = 'resample-' + tokenize(self.obj, rule, kwargs, how)
# Create a grouper to determine closed and label conventions
newdivs, outdivs = _resample_bin_and_out_divs(self.obj.divisions, rule,
**kwargs)
# Repartition divs into bins. These won't match labels after mapping
partitioned = self.obj.repartition(newdivs, force=True)
keys = partitioned._keys()
dsk = partitioned.dask
args = zip(keys, outdivs, outdivs[1:], ['left']*(len(keys)-1) + [None])
for i, (k, s, e, c) in enumerate(args):
dsk[(name, i)] = (_resample_series, k, s, e, c,
rule, kwargs, how, fill_value)
# Infer output metadata
meta_r = self.obj._meta_nonempty.resample(self._rule, **self._kwargs)
meta = getattr(meta_r, how)()
if isinstance(meta, pd.DataFrame):
return DataFrame(dsk, name, meta, outdivs)
return Series(dsk, name, meta, outdivs)
def count(self):
return self._agg('count', fill_value=0)
def first(self):
return self._agg('first')
def last(self):
return self._agg('last')
def mean(self):
return self._agg('mean')
def min(self):
return self._agg('min')
def median(self):
return self._agg('median')
def max(self):
return self._agg('max')
def ohlc(self):
return self._agg('ohlc')
def prod(self):
return self._agg('prod')
def sem(self):
return self._agg('sem')
def std(self):
return self._agg('std')
def sum(self):
return self._agg('sum')
def var(self):
return self._agg('var')
| {
"repo_name": "cowlicks/dask",
"path": "dask/dataframe/tseries/resample.py",
"copies": "2",
"size": "5034",
"license": "bsd-3-clause",
"hash": -5633275493435163000,
"line_mean": 30.4625,
"line_max": 80,
"alpha_frac": 0.568533969,
"autogenerated": false,
"ratio": 3.663755458515284,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5232289427515284,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import warnings
import pandas as pd
import numpy as np
from ..core import DataFrame, Series
from ..utils import PANDAS_VERSION
from ...base import tokenize
from ...utils import derived_from
if PANDAS_VERSION >= '0.20.0':
from pandas.core.resample import Resampler as pd_Resampler
else:
from pandas.tseries.resample import Resampler as pd_Resampler
def getnanos(rule):
try:
return getattr(rule, 'nanos', None)
except ValueError:
return None
def _resample(obj, rule, how, **kwargs):
resampler = Resampler(obj, rule, **kwargs)
if how is not None:
w = FutureWarning(("how in .resample() is deprecated "
"the new syntax is .resample(...)"
".{0}()").format(how))
warnings.warn(w)
return getattr(resampler, how)()
return resampler
def _resample_series(series, start, end, reindex_closed, rule,
resample_kwargs, how, fill_value):
out = getattr(series.resample(rule, **resample_kwargs), how)()
return out.reindex(pd.date_range(start, end, freq=rule,
closed=reindex_closed),
fill_value=fill_value)
def _resample_bin_and_out_divs(divisions, rule, closed='left', label='left'):
rule = pd.tseries.frequencies.to_offset(rule)
g = pd.TimeGrouper(rule, how='count', closed=closed, label=label)
# Determine bins to apply `how` to. Disregard labeling scheme.
divs = pd.Series(range(len(divisions)), index=divisions)
temp = divs.resample(rule, closed=closed, label='left').count()
tempdivs = temp.loc[temp > 0].index
# Cleanup closed == 'right' and label == 'right'
res = pd.offsets.Nano() if hasattr(rule, 'delta') else pd.offsets.Day()
if g.closed == 'right':
newdivs = tempdivs + res
else:
newdivs = tempdivs
if g.label == 'right':
outdivs = tempdivs + rule
else:
outdivs = tempdivs
newdivs = newdivs.tolist()
outdivs = outdivs.tolist()
# Adjust ends
if newdivs[0] < divisions[0]:
newdivs[0] = divisions[0]
if newdivs[-1] < divisions[-1]:
if len(newdivs) < len(divs):
setter = lambda a, val: a.append(val)
else:
setter = lambda a, val: a.__setitem__(-1, val)
setter(newdivs, divisions[-1])
if outdivs[-1] > divisions[-1]:
setter(outdivs, outdivs[-1])
elif outdivs[-1] < divisions[-1]:
setter(outdivs, temp.index[-1])
return tuple(map(pd.Timestamp, newdivs)), tuple(map(pd.Timestamp, outdivs))
class Resampler(object):
def __init__(self, obj, rule, **kwargs):
if not obj.known_divisions:
msg = ("Can only resample dataframes with known divisions\n"
"See dask.pydata.org/en/latest/dataframe-design.html#partitions\n"
"for more information.")
raise ValueError(msg)
self.obj = obj
rule = pd.tseries.frequencies.to_offset(rule)
day_nanos = pd.tseries.frequencies.Day().nanos
if getnanos(rule) and day_nanos % rule.nanos:
raise NotImplementedError('Resampling frequency %s that does'
' not evenly divide a day is not '
'implemented' % rule)
self._rule = rule
self._kwargs = kwargs
def _agg(self, how, meta=None, fill_value=np.nan):
rule = self._rule
kwargs = self._kwargs
name = 'resample-' + tokenize(self.obj, rule, kwargs, how)
# Create a grouper to determine closed and label conventions
newdivs, outdivs = _resample_bin_and_out_divs(self.obj.divisions, rule,
**kwargs)
# Repartition divs into bins. These won't match labels after mapping
partitioned = self.obj.repartition(newdivs, force=True)
keys = partitioned._keys()
dsk = partitioned.dask
args = zip(keys, outdivs, outdivs[1:], ['left'] * (len(keys) - 1) + [None])
for i, (k, s, e, c) in enumerate(args):
dsk[(name, i)] = (_resample_series, k, s, e, c,
rule, kwargs, how, fill_value)
# Infer output metadata
meta_r = self.obj._meta_nonempty.resample(self._rule, **self._kwargs)
meta = getattr(meta_r, how)()
if isinstance(meta, pd.DataFrame):
return DataFrame(dsk, name, meta, outdivs)
return Series(dsk, name, meta, outdivs)
@derived_from(pd_Resampler)
def count(self):
return self._agg('count', fill_value=0)
@derived_from(pd_Resampler)
def first(self):
return self._agg('first')
@derived_from(pd_Resampler)
def last(self):
return self._agg('last')
@derived_from(pd_Resampler)
def mean(self):
return self._agg('mean')
@derived_from(pd_Resampler)
def min(self):
return self._agg('min')
@derived_from(pd_Resampler)
def median(self):
return self._agg('median')
@derived_from(pd_Resampler)
def max(self):
return self._agg('max')
@derived_from(pd_Resampler)
def ohlc(self):
return self._agg('ohlc')
@derived_from(pd_Resampler)
def prod(self):
return self._agg('prod')
@derived_from(pd_Resampler)
def sem(self):
return self._agg('sem')
@derived_from(pd_Resampler)
def std(self):
return self._agg('std')
@derived_from(pd_Resampler)
def sum(self):
return self._agg('sum')
@derived_from(pd_Resampler)
def var(self):
return self._agg('var')
| {
"repo_name": "mraspaud/dask",
"path": "dask/dataframe/tseries/resample.py",
"copies": "2",
"size": "5745",
"license": "bsd-3-clause",
"hash": 5152296525017448000,
"line_mean": 30.7403314917,
"line_max": 85,
"alpha_frac": 0.5857267189,
"autogenerated": false,
"ratio": 3.544108574953732,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5129835293853733,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import warnings
import pandas as pd
from . import utils
from .alignment import align
from .merge import merge
from .pycompat import OrderedDict, basestring, iteritems
from .variable import concat as concat_vars
from .variable import IndexVariable, Variable, as_variable
def concat(objs, dim=None, data_vars='all', coords='different',
compat='equals', positions=None, indexers=None, mode=None,
concat_over=None):
"""Concatenate xarray objects along a new or existing dimension.
Parameters
----------
objs : sequence of Dataset and DataArray objects
xarray objects to concatenate together. Each object is expected to
consist of variables and coordinates with matching shapes except for
along the concatenated dimension.
dim : str or DataArray or pandas.Index
Name of the dimension to concatenate along. This can either be a new
dimension name, in which case it is added along axis=0, or an existing
dimension name, in which case the location of the dimension is
unchanged. If dimension is provided as a DataArray or Index, its name
is used as the dimension to concatenate along and the values are added
as a coordinate.
data_vars : {'minimal', 'different', 'all' or list of str}, optional
These data variables will be concatenated together:
* 'minimal': Only data variables in which the dimension already
appears are included.
* 'different': Data variables which are not equal (ignoring
attributes) across all datasets are also concatenated (as well as
all for which dimension already appears). Beware: this option may
load the data payload of data variables into memory if they are not
already loaded.
* 'all': All data variables will be concatenated.
* list of str: The listed data variables will be concatenated, in
addition to the 'minimal' data variables.
If objects are DataArrays, data_vars must be 'all'.
coords : {'minimal', 'different', 'all' or list of str}, optional
These coordinate variables will be concatenated together:
* 'minimal': Only coordinates in which the dimension already appears
are included.
* 'different': Coordinates which are not equal (ignoring attributes)
across all datasets are also concatenated (as well as all for which
dimension already appears). Beware: this option may load the data
payload of coordinate variables into memory if they are not already
loaded.
* 'all': All coordinate variables will be concatenated, except
those corresponding to other dimensions.
* list of str: The listed coordinate variables will be concatenated,
in addition the 'minimal' coordinates.
compat : {'equals', 'identical'}, optional
String indicating how to compare non-concatenated variables and
dataset global attributes for potential conflicts. 'equals' means
that all variable values and dimensions must be the same;
'identical' means that variable attributes and global attributes
must also be equal.
positions : None or list of integer arrays, optional
List of integer arrays which specifies the integer positions to which
to assign each dataset along the concatenated dimension. If not
supplied, objects are concatenated in the provided order.
indexers, mode, concat_over : deprecated
Returns
-------
concatenated : type of objs
See also
--------
merge
auto_combine
"""
# TODO: add join and ignore_index arguments copied from pandas.concat
# TODO: support concatenating scalar coordinates even if the concatenated
# dimension already exists
from .dataset import Dataset
from .dataarray import DataArray
try:
first_obj, objs = utils.peek_at(objs)
except StopIteration:
raise ValueError('must supply at least one object to concatenate')
if dim is None:
warnings.warn('the `dim` argument to `concat` will be required '
'in a future version of xarray; for now, setting it to '
"the old default of 'concat_dim'",
FutureWarning, stacklevel=2)
dim = 'concat_dims'
if indexers is not None: # pragma: nocover
warnings.warn('indexers has been renamed to positions; the alias '
'will be removed in a future version of xarray',
FutureWarning, stacklevel=2)
positions = indexers
if mode is not None:
raise ValueError('`mode` is no longer a valid argument to '
'xarray.concat; it has been split into the '
'`data_vars` and `coords` arguments')
if concat_over is not None:
raise ValueError('`concat_over` is no longer a valid argument to '
'xarray.concat; it has been split into the '
'`data_vars` and `coords` arguments')
if isinstance(first_obj, DataArray):
f = _dataarray_concat
elif isinstance(first_obj, Dataset):
f = _dataset_concat
else:
raise TypeError('can only concatenate xarray Dataset and DataArray '
'objects, got %s' % type(first_obj))
return f(objs, dim, data_vars, coords, compat, positions)
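# --- Illustrative sketch (added for this write-up; not part of the original
# module). A hedged example of the public entry point: concatenating two
# DataArrays along a brand-new dimension places that dimension first.
def _example_concat_usage():
    import numpy as np
    import xarray as xr
    a = xr.DataArray(np.zeros(3), dims='x')
    b = xr.DataArray(np.ones(3), dims='x')
    combined = xr.concat([a, b], dim='run')
    assert combined.dims == ('run', 'x')
    assert combined.shape == (2, 3)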
def _calc_concat_dim_coord(dim):
"""
Infer the dimension name and 1d coordinate variable (if appropriate)
for concatenating along the new dimension.
"""
if isinstance(dim, basestring):
coord = None
elif not hasattr(dim, 'dims'):
# dim is not a DataArray or IndexVariable
dim_name = getattr(dim, 'name', None)
if dim_name is None:
dim_name = 'concat_dim'
coord = IndexVariable(dim_name, dim)
dim = dim_name
elif not hasattr(dim, 'name'):
coord = as_variable(dim).to_index_variable()
dim, = coord.dims
else:
coord = dim
dim, = coord.dims
return dim, coord
def _calc_concat_over(datasets, dim, data_vars, coords):
"""
Determine which dataset variables need to be concatenated in the result,
and which can simply be taken from the first dataset.
"""
# Return values
concat_over = set()
equals = {}
if dim in datasets[0]:
concat_over.add(dim)
for ds in datasets:
concat_over.update(k for k, v in ds.variables.items()
if dim in v.dims)
def process_subset_opt(opt, subset):
if isinstance(opt, basestring):
if opt == 'different':
# all nonindexes that are not the same in each dataset
for k in getattr(datasets[0], subset):
if k not in concat_over:
# Compare the variable of all datasets vs. the one
# of the first dataset. Perform the minimum amount of
# loads in order to avoid multiple loads from disk
# while keeping the RAM footprint low.
v_lhs = datasets[0].variables[k].load()
# We'll need to know later on if variables are equal.
computed = []
for ds_rhs in datasets[1:]:
v_rhs = ds_rhs.variables[k].compute()
computed.append(v_rhs)
if not v_lhs.equals(v_rhs):
concat_over.add(k)
equals[k] = False
# computed variables are not to be re-computed
# again in the future
for ds, v in zip(datasets[1:], computed):
ds.variables[k].data = v.data
break
else:
equals[k] = True
elif opt == 'all':
concat_over.update(set(getattr(datasets[0], subset)) -
set(datasets[0].dims))
elif opt == 'minimal':
pass
else:
raise ValueError("unexpected value for %s: %s" % (subset, opt))
else:
invalid_vars = [k for k in opt
if k not in getattr(datasets[0], subset)]
if invalid_vars:
if subset == 'coords':
raise ValueError(
'some variables in coords are not coordinates on '
'the first dataset: %s' % (invalid_vars,))
else:
raise ValueError(
'some variables in data_vars are not data variables '
'on the first dataset: %s' % (invalid_vars,))
concat_over.update(opt)
process_subset_opt(data_vars, 'data_vars')
process_subset_opt(coords, 'coords')
return concat_over, equals
def _dataset_concat(datasets, dim, data_vars, coords, compat, positions):
"""
Concatenate a sequence of datasets along a new or existing dimension
"""
from .dataset import Dataset
if compat not in ['equals', 'identical']:
raise ValueError("compat=%r invalid: must be 'equals' "
"or 'identical'" % compat)
dim, coord = _calc_concat_dim_coord(dim)
# Make sure we're working on a copy (we'll be loading variables)
datasets = [ds.copy() for ds in datasets]
datasets = align(*datasets, join='outer', copy=False, exclude=[dim])
concat_over, equals = _calc_concat_over(datasets, dim, data_vars, coords)
def insert_result_variable(k, v):
assert isinstance(v, Variable)
if k in datasets[0].coords:
result_coord_names.add(k)
result_vars[k] = v
# create the new dataset and add constant variables
result_vars = OrderedDict()
result_coord_names = set(datasets[0].coords)
result_attrs = datasets[0].attrs
result_encoding = datasets[0].encoding
for k, v in datasets[0].variables.items():
if k not in concat_over:
insert_result_variable(k, v)
# check that global attributes and non-concatenated variables are fixed
# across all datasets
for ds in datasets[1:]:
if (compat == 'identical' and
not utils.dict_equiv(ds.attrs, result_attrs)):
raise ValueError('dataset global attributes not equal')
for k, v in iteritems(ds.variables):
if k not in result_vars and k not in concat_over:
raise ValueError('encountered unexpected variable %r' % k)
elif (k in result_coord_names) != (k in ds.coords):
raise ValueError('%r is a coordinate in some datasets but not '
'others' % k)
elif k in result_vars and k != dim:
# Don't use Variable.identical as it internally invokes
# Variable.equals, and we may already know the answer
if compat == 'identical' and not utils.dict_equiv(
v.attrs, result_vars[k].attrs):
raise ValueError(
'variable %s not identical across datasets' % k)
# Proceed with equals()
try:
# May be populated when using the "different" method
is_equal = equals[k]
except KeyError:
result_vars[k].load()
is_equal = v.equals(result_vars[k])
if not is_equal:
raise ValueError(
'variable %s not equal across datasets' % k)
# we've already verified everything is consistent; now, calculate
# shared dimension sizes so we can expand the necessary variables
dim_lengths = [ds.dims.get(dim, 1) for ds in datasets]
non_concat_dims = {}
for ds in datasets:
non_concat_dims.update(ds.dims)
non_concat_dims.pop(dim, None)
def ensure_common_dims(vars):
# ensure each variable with the given name shares the same
# dimensions and the same shape for all of them except along the
# concat dimension
common_dims = tuple(pd.unique([d for v in vars for d in v.dims]))
if dim not in common_dims:
common_dims = (dim,) + common_dims
for var, dim_len in zip(vars, dim_lengths):
if var.dims != common_dims:
common_shape = tuple(non_concat_dims.get(d, dim_len)
for d in common_dims)
var = var.set_dims(common_dims, common_shape)
yield var
# stack up each variable to fill-out the dataset (in order)
for k in datasets[0].variables:
if k in concat_over:
vars = ensure_common_dims([ds.variables[k] for ds in datasets])
combined = concat_vars(vars, dim, positions)
insert_result_variable(k, combined)
result = Dataset(result_vars, attrs=result_attrs)
result = result.set_coords(result_coord_names)
result.encoding = result_encoding
if coord is not None:
        # add concat dimension last to ensure that it's in the final Dataset
result[coord.name] = coord
return result
def _dataarray_concat(arrays, dim, data_vars, coords, compat,
positions):
arrays = list(arrays)
if data_vars != 'all':
raise ValueError('data_vars is not a valid argument when '
'concatenating DataArray objects')
datasets = []
for n, arr in enumerate(arrays):
if n == 0:
name = arr.name
elif name != arr.name:
if compat == 'identical':
raise ValueError('array names not identical')
else:
arr = arr.rename(name)
datasets.append(arr._to_temp_dataset())
ds = _dataset_concat(datasets, dim, data_vars, coords, compat,
positions)
return arrays[0]._from_temp_dataset(ds, name)
def _auto_concat(datasets, dim=None, data_vars='all', coords='different'):
if len(datasets) == 1 and dim is None:
# There is nothing more to combine, so kick out early.
return datasets[0]
else:
if dim is None:
ds0 = datasets[0]
ds1 = datasets[1]
concat_dims = set(ds0.dims)
if ds0.dims != ds1.dims:
dim_tuples = set(ds0.dims.items()) - set(ds1.dims.items())
concat_dims = set(i for i, _ in dim_tuples)
if len(concat_dims) > 1:
concat_dims = set(d for d in concat_dims
if not ds0[d].equals(ds1[d]))
if len(concat_dims) > 1:
raise ValueError('too many different dimensions to '
'concatenate: %s' % concat_dims)
elif len(concat_dims) == 0:
raise ValueError('cannot infer dimension to concatenate: '
'supply the ``concat_dim`` argument '
'explicitly')
dim, = concat_dims
return concat(datasets, dim=dim, data_vars=data_vars, coords=coords)
_CONCAT_DIM_DEFAULT = '__infer_concat_dim__'
def auto_combine(datasets,
concat_dim=_CONCAT_DIM_DEFAULT,
compat='no_conflicts',
data_vars='all', coords='different'):
"""Attempt to auto-magically combine the given datasets into one.
This method attempts to combine a list of datasets into a single entity by
inspecting metadata and using a combination of concat and merge.
It does not concatenate along more than one dimension or sort data under
any circumstances. It does align coordinates, but different variables on
datasets can cause it to fail under some scenarios. In complex cases, you
may need to clean up your data and use ``concat``/``merge`` explicitly.
``auto_combine`` works well if you have N years of data and M data
variables, and each combination of a distinct time period and set of data
    variables is saved in its own dataset.
Parameters
----------
datasets : sequence of xarray.Dataset
Dataset objects to merge.
concat_dim : str or DataArray or Index, optional
Dimension along which to concatenate variables, as used by
:py:func:`xarray.concat`. You only need to provide this argument if
the dimension along which you want to concatenate is not a dimension
in the original datasets, e.g., if you want to stack a collection of
2D arrays along a third dimension.
By default, xarray attempts to infer this argument by examining
component files. Set ``concat_dim=None`` explicitly to disable
concatenation.
compat : {'identical', 'equals', 'broadcast_equals',
'no_conflicts'}, optional
String indicating how to compare variables of the same name for
potential conflicts:
- 'broadcast_equals': all values must be equal when variables are
broadcast against each other to ensure common dimensions.
- 'equals': all values and dimensions must be the same.
- 'identical': all values, dimensions and attributes must be the
same.
- 'no_conflicts': only values which are not null in both datasets
must be equal. The returned dataset then contains the combination
of all non-null values.
data_vars : {'minimal', 'different', 'all' or list of str}, optional
Details are in the documentation of concat
    coords : {'minimal', 'different', 'all' or list of str}, optional
Details are in the documentation of concat
Returns
-------
combined : xarray.Dataset
See also
--------
concat
Dataset.merge
"""
from toolz import itertoolz
if concat_dim is not None:
dim = None if concat_dim is _CONCAT_DIM_DEFAULT else concat_dim
grouped = itertoolz.groupby(lambda ds: tuple(sorted(ds.data_vars)),
datasets).values()
concatenated = [_auto_concat(ds, dim=dim,
data_vars=data_vars, coords=coords)
for ds in grouped]
else:
concatenated = datasets
merged = merge(concatenated, compat=compat)
return merged
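# Illustrative sketch: a hedged example of ``auto_combine``, assuming it is
# exposed as ``xarray.auto_combine``. Both parts share the same data
# variables, so they fall into one group and _auto_concat infers 'time' as
# the concatenation dimension. Names are hypothetical; call
# _example_auto_combine() manually to run it.
def _example_auto_combine():
    import numpy as np
    import xarray as xr
    part1 = xr.Dataset({'temp': ('time', np.arange(2))})
    part2 = xr.Dataset({'temp': ('time', np.arange(2, 4))})
    combined = xr.auto_combine([part1, part2])
    assert combined.dims['time'] == 4
    return combined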
| {
"repo_name": "jcmgray/xarray",
"path": "xarray/core/combine.py",
"copies": "1",
"size": "18716",
"license": "apache-2.0",
"hash": -7987946823958506000,
"line_mean": 41.5363636364,
"line_max": 79,
"alpha_frac": 0.5915793973,
"autogenerated": false,
"ratio": 4.659198406771222,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5750777804071223,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import weakref
from itertools import chain
from weakref import WeakKeyDictionary
from contextlib import contextmanager
from .callback_container import CallbackContainer
__all__ = ['CallbackProperty', 'callback_property',
'add_callback', 'remove_callback',
'delay_callback', 'ignore_callback',
'HasCallbackProperties', 'keep_in_sync']
class CallbackProperty(object):
"""
A property that callback functions can be added to.
When a callback property changes value, each callback function
is called with information about the state change. Otherwise,
callback properties behave just like normal instance variables.
CallbackProperties must be defined at the class level. Use
the helper function :func:`~echo.add_callback` to attach a callback to
a specific instance of a class with CallbackProperties
Parameters
----------
default
The initial value for the property
docstring : str
The docstring for the property
getter, setter : func
Custom getter and setter functions (advanced)
"""
def __init__(self, default=None, docstring=None, getter=None, setter=None):
"""
:param default: The initial value for the property
"""
self._default = default
self._callbacks = WeakKeyDictionary()
self._2arg_callbacks = WeakKeyDictionary()
self._disabled = WeakKeyDictionary()
self._values = WeakKeyDictionary()
if getter is None:
getter = self._default_getter
if setter is None:
setter = self._default_setter
self._getter = getter
self._setter = setter
if docstring is not None:
self.__doc__ = docstring
def _default_getter(self, instance, owner=None):
return self._values.get(instance, self._default)
def _default_setter(self, instance, value):
self._values.__setitem__(instance, value)
def __get__(self, instance, owner=None):
if instance is None:
return self
return self._getter(instance)
def __set__(self, instance, value):
try:
old = self.__get__(instance)
except AttributeError: # pragma: no cover
old = None
self._setter(instance, value)
new = self.__get__(instance)
if old != new:
self.notify(instance, old, new)
def setter(self, func):
"""
Method to use as a decorator, to mimic @property.setter
"""
self._setter = func
return self
def _get_full_info(self, instance):
# Some callback subclasses may contain additional info in addition
# to the main value, and we need to use this full information when
# comparing old and new 'values', so this method is used in that
# case. The result should be a tuple where the first item is the
# actual primary value of the property and the second item is any
# additional data to use in the comparison.
# Note that we need to make sure we convert any list here to a tuple
# to make sure the value is immutable, otherwise comparisons of
        # old != new will not show any difference (since the list can still
        # be modified in-place)
value = self.__get__(instance)
if isinstance(value, list):
value = tuple(value)
return value, None
def notify(self, instance, old, new):
"""
Call all callback functions with the current value
Each callback will either be called using
callback(new) or callback(old, new) depending
on whether ``echo_old`` was set to `True` when calling
:func:`~echo.add_callback`
Parameters
----------
instance
The instance to consider
old
The old value of the property
new
The new value of the property
"""
if not self.enabled(instance):
return
for cback in self._callbacks.get(instance, []):
cback(new)
for cback in self._2arg_callbacks.get(instance, []):
cback(old, new)
def disable(self, instance):
"""
Disable callbacks for a specific instance
"""
self._disabled[instance] = True
def enable(self, instance):
"""
Enable previously-disabled callbacks for a specific instance
"""
self._disabled[instance] = False
def enabled(self, instance):
return not self._disabled.get(instance, False)
def add_callback(self, instance, func, echo_old=False, priority=0):
"""
Add a callback to a specific instance that manages this property
Parameters
----------
instance
The instance to add the callback to
func : func
The callback function to add
echo_old : bool, optional
If `True`, the callback function will be invoked with both the old
and new values of the property, as ``func(old, new)``. If `False`
(the default), will be invoked as ``func(new)``
priority : int, optional
This can optionally be used to force a certain order of execution of
callbacks (larger values indicate a higher priority).
"""
if echo_old:
self._2arg_callbacks.setdefault(instance, CallbackContainer()).append(func, priority=priority)
else:
self._callbacks.setdefault(instance, CallbackContainer()).append(func, priority=priority)
def remove_callback(self, instance, func):
"""
Remove a previously-added callback
Parameters
----------
instance
The instance to detach the callback from
func : func
The callback function to remove
"""
for cb in [self._callbacks, self._2arg_callbacks]:
if instance not in cb:
continue
if func in cb[instance]:
cb[instance].remove(func)
return
else:
raise ValueError("Callback function not found: %s" % func)
class HasCallbackProperties(object):
"""
A class that adds functionality to subclasses that use callback properties.
"""
def __init__(self):
from .list import ListCallbackProperty
self._global_callbacks = CallbackContainer()
self._ignored_properties = set()
self._delayed_properties = {}
self._delay_global_calls = {}
self._callback_wrappers = {}
for prop_name, prop in self.iter_callback_properties():
if isinstance(prop, ListCallbackProperty):
prop.add_callback(self, self._notify_global_lists)
def _ignore_global_callbacks(self, properties):
# This is to allow ignore_callbacks to work for global callbacks
self._ignored_properties.update(properties)
def _unignore_global_callbacks(self, properties):
# Once this is called, we simply remove properties from _ignored_properties
# and don't call the callbacks. This is used by ignore_callback
self._ignored_properties -= set(properties)
def _delay_global_callbacks(self, properties):
# This is to allow delay_callback to still have an effect in delaying
# global callbacks. We set _delayed_properties to a dictionary of the
# values at the point at which the callbacks are delayed.
self._delayed_properties.update(properties)
def _process_delayed_global_callbacks(self, properties):
# Once this is called, the global callbacks are called once each with
# a dictionary of the current values of properties that have been
# resumed.
kwargs = {}
for prop, new_value in properties.items():
old_value = self._delayed_properties.pop(prop)
if old_value != new_value:
kwargs[prop] = new_value[0]
self._notify_global(**kwargs)
def _notify_global_lists(self, *args):
from .list import ListCallbackProperty
properties = {}
for prop_name, prop in self.iter_callback_properties():
if isinstance(prop, ListCallbackProperty):
callback_list = getattr(self, prop_name)
if callback_list is args[0]:
properties[prop_name] = callback_list
break
self._notify_global(**properties)
def _notify_global(self, **kwargs):
for prop in set(self._delayed_properties) | set(self._ignored_properties):
if prop in kwargs:
kwargs.pop(prop)
if len(kwargs) > 0:
for callback in self._global_callbacks:
callback(**kwargs)
def __setattr__(self, attribute, value):
super(HasCallbackProperties, self).__setattr__(attribute, value)
if self.is_callback_property(attribute):
self._notify_global(**{attribute: value})
def add_callback(self, name, callback, echo_old=False, priority=0):
"""
Add a callback that gets triggered when a callback property of the
class changes.
Parameters
----------
name : str
The instance to add the callback to.
callback : func
The callback function to add
echo_old : bool, optional
If `True`, the callback function will be invoked with both the old
and new values of the property, as ``callback(old, new)``. If `False`
(the default), will be invoked as ``callback(new)``
priority : int, optional
This can optionally be used to force a certain order of execution of
callbacks (larger values indicate a higher priority).
"""
if self.is_callback_property(name):
prop = getattr(type(self), name)
prop.add_callback(self, callback, echo_old=echo_old, priority=priority)
else:
raise TypeError("attribute '{0}' is not a callback property".format(name))
def remove_callback(self, name, callback):
"""
Remove a previously-added callback
Parameters
----------
name : str
The instance to remove the callback from.
func : func
The callback function to remove
"""
if self.is_callback_property(name):
prop = getattr(type(self), name)
try:
prop.remove_callback(self, callback)
except ValueError: # pragma: nocover
pass # Be forgiving if callback was already removed before
else:
raise TypeError("attribute '{0}' is not a callback property".format(name))
def add_global_callback(self, callback):
"""
Add a global callback function, which is a callback that gets triggered
when any callback properties on the class change.
Parameters
----------
callback : func
The callback function to add
"""
self._global_callbacks.append(callback)
def remove_global_callback(self, callback):
"""
Remove a global callback function.
Parameters
----------
callback : func
The callback function to remove
"""
self._global_callbacks.remove(callback)
def is_callback_property(self, name):
"""
Whether a property (identified by name) is a callback property.
Parameters
----------
name : str
The name of the property to check
"""
return isinstance(getattr(type(self), name, None), CallbackProperty)
def iter_callback_properties(self):
"""
Iterator to loop over all callback properties.
"""
for name in dir(self):
if self.is_callback_property(name):
yield name, getattr(type(self), name)
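# Illustrative sketch: a hedged example of a global callback, which fires for
# any callback property change on a HasCallbackProperties subclass. The State
# class and names are hypothetical; call _example_global_callback() manually
# to run it.
def _example_global_callback():
    class State(HasCallbackProperties):
        x = CallbackProperty(0)
        y = CallbackProperty(0)
    changes = []
    def on_change(**kwargs):
        changes.append(kwargs)
    s = State()
    s.add_global_callback(on_change)
    s.x = 1
    s.y = 2
    assert changes == [{'x': 1}, {'y': 2}]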
def add_callback(instance, prop, callback, echo_old=False, priority=0):
"""
Attach a callback function to a property in an instance
Parameters
----------
instance
The instance to add the callback to
prop : str
Name of callback property in `instance`
callback : func
The callback function to add
echo_old : bool, optional
If `True`, the callback function will be invoked with both the old
and new values of the property, as ``func(old, new)``. If `False`
(the default), will be invoked as ``func(new)``
priority : int, optional
This can optionally be used to force a certain order of execution of
callbacks (larger values indicate a higher priority).
Examples
--------
::
class Foo:
bar = CallbackProperty(0)
def callback(value):
pass
f = Foo()
add_callback(f, 'bar', callback)
"""
p = getattr(type(instance), prop)
if not isinstance(p, CallbackProperty):
raise TypeError("%s is not a CallbackProperty" % prop)
p.add_callback(instance, callback, echo_old=echo_old, priority=priority)
def remove_callback(instance, prop, callback):
"""
Remove a callback function from a property in an instance
Parameters
----------
instance
The instance to detach the callback from
prop : str
Name of callback property in `instance`
callback : func
The callback function to remove
"""
p = getattr(type(instance), prop)
if not isinstance(p, CallbackProperty):
raise TypeError("%s is not a CallbackProperty" % prop)
p.remove_callback(instance, callback)
def callback_property(getter):
"""
A decorator to build a CallbackProperty.
This is used by wrapping a getter method, similar to the use of @property::
class Foo(object):
@callback_property
def x(self):
return self._x
@x.setter
def x(self, value):
self._x = value
In simple cases with no getter or setter logic, it's easier to create a
:class:`~echo.CallbackProperty` directly::
        class Foo(object):
x = CallbackProperty(initial_value)
"""
cb = CallbackProperty(getter=getter)
cb.__doc__ = getter.__doc__
return cb
class delay_callback(object):
"""
Delay any callback functions from one or more callback properties
This is a context manager. Within the context block, no callbacks
will be issued. Each callback will be called once on exit
Parameters
----------
instance
An instance object with callback properties
*props : str
One or more properties within instance to delay
Examples
--------
::
with delay_callback(foo, 'bar', 'baz'):
            foo.bar = 20
            foo.baz = 30
            foo.bar = 10
print('done') # callbacks triggered at this point, if needed
"""
# Class-level registry of properties and how many times the callbacks have
# been delayed. The idea is that when nesting calls to delay_callback, the
# delay count is increased, and every time __exit__ is called, the count is
# decreased, and once the count reaches zero, the callback is triggered.
delay_count = {}
old_values = {}
def __init__(self, instance, *props):
self.instance = instance
self.props = props
def __enter__(self):
delay_props = {}
for prop in self.props:
p = getattr(type(self.instance), prop)
if not isinstance(p, CallbackProperty):
raise TypeError("%s is not a CallbackProperty" % prop)
if (self.instance, prop) not in self.delay_count:
self.delay_count[self.instance, prop] = 1
self.old_values[self.instance, prop] = p._get_full_info(self.instance)
delay_props[prop] = p._get_full_info(self.instance)
else:
self.delay_count[self.instance, prop] += 1
p.disable(self.instance)
if isinstance(self.instance, HasCallbackProperties):
self.instance._delay_global_callbacks(delay_props)
def __exit__(self, *args):
resume_props = {}
notifications = []
for prop in self.props:
p = getattr(type(self.instance), prop)
if not isinstance(p, CallbackProperty): # pragma: no cover
raise TypeError("%s is not a CallbackProperty" % prop)
if self.delay_count[self.instance, prop] > 1:
self.delay_count[self.instance, prop] -= 1
else:
self.delay_count.pop((self.instance, prop))
old = self.old_values.pop((self.instance, prop))
p.enable(self.instance)
new = p._get_full_info(self.instance)
if old != new:
notifications.append((p, (self.instance, old[0], new[0])))
resume_props[prop] = new
if isinstance(self.instance, HasCallbackProperties):
self.instance._process_delayed_global_callbacks(resume_props)
for p, args in notifications:
p.notify(*args)
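# Illustrative sketch: a hedged example showing that nested delay_callback
# blocks notify only once, at the outermost exit (see the class-level
# delay_count registry above). Names are hypothetical; call
# _example_delay_callback() manually to run it.
def _example_delay_callback():
    class State(object):
        value = CallbackProperty(0)
    events = []
    def record(new):
        events.append(new)
    s = State()
    State.value.add_callback(s, record)
    with delay_callback(s, 'value'):
        with delay_callback(s, 'value'):
            s.value = 1
            s.value = 2
        assert events == []  # still delayed: the outer block has not exited
    assert events == [2]  # a single notification with the final value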
@contextmanager
def ignore_callback(instance, *props):
"""
Temporarily ignore any callbacks from one or more callback properties
This is a context manager. Within the context block, no callbacks will be
    issued. In contrast with :func:`~echo.delay_callback`, no callbacks will be
called on exiting the context manager
Parameters
----------
instance
An instance object with callback properties
*props : str
One or more properties within instance to ignore
Examples
--------
::
with ignore_callback(foo, 'bar', 'baz'):
            foo.bar = 20
            foo.baz = 30
            foo.bar = 10
print('done') # no callbacks called
"""
for prop in props:
p = getattr(type(instance), prop)
if not isinstance(p, CallbackProperty):
raise TypeError("%s is not a CallbackProperty" % prop)
p.disable(instance)
if isinstance(instance, HasCallbackProperties):
instance._ignore_global_callbacks(props)
yield
for prop in props:
p = getattr(type(instance), prop)
assert isinstance(p, CallbackProperty)
p.enable(instance)
if isinstance(instance, HasCallbackProperties):
instance._unignore_global_callbacks(props)
class keep_in_sync(object):
def __init__(self, instance1, prop1, instance2, prop2):
self.instance1 = weakref.ref(instance1, self.disable_syncing)
self.prop1 = prop1
self.instance2 = weakref.ref(instance2, self.disable_syncing)
self.prop2 = prop2
self._syncing = False
self.enabled = False
self.enable_syncing()
def prop1_from_prop2(self, value):
if not self._syncing:
self._syncing = True
setattr(self.instance1(), self.prop1, getattr(self.instance2(), self.prop2))
self._syncing = False
def prop2_from_prop1(self, value):
if not self._syncing:
self._syncing = True
setattr(self.instance2(), self.prop2, getattr(self.instance1(), self.prop1))
self._syncing = False
def enable_syncing(self, *args):
if self.enabled:
return
add_callback(self.instance1(), self.prop1, self.prop2_from_prop1)
add_callback(self.instance2(), self.prop2, self.prop1_from_prop2)
self.enabled = True
def disable_syncing(self, *args):
if not self.enabled:
return
if self.instance1() is not None:
remove_callback(self.instance1(), self.prop1, self.prop2_from_prop1)
if self.instance2() is not None:
remove_callback(self.instance2(), self.prop2, self.prop1_from_prop2)
self.enabled = False
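# Illustrative sketch: a hedged example of keep_in_sync, where changing either
# callback property pushes the new value to the other. The Slider class and
# names are hypothetical; call _example_keep_in_sync() manually to run it.
def _example_keep_in_sync():
    class Slider(object):
        position = CallbackProperty(0)
    a = Slider()
    b = Slider()
    sync = keep_in_sync(a, 'position', b, 'position')
    a.position = 5
    assert b.position == 5
    b.position = 7
    assert a.position == 7
    return sync  # call sync.disable_syncing() to detach the two callbacks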
| {
"repo_name": "stscieisenhamer/glue",
"path": "glue/external/echo/core.py",
"copies": "1",
"size": "20093",
"license": "bsd-3-clause",
"hash": 4903858446537994000,
"line_mean": 31.7781402936,
"line_max": 106,
"alpha_frac": 0.6016523167,
"autogenerated": false,
"ratio": 4.59688858384809,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.569854090054809,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import weakref
import logging
from abc import ABCMeta, abstractmethod
from glue.utils import CallbackMixin
from glue.core.data_factories import load_data
from glue.core.edit_subset_mode import EditSubsetMode
MAX_UNDO = 50
"""
The classes in this module allow user actions to be stored as commands,
which can be undone/redone
All UI frontends should map interactions to command objects, instead
of directly performing an action.
Commands have access to two sources of data: the first are the
keyword arguments passed to the constructor. These are stored as
attributes of self. The second is a session object passed to all
Command.do and Command.undo calls.
"""
class Command(object):
"""
A class to encapsulate (and possibly undo) state changes
Subclasses of this abstract base class must implement the
`do` and `undo` methods.
Both `do` and `undo` receive a single input argument named
`session` -- this is whatever object is passed to the constructor
of :class:`glue.core.command.CommandStack`. This object is used
to store and retrieve resources needed by each command. The
Glue application itself uses a :class:`~glue.core.session.Session`
instance for this.
Each class should also override the class-level kwargs list,
to list the required keyword arguments that should be passed to the
command constructor. The base class will check that these
keywords are indeed provided. Commands should not take
non-keyword arguments in the constructor method
"""
__metaclass__ = ABCMeta
kwargs = []
def __init__(self, **kwargs):
kwargs = kwargs.copy()
for k in self.kwargs:
if k not in kwargs:
raise RuntimeError("Required keyword %s not passed to %s" %
(k, type(self)))
setattr(self, k, kwargs.pop(k))
self.extra = kwargs
@abstractmethod
def do(self, session):
"""
Execute the command
:param session: An object used to store and fetch resources
needed by a Command.
"""
pass
@abstractmethod
def undo(self, session):
pass
@property
def label(self):
return type(self).__name__
class CommandStack(CallbackMixin):
"""
The command stack collects commands,
and saves them to enable undoing/redoing
After instantiation, something can be assigned to
the session property. This is passed as the sole argument
of all Command (un)do methods.
"""
def __init__(self):
super(CommandStack, self).__init__()
self._session = None
self._command_stack = []
self._undo_stack = []
@property
def session(self):
return self._session
@session.setter
def session(self, value):
self._session = value
@property
def undo_label(self):
""" Brief label for the command reversed by an undo """
if len(self._command_stack) == 0:
return ''
cmd = self._command_stack[-1]
return cmd.label
@property
def redo_label(self):
""" Brief label for the command executed on a redo"""
if len(self._undo_stack) == 0:
return ''
cmd = self._undo_stack[-1]
return cmd.label
def do(self, cmd):
"""
Execute and log a new command
:rtype: The return value of cmd.do()
"""
logging.getLogger(__name__).debug("Do %s", cmd)
self._command_stack.append(cmd)
result = cmd.do(self._session)
self._command_stack = self._command_stack[-MAX_UNDO:]
self._undo_stack = []
self.notify('do')
return result
def undo(self):
"""
Undo the previous command
:raises: IndexError, if there are no objects to undo
"""
try:
c = self._command_stack.pop()
logging.getLogger(__name__).debug("Undo %s", c)
except IndexError:
raise IndexError("No commands to undo")
self._undo_stack.append(c)
c.undo(self._session)
self.notify('undo')
def redo(self):
"""
Redo the previously-undone command
:raises: IndexError, if there are no undone actions
"""
try:
c = self._undo_stack.pop()
logging.getLogger(__name__).debug("Undo %s", c)
except IndexError:
raise IndexError("No commands to redo")
result = c.do(self._session)
self._command_stack.append(c)
self.notify('redo')
return result
def can_undo_redo(self):
"""
Return whether undo and redo options are possible
:rtype: (bool, bool) - Whether undo and redo are possible, respectively
"""
return len(self._command_stack) > 0, len(self._undo_stack) > 0
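# Illustrative sketch: a hedged, hypothetical Command subclass plus a
# CommandStack round trip. It assumes CommandStack can be constructed
# standalone and that the session object is free-form, as described in the
# module docstring. Call _example_command_stack() manually to run it.
def _example_command_stack():
    class SetValue(Command):
        kwargs = ['target', 'key', 'value']
        label = 'set value'
        def do(self, session):
            self.old = self.target.get(self.key)
            self.target[self.key] = self.value
        def undo(self, session):
            self.target[self.key] = self.old
    stack = CommandStack()
    stack.session = None  # no shared resources needed for this sketch
    data = {}
    stack.do(SetValue(target=data, key='x', value=1))
    assert data['x'] == 1
    stack.undo()
    assert data['x'] is None  # restored to the previous (unset) value
    stack.redo()
    assert data['x'] == 1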
class LoadData(Command):
kwargs = ['path', 'factory']
label = 'load data'
def do(self, session):
return load_data(self.path, self.factory)
def undo(self, session):
pass
class AddData(Command):
kwargs = ['data']
label = 'add data'
def do(self, session):
session.data_collection.append(self.data)
def undo(self, session):
session.data_collection.remove(self.data)
class RemoveData(Command):
kwargs = ['data']
label = 'remove data'
def do(self, session):
session.data_collection.remove(self.data)
def undo(self, session):
session.data_collection.append(self.data)
class NewDataViewer(Command):
"""Add a new data viewer to the application
:param viewer: The class of viewer to create
:param data: The data object to initialize the viewer with, or None
    :type data: :class:`~glue.core.data.Data` or None
"""
kwargs = ['viewer', 'data']
label = 'new data viewer'
def do(self, session):
v = session.application.new_data_viewer(self.viewer, self.data)
self.created = weakref.ref(v)
return v
def undo(self, session):
created = self.created()
if created is not None:
created.close(warn=False)
class AddLayer(Command):
"""Add a new layer to a viewer
:param layer: The layer to add
:type layer: :class:`~glue.core.data.Data` or :class:`~glue.core.subset.Subset`
:param viewer: The viewer to add the layer to
"""
kwargs = ['layer', 'viewer']
label = 'add layer'
def do(self, session):
self.viewer.add_layer(self.layer)
def undo(self, session):
self.viewer.remove_layer(self.layer)
class ApplyROI(Command):
"""
Apply an ROI to a data collection, updating subset states
Parameters
----------
data_collection: :class:`~glue.core.data_collection.DataCollection`
DataCollection to operate on
roi: :class:`~glue.core.roi.Roi`
ROI to apply
apply_func: callable
The function to call which takes the ROI and actually applies it.
"""
kwargs = ['data_collection', 'roi', 'apply_func']
label = 'apply ROI'
def do(self, session):
self.old_states = {}
for data in self.data_collection:
for subset in data.subsets:
self.old_states[subset] = subset.subset_state
self.apply_func(self.roi)
def undo(self, session):
for data in self.data_collection:
for subset in data.subsets:
if subset not in self.old_states:
subset.delete()
for k, v in self.old_states.items():
k.subset_state = v
class ApplySubsetState(Command):
"""
    Apply a subset state to a data collection, updating subset states
Parameters
----------
data_collection: :class:`~glue.core.data_collection.DataCollection`
DataCollection to operate on
subset_state: :class:`~glue.core.subset_state.SubsetState`
Subset state to apply
"""
kwargs = ['data_collection', 'subset_state']
label = 'apply subset'
def do(self, session):
self.old_states = {}
for data in self.data_collection:
for subset in data.subsets:
self.old_states[subset] = subset.subset_state
mode = EditSubsetMode()
mode.update(self.data_collection, self.subset_state)
def undo(self, session):
for data in self.data_collection:
for subset in data.subsets:
if subset not in self.old_states:
subset.delete()
for k, v in self.old_states.items():
k.subset_state = v
class LinkData(Command):
pass
class SetViewState(Command):
pass
class NewTab(Command):
pass
class CloseTab(Command):
pass
class NewSubset(Command):
pass
class CopySubset(Command):
pass
class PasteSubset(Command):
pass
class SpecialPasteSubset(Command):
pass
class DeleteSubset(Command):
pass
class SetStyle(Command):
pass
class SetLabel(Command):
pass
| {
"repo_name": "stscieisenhamer/glue",
"path": "glue/core/command.py",
"copies": "1",
"size": "9078",
"license": "bsd-3-clause",
"hash": 377555847735910500,
"line_mean": 24.716713881,
"line_max": 83,
"alpha_frac": 0.6156642432,
"autogenerated": false,
"ratio": 4.098419864559819,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5214084107759819,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import workflows.recipe
class RecipeWrapper(object):
'''A wrapper object which contains a recipe and a number of functions to make
life easier for recipe users.
'''
def __init__(self, message=None, transport=None, recipe=None, **kwargs):
'''Create a RecipeWrapper object from a wrapped message.
References to the transport layer are required to send directly to
connected downstream processes.
'''
if message:
self.recipe = workflows.recipe.Recipe(message['recipe'])
self.recipe_pointer = int(message['recipe-pointer'])
self.recipe_step = self.recipe[self.recipe_pointer]
self.recipe_path = message.get('recipe-path', [])
self.environment = message.get('environment', {})
self.payload = message.get('payload')
elif recipe:
if isinstance(recipe, workflows.recipe.Recipe):
self.recipe = recipe
else:
self.recipe = workflows.recipe.Recipe(recipe)
self.recipe_pointer = None
self.recipe_step = None
self.recipe_path = []
self.environment = {}
self.payload = None
else:
raise ValueError('A message or recipe is required to create ' \
'a RecipeWrapper object.')
self.default_channel = None
self.transport = transport
def send(self, *args, **kwargs):
'''Send messages to another service that is connected to the currently
running service via the recipe. The 'send' method will either use a
default channel name, set via the set_default_channel method, or an
unnamed output definition.
'''
if not self.transport:
raise ValueError('This RecipeWrapper object does not contain ' \
'a reference to a transport object.')
if not self.recipe_step:
raise ValueError('This RecipeWrapper object does not contain ' \
'a recipe with a selected step.')
if 'output' not in self.recipe_step:
# The current recipe step does not have output channels.
return
if isinstance(self.recipe_step['output'], dict):
# The current recipe step does have named output channels.
if self.default_channel:
# Use named output channel
self.send_to(self.default_channel, *args, **kwargs)
else:
# The current recipe step does have unnamed output channels.
self._send_to_destinations(self.recipe_step['output'],
*args, **kwargs)
def send_to(self, channel, *args, **kwargs):
'''Send messages to another service that is connected to the currently
running service via the recipe. Discard messages if the recipe does
not have anything connected to the specified output channel.
'''
if not self.transport:
raise ValueError('This RecipeWrapper object does not contain ' \
'a reference to a transport object.')
if not self.recipe_step:
raise ValueError('This RecipeWrapper object does not contain ' \
'a recipe with a selected step.')
if 'output' not in self.recipe_step:
# The current recipe step does not have output channels.
return
if not isinstance(self.recipe_step['output'], dict):
# The current recipe step does not have named output channels.
if self.default_channel == channel:
# Use unnamed output channels
self.send(*args, **kwargs)
return
if channel not in self.recipe_step['output']:
# The current recipe step does not have an output channel with this name.
return
self._send_to_destinations(self.recipe_step['output'][channel],
*args, **kwargs)
def set_default_channel(self, channel):
'''Define one named output channel to be equivalent to unnamed output
channels. For this channel send() and send_to() will be identical.'''
self.default_channel = channel
def start(self, header=None, **kwargs):
'''Trigger the start of a recipe, sending the defined payloads to the
recipients set in the recipe. Any parameters to this function are
passed to the transport send/broadcast methods.
If the wrapped recipe has already been started then a ValueError will
be raised.
'''
if not self.transport:
raise ValueError('This RecipeWrapper object does not contain ' \
'a reference to a transport object.')
if self.recipe_step:
raise ValueError('This recipe has already been started.')
for destination, payload in self.recipe['start']:
self._send_to_destination(destination, header, payload, kwargs)
def checkpoint(self, message, header=None, delay=0, **kwargs):
'''Send a message to the current recipe destination. This can be used to
keep a state for longer processing tasks.
:param delay: Delay transport of message by this many seconds
'''
if not self.transport:
raise ValueError('This RecipeWrapper object does not contain ' \
'a reference to a transport object.')
if not self.recipe_step:
raise ValueError('This RecipeWrapper object does not contain ' \
'a recipe with a selected step.')
kwargs['delay'] = delay
self._send_to_destination(self.recipe_pointer, header, message, kwargs, \
add_path_step=False)
def apply_parameters(self, parameters):
'''Recursively apply parameter replacement (see recipe.py) to the wrapped
recipe, updating internal references afterwards.
While this operation is useful for testing it should not be used in
production. Replacing parameters means that the recipe changes as it is
passed down the chain of services. This makes debugging very difficult.
'''
self.recipe.apply_parameters(parameters)
self.recipe_step = self.recipe[self.recipe_pointer]
def _generate_full_recipe_message(self, destination, message, add_path_step):
'''Factory function to generate independent message objects for
downstream recipients with different destinations.'''
if add_path_step and self.recipe_pointer:
recipe_path = self.recipe_path + [ self.recipe_pointer ]
else:
recipe_path = self.recipe_path
return {
'environment': self.environment,
'payload': message,
'recipe': self.recipe.recipe,
'recipe-path': recipe_path,
'recipe-pointer': destination,
}
def _send_to_destinations(self, destinations, message, header=None, **kwargs):
'''Send messages to a list of numbered destinations. This is an internal
helper method used by the public 'send' methods.
'''
if not isinstance(destinations, list):
destinations = ( destinations, )
for destination in destinations:
self._send_to_destination(destination, header, message, kwargs)
def _send_to_destination(self, destination, header, payload, \
transport_kwargs, add_path_step=True):
'''Helper function to send a message to a specific recipe destination.'''
if header:
header = header.copy()
header['workflows-recipe'] = True
else:
header = { 'workflows-recipe': True }
dest_kwargs = transport_kwargs.copy()
if 'transport-delay' in self.recipe[destination] and \
'delay' not in transport_kwargs:
dest_kwargs['delay'] = self.recipe[destination]['transport-delay']
if self.recipe[destination].get('queue'):
self.transport.send(
self.recipe[destination]['queue'],
self._generate_full_recipe_message(destination, payload, add_path_step),
headers=header, **dest_kwargs)
if self.recipe[destination].get('topic'):
self.transport.broadcast(
self.recipe[destination]['topic'],
self._generate_full_recipe_message(destination, payload, add_path_step),
headers=header, **dest_kwargs)
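# Illustrative sketch: a hedged look at the wrapped-message layout that the
# RecipeWrapper constructor consumes and that _generate_full_recipe_message()
# re-emits for downstream services. The recipe content, step numbers and
# queue names below are hypothetical.
def _example_wrapped_message():
    wrapped = {
        'recipe': {  # full recipe, handed to workflows.recipe.Recipe
            1: {'queue': 'analysis', 'output': {'result': 2}},
            2: {'queue': 'archive'},
            'start': [[1, []]],
        },
        'recipe-pointer': 1,  # the step this service should act on
        'recipe-path': [],  # steps already traversed upstream
        'environment': {'ID': 'abc123'},  # recipe-wide shared values
        'payload': {'file': '/tmp/input.dat'},
    }
    return wrapped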
| {
"repo_name": "xia2/workflows",
"path": "workflows/recipe/wrapper.py",
"copies": "1",
"size": "7999",
"license": "bsd-3-clause",
"hash": 1529886499157076000,
"line_mean": 40.0205128205,
"line_max": 82,
"alpha_frac": 0.658832354,
"autogenerated": false,
"ratio": 4.488776655443322,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.009028049538223995,
"num_lines": 195
} |
from __future__ import absolute_import, division, print_function
import zipfile
from fsspec import open_files
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class ZipFileSystem(AbstractArchiveFileSystem):
"""Read contents of ZIP archive as a file-system
Keeps file object open while instance lives.
This class is pickleable, but not necessarily thread-safe
"""
root_marker = ""
protocol = "zip"
def __init__(
self,
fo="",
mode="r",
target_protocol=None,
target_options=None,
block_size=DEFAULT_BLOCK_SIZE,
**kwargs,
):
"""
Parameters
----------
fo: str or file-like
Contains ZIP, and must exist. If a str, will fetch file using
`open_files()`, which must return one file exactly.
mode: str
Currently, only 'r' accepted
target_protocol: str (optional)
If ``fo`` is a string, this value can be used to override the
FS protocol inferred from a URL
target_options: dict (optional)
Kwargs passed when instantiating the target FS, if ``fo`` is
a string.
"""
super().__init__(self, **kwargs)
if mode != "r":
raise ValueError("Only read from zip files accepted")
if isinstance(fo, str):
files = open_files(fo, protocol=target_protocol, **(target_options or {}))
if len(files) != 1:
raise ValueError(
'Path "{}" did not resolve to exactly'
'one file: "{}"'.format(fo, files)
)
fo = files[0]
self.fo = fo.__enter__() # the whole instance is a context
self.zip = zipfile.ZipFile(self.fo)
self.block_size = block_size
self.dir_cache = None
@classmethod
def _strip_protocol(cls, path):
# zip file paths are always relative to the archive root
return super()._strip_protocol(path).lstrip("/")
def _get_dirs(self):
if self.dir_cache is None:
files = self.zip.infolist()
self.dir_cache = {
dirname + "/": {"name": dirname + "/", "size": 0, "type": "directory"}
for dirname in self._all_dirnames(self.zip.namelist())
}
for z in files:
f = {s: getattr(z, s) for s in zipfile.ZipInfo.__slots__}
f.update(
{
"name": z.filename,
"size": z.file_size,
"type": ("directory" if z.is_dir() else "file"),
}
)
self.dir_cache[f["name"]] = f
def cat(self, path, callback=None, **kwargs):
return self.zip.read(path)
def _open(
self,
path,
mode="rb",
block_size=None,
autocommit=True,
cache_options=None,
**kwargs,
):
path = self._strip_protocol(path)
if mode != "rb":
raise NotImplementedError
info = self.info(path)
out = self.zip.open(path, "r")
out.size = info["size"]
out.name = info["name"]
return out
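# Illustrative sketch: a hedged example of reading from a ZIP archive through
# this filesystem. It assumes an "archive.zip" file exists and contains
# "data/a.txt"; both names are hypothetical. Call _example_zip_read()
# manually to run it.
def _example_zip_read():
    fs = ZipFileSystem(fo="archive.zip")
    # Directory listings and file info come from the cached zip infolist()
    print(fs.ls("data/"))
    # Whole-file reads via cat(), or streaming access via open()
    contents = fs.cat("data/a.txt")
    with fs.open("data/a.txt", "rb") as f:
        assert f.read() == contents
    return contents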
| {
"repo_name": "intake/filesystem_spec",
"path": "fsspec/implementations/zip.py",
"copies": "1",
"size": "3287",
"license": "bsd-3-clause",
"hash": 4395398571788153300,
"line_mean": 30.9126213592,
"line_max": 86,
"alpha_frac": 0.5223608153,
"autogenerated": false,
"ratio": 4.279947916666667,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00022318937618569356,
"num_lines": 103
} |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from abc import ABCMeta, abstractmethod
class AbstractConsumer:
__metaclass__ = ABCMeta
"""
This class provides facilities to create and manage queue consumers. To
create a consumer, subclass this class and override the :meth:`run`
method. Then, instantiate the class with the desired parameters and call
:meth:`declare` to declare the consumer to the server.
Example::
class Consumer(AbstractConsumer):
def run(self, msg: Message):
print('Received message: {}'.format(msg.body))
msg.ack()
c1 = Consumer(ch, 'test.q')
c1.declare()
conn.drain_events()
"""
def __init__(self, channel, queue, consumer_tag='', no_local=False,
no_ack=False, exclusive=False):
"""
:param channel: channel
:type channel: amqpy.channel.Channel
:param str queue: queue
:param str consumer_tag: consumer tag, local to the connection; leave
blank to let server auto-assign a tag
:param bool no_local: if True: do not deliver own messages
:param bool no_ack: server will not expect an ack for each message
:param bool exclusive: request exclusive access
"""
self.channel = channel
self.queue = queue
self.consumer_tag = consumer_tag
self.no_local = no_local
self.no_ack = no_ack
self.exclusive = exclusive
#: Number of messages consumed (incremented automatically)
self.consume_count = 0
def declare(self):
"""Declare the consumer
This method calls :meth:`~amqpy.channel.Channel.basic_consume()`
internally.
After the queue consumer is created, :attr:`self.consumer_tag` is
set to the server-assigned consumer tag if a tag was not specified
initially.
"""
self.consumer_tag = self.channel.basic_consume(
self.queue, self.consumer_tag, self.no_local, self.no_ack, self.exclusive,
callback=self.start, on_cancel=self.cancel_cb)
def cancel(self):
"""Cancel the consumer
"""
self.channel.basic_cancel(self.consumer_tag)
@abstractmethod
def run(self, msg):
"""Consumer callback
This method is called when the consumer is delivered a message. This
method **must** be overridden in the subclass.
:param msg: received message
:type msg: amqpy.message.Message
"""
pass
def cancel_cb(self, consumer_tag):
"""Consumer cancel callback
This method is called when the consumer is cancelled. This method may
be overridden in the subclass.
:param str consumer_tag: consumer tag
"""
pass
def start(self, msg):
self.run(msg)
self.consume_count += 1
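# Illustrative sketch: a hedged variant of the docstring example above. It
# assumes a broker reachable with amqpy's default connection settings and the
# usual Connection/channel API; the queue name is hypothetical. Call
# _example_consumer() manually to run it (it blocks on drain_events).
def _example_consumer():
    from amqpy import Connection
    class Printer(AbstractConsumer):
        def run(self, msg):
            print('Received message: {}'.format(msg.body))
            msg.ack()
    conn = Connection()
    ch = conn.channel()
    ch.queue_declare('test.q')
    consumer = Printer(ch, 'test.q')
    consumer.declare()
    conn.drain_events()  # dispatches incoming messages to Printer.run()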
| {
"repo_name": "veegee/amqpy",
"path": "amqpy/consumer.py",
"copies": "1",
"size": "2942",
"license": "mit",
"hash": 6078707061962467000,
"line_mean": 30.6344086022,
"line_max": 86,
"alpha_frac": 0.6186267845,
"autogenerated": false,
"ratio": 4.4174174174174174,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00012359411692003462,
"num_lines": 93
} |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from collections import namedtuple
queue_declare_ok_t = namedtuple('queue_declare_ok_t', ['queue', 'message_count', 'consumer_count'])
basic_return_t = namedtuple('basic_return_t',
['reply_code', 'reply_text', 'exchange', 'routing_key', 'message'])
method_t = namedtuple('method_t', ['class_id', 'method_id'])
#: The default, minimum frame size that both the client and server must be able to handle
FRAME_MIN_SIZE = 4096
class FrameType:
"""This class contains frame-related constants
METHOD, HEADER, BODY, and HEARTBEAT are all frame type constants which make up the first byte
of every frame. The END constant is the termination value which is the last byte of every frame.
"""
METHOD = 1 # method frame
HEADER = 2 # content header frame
BODY = 3 # content body frame
HEARTBEAT = 8 # heartbeat frame
END = 206 # not actually a frame type; this is the frame terminator byte
class Connection:
CLASS_ID = 10
Start = method_t(10, 10)
StartOk = method_t(10, 11)
Secure = method_t(10, 20)
SecureOk = method_t(10, 21)
Tune = method_t(10, 30)
TuneOk = method_t(10, 31)
Open = method_t(10, 40)
OpenOk = method_t(10, 41)
Close = method_t(10, 50)
CloseOk = method_t(10, 51)
Blocked = method_t(10, 60)
Unblocked = method_t(10, 61)
class Channel:
CLASS_ID = 20
Open = method_t(20, 10)
OpenOk = method_t(20, 11)
Flow = method_t(20, 20)
FlowOk = method_t(20, 21)
Close = method_t(20, 40)
CloseOk = method_t(20, 41)
class Exchange:
CLASS_ID = 40
Declare = method_t(40, 10)
DeclareOk = method_t(40, 11)
Delete = method_t(40, 20)
DeleteOk = method_t(40, 21)
Bind = method_t(40, 30)
BindOk = method_t(40, 31)
Unbind = method_t(40, 40)
UnbindOk = method_t(40, 51)
class Queue:
CLASS_ID = 50
Declare = method_t(50, 10)
DeclareOk = method_t(50, 11)
Bind = method_t(50, 20)
BindOk = method_t(50, 21)
Purge = method_t(50, 30)
PurgeOk = method_t(50, 31)
Delete = method_t(50, 40)
DeleteOk = method_t(50, 41)
Unbind = method_t(50, 50)
UnbindOk = method_t(50, 51)
class Basic:
CLASS_ID = 60
Qos = method_t(60, 10)
QosOk = method_t(60, 11)
Consume = method_t(60, 20)
ConsumeOk = method_t(60, 21)
Cancel = method_t(60, 30)
CancelOk = method_t(60, 31)
Publish = method_t(60, 40)
Return = method_t(60, 50)
Deliver = method_t(60, 60)
Get = method_t(60, 70)
GetOk = method_t(60, 71)
GetEmpty = method_t(60, 72)
Ack = method_t(60, 80)
Reject = method_t(60, 90)
RecoverAsync = method_t(60, 100)
Recover = method_t(60, 110)
RecoverOk = method_t(60, 111)
class Confirm:
CLASS_ID = 85
Select = method_t(85, 10)
SelectOk = method_t(85, 11)
class Tx:
CLASS_ID = 90
Select = method_t(90, 10)
SelectOk = method_t(90, 11)
Commit = method_t(90, 20)
CommitOk = method_t(90, 21)
Rollback = method_t(90, 30)
RollbackOk = method_t(90, 31)
| {
"repo_name": "veegee/amqpy",
"path": "amqpy/spec.py",
"copies": "1",
"size": "3150",
"license": "mit",
"hash": 7623663350077004000,
"line_mean": 24.6097560976,
"line_max": 100,
"alpha_frac": 0.6117460317,
"autogenerated": false,
"ratio": 2.9275092936802976,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40392553253802976,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from datetime import datetime
from decimal import Decimal
import pickle
from .. import Message
class TestBasicMessage:
def check_proplist(self, msg):
"""Check roundtrip processing of a single object
"""
raw_properties = msg.serialize_properties()
new_msg = Message()
new_msg.load_properties(raw_properties)
new_msg.body = msg.body
assert msg == new_msg
def test_eq(self):
msg = Message('hello', content_type='text/plain')
assert msg
# Make sure that something that looks vaguely like a Message doesn't raise an Attribute
# error when compared to a Message, and instead returns False
class FakeMsg:
pass
fake_msg = FakeMsg()
fake_msg.properties = {'content_type': 'text/plain'}
assert msg != fake_msg
def test_pickle(self):
msg = Message(
'some body' * 200000,
content_type='text/plain',
content_encoding='utf-8',
application_headers={'foo': 7, 'bar': 'baz', 'd2': {'foo2': 'xxx', 'foo3': -1}},
delivery_mode=1,
priority=7,
)
msg2 = pickle.loads(pickle.dumps(msg, -1))
assert msg == msg2
def test_roundtrip(self):
"""Check round-trip processing of content-properties
"""
self.check_proplist(Message())
self.check_proplist(Message(content_type='text/plain'))
self.check_proplist(Message(
content_type='text/plain',
content_encoding='utf-8',
application_headers={'foo': 7, 'bar': 'baz', 'd2': {'foo2': 'xxx', 'foo3': -1}},
delivery_mode=1,
priority=7,
))
self.check_proplist(Message(
application_headers={
'regular': datetime(2007, 11, 12, 12, 34, 56),
'dst': datetime(2007, 7, 12, 12, 34, 56),
},
))
n = datetime.now()
# AMQP only does timestamps to 1-second resolution
n = n.replace(microsecond=0)
self.check_proplist(Message(application_headers={'foo': n}))
self.check_proplist(Message(application_headers={'foo': Decimal('10.1')}))
self.check_proplist(Message(application_headers={'foo': Decimal('-1987654.193')}))
self.check_proplist(Message(timestamp=datetime(1980, 1, 2, 3, 4, 6)))
| {
"repo_name": "veegee/amqpy",
"path": "amqpy/tests/test_basic_message.py",
"copies": "1",
"size": "2475",
"license": "mit",
"hash": -7302332163755806000,
"line_mean": 31.5657894737,
"line_max": 95,
"alpha_frac": 0.5761616162,
"autogenerated": false,
"ratio": 3.9473684210526314,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5023530037252631,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from threading import Lock
import sys
from collections import defaultdict, deque
import six
import logging
import socket
import errno
from .utils import get_errno
if six.PY2:
from Queue import Queue
else:
from queue import Queue
from .concurrency import synchronized
from .exceptions import UnexpectedFrame, Timeout, METHOD_NAME_MAP
from .proto import Method
from . import spec
from .spec import FrameType
log = logging.getLogger('amqpy')
__all__ = ['MethodReader']
# these received methods are followed by content headers and bodies
_CONTENT_METHODS = [
spec.Basic.Return,
spec.Basic.Deliver,
spec.Basic.GetOk,
]
class MethodReader:
"""Read frames from the server and construct complete methods
There should be one `MethodReader` instance per connection.
In the case of a framing error, an :exc:`AMQPConnectionError` is placed in the queue.
    In the case of unexpected frames, a :exc:`ChannelError` is placed in the queue.
"""
def __init__(self, transport):
"""
:param transport: transport to read from
:type transport: amqpy.transport.Transport
"""
self.transport = transport
self.sock = transport.sock
# deque[Method or Exception]
self.method_queue = deque()
# dict[channel_id int: PartialMessage]
self.partial_methods = {}
# next expected frame type for each channel
# dict[channel_id int: frame_type int]
self.expected_types = defaultdict(lambda: FrameType.METHOD)
self.frames_recv = 0 # total number of frames received
self._method_read_lock = Lock()
def _next_method(self):
"""Read the next method from the source and process it
Once one complete method has been assembled, it is placed in the internal queue. This
method will block until a complete `Method` has been constructed, which may consist of one
or more frames.
"""
while not self.method_queue:
# keep reading frames until we have at least one complete method in the queue
try:
frame = self.transport.read_frame()
except Exception as exc:
# connection was closed? framing error?
if six.PY2:
_, _, tb = sys.exc_info()
exc.tb = tb
self.method_queue.append(exc)
break
self.frames_recv += 1
if frame.frame_type not in (self.expected_types[frame.channel], 8):
msg = 'Received frame type {} while expecting type: {}' \
.format(frame.frame_type, self.expected_types[frame.channel])
self.method_queue.append(UnexpectedFrame(msg, channel_id=frame.channel))
elif frame.frame_type == FrameType.METHOD:
self._process_method_frame(frame)
elif frame.frame_type == FrameType.HEADER:
self._process_content_header(frame)
elif frame.frame_type == FrameType.BODY:
self._process_content_body(frame)
def _process_method_frame(self, frame):
"""Process method frame
:param frame: incoming frame
:type frame: amqpy.proto.Frame
"""
channel_id = frame.channel
method = Method()
method.load_method_frame(frame)
if method.method_type in _CONTENT_METHODS:
# save what we've got so far and wait for the content header frame
self.partial_methods[channel_id] = method
self.expected_types[channel_id] = spec.FrameType.HEADER
else:
# this is a complete method
self.method_queue.append(method)
def _process_content_header(self, frame):
"""Process Content Header frames
:param frame: incoming frame
:type frame: amqpy.proto.Frame
"""
#: :type: amqpy.proto.Method
method = self.partial_methods[frame.channel]
method.load_header_frame(frame)
if method.complete:
# a bodyless message, we're done
self.method_queue.append(method)
self.partial_methods.pop(frame.channel, None)
del self.expected_types[frame.channel] # reset expected frame type for this channel
else:
# next expected frame type is FrameType.BODY
self.expected_types[frame.channel] = spec.FrameType.BODY
def _process_content_body(self, frame):
"""Process Content Body frames
:param frame: incoming frame
:type frame: amqpy.proto.Frame
"""
method = self.partial_methods[frame.channel]
method.load_body_frame(frame)
if method.complete:
# message is complete, append it to the queue
self.method_queue.append(method)
self.partial_methods.pop(frame.channel, None)
del self.expected_types[frame.channel] # reset expected frame type for this channel
@synchronized('_method_read_lock')
def _read_method(self):
"""Read a method from the peer
:return: method
:rtype: amqpy.proto.Method
"""
# fully read and process next method
self._next_method()
method = self.method_queue.popleft()
# `method` may sometimes be an `Exception`, raise it here
if isinstance(method, Exception):
raise method
log.debug('{:7} channel: {} {} {}'
.format('Read:', method.channel_id,
method.method_type, METHOD_NAME_MAP[method.method_type]))
return method
def read_method(self, timeout=None):
"""Read method
:param timeout: timeout
:type timeout: float
:return: method
:rtype: amqpy.proto.Method
:raise amqpy.exceptions.Timeout: if the operation times out
"""
if timeout is None:
return self._read_method()
orig_timeout = self.sock.gettimeout()
if orig_timeout != timeout:
self.sock.settimeout(timeout)
try:
return self._read_method()
except socket.timeout:
raise Timeout()
except socket.error as e:
if get_errno(e) == errno.EAGAIN:
raise Timeout()
raise
finally:
if orig_timeout != timeout:
self.sock.settimeout(orig_timeout)
class MethodWriter:
"""Write methods to the server by breaking them up and constructing multiple frames
There should be one `MethodWriter` instance per connection, and all channels share that
instance. This class is thread-safe. Any thread may call :meth:`write_method()` as long as no
more than one thread is writing to any given `channel_id` at a time.
"""
def __init__(self, transport, frame_max):
"""
:param transport: transport to write to
:param frame_max: maximum frame payload size in bytes
:type transport: amqpy.transport.Transport
:type frame_max: int
"""
self.transport = transport
self.frame_max = frame_max
self.methods_sent = 0 # total number of methods sent
def write_method(self, method):
"""Write method to connection, destined for the channel as set in `method.channel_id`
This implementation uses a queue internally to prepare all frames before writing in order
to detect issues.
This method is thread safe only if the `channel_id` parameter is unique across concurrent
invocations. The AMQP protocol allows interleaving frames destined for different channels,
but not within the same channel. This means no more than one thread may safely operate on
any given channel.
:param method: method to write
:type method: amqpy.proto.Method
"""
transport = self.transport
log.debug('{:7} channel: {} {} {}'
.format('Write:', method.channel_id,
method.method_type, METHOD_NAME_MAP[method.method_type]))
frames = Queue()
# construct a method frame
frames.put(method.dump_method_frame())
if method.content:
# construct a header frame
frames.put(method.dump_header_frame())
# construct one or more body frames, which contain the body of the `Message`
chunk_size = self.frame_max - 8
for frame in method.dump_body_frame(chunk_size):
frames.put(frame)
while not frames.empty():
transport.write_frame(frames.get())
self.methods_sent += 1
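# Illustrative sketch: a hedged, standalone illustration of how a message body
# is split across content body frames, mirroring the chunk_size used in
# write_method() above: each frame payload holds at most frame_max - 8 bytes,
# the other 8 bytes being the 7-byte frame header plus the terminator octet.
# Call _example_body_chunking() manually to run it.
def _example_body_chunking(body=b'x' * 10000, frame_max=spec.FRAME_MIN_SIZE):
    chunk_size = frame_max - 8
    chunks = [body[i:i + chunk_size] for i in range(0, len(body), chunk_size)]
    assert b''.join(chunks) == body
    return len(chunks)  # number of BODY frames that would be written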
| {
"repo_name": "veegee/amqpy",
"path": "amqpy/method_io.py",
"copies": "1",
"size": "8751",
"license": "mit",
"hash": -2020871074423022600,
"line_mean": 33.4527559055,
"line_max": 98,
"alpha_frac": 0.6144440635,
"autogenerated": false,
"ratio": 4.390868038133467,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.000856733010775787,
"num_lines": 254
} |