repo_name | path | copies | size | content | license
---|---|---|---|---|---|
jinglining/flink | flink-python/pyflink/table/utils.py | 7 | 2811 | ################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.table.types import DataType, LocalZonedTimestampType
def pandas_to_arrow(schema, timezone, field_types, series):
import pyarrow as pa
def create_array(s, t):
try:
return pa.Array.from_pandas(s, mask=s.isnull(), type=t)
except pa.ArrowException as e:
error_msg = "Exception thrown when converting pandas.Series (%s) to " \
"pyarrow.Array (%s)."
raise RuntimeError(error_msg % (s.dtype, t), e)
arrays = [create_array(
tz_convert_to_internal(series[i], field_types[i], timezone),
schema.types[i]) for i in range(0, len(schema))]
return pa.RecordBatch.from_arrays(arrays, schema)
def arrow_to_pandas(timezone, field_types, batches):
import pyarrow as pa
table = pa.Table.from_batches(batches)
return [tz_convert_from_internal(c.to_pandas(date_as_object=True), t, timezone)
for c, t in zip(table.itercolumns(), field_types)]
def tz_convert_from_internal(s, t: DataType, local_tz):
"""
Converts the timestamp series from internal according to the specified local timezone.
Returns the same series if the series is not a timestamp series. Otherwise,
returns a converted series.
"""
if type(t) == LocalZonedTimestampType:
return s.dt.tz_localize(local_tz)
else:
return s
def tz_convert_to_internal(s, t: DataType, local_tz):
"""
Converts the timestamp series to internal according to the specified local timezone.
"""
if type(t) == LocalZonedTimestampType:
from pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype
if is_datetime64_dtype(s.dtype):
return s.dt.tz_localize(None)
elif is_datetime64tz_dtype(s.dtype):
return s.dt.tz_convert(local_tz).dt.tz_localize(None)
return s
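# Illustrative usage sketch (not exercised by Flink itself; the timestamps and the
# 'Asia/Shanghai' zone are made-up example values): the same timezone round trip that
# tz_convert_to_internal / tz_convert_from_internal perform, shown with plain pandas.
if __name__ == "__main__":
    import pandas as pd
    aware = pd.Series(pd.to_datetime(["2021-01-01 12:00:00", "2021-06-01 08:30:00"]))
    aware = aware.dt.tz_localize("Asia/Shanghai")
    # tz-aware -> naive internal representation (what tz_convert_to_internal produces)
    internal = aware.dt.tz_convert("Asia/Shanghai").dt.tz_localize(None)
    # naive internal -> tz-aware local timestamps (what tz_convert_from_internal produces)
    restored = internal.dt.tz_localize("Asia/Shanghai")
    print(restored.equals(aware))  # expected: True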
| apache-2.0 |
allinpaybusiness/ACS | allinpay projects/creditscoredivmax/creditscore.py | 1 | 23510 | # -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import scipy.stats as ss
import sklearn.cluster as skcluster
from sklearn import metrics
from sklearn import preprocessing
#import imblearn.under_sampling
#import imblearn.over_sampling
#import imblearn.combine
class CreditScore:
def __init__(self, dataname):
#dataname specifies which dataset to load
self.dataname = dataname
#load the dataset
if self.dataname == 'german':
self.data = pd.read_table('..\\..\\raw data\\credit scoring\\german.data.txt',header=None,delim_whitespace=True)
#rename the feature variables A1, A2, A3, ... and the default (target) variable
names = ['A1']
for i in range(1,self.data.shape[1] - 1):
names.append('A' + str(i+1))
names.append('default')
self.data.columns = names
#in the german data 1=good, 2=bad; convert to 0=good, 1=bad
self.data.default = self.data.default.replace({1:0, 2:1})
if self.dataname == 'HMEQ':
self.data = pd.read_csv('raw data\\credit scoring\\HMEQ.csv')
self.data = self.data.rename(columns = {'BAD':'default'})
self.data['MORTDUE'] = pd.to_numeric(self.data['MORTDUE'], errors='coerce')
self.data['MORTDUE'] = self.data['MORTDUE'].fillna(self.data['MORTDUE'].mean())
self.data['VALUE'] = pd.to_numeric(self.data['VALUE'], errors='coerce')
self.data['VALUE'] = self.data['VALUE'].fillna(self.data['VALUE'].mean())
self.data['REASON'] = self.data['REASON'].fillna("NA")
self.data['JOB'] = self.data['JOB'].fillna("NA")
self.data['YOJ'] = pd.to_numeric(self.data['YOJ'], errors='coerce')
self.data['YOJ'] = self.data['YOJ'].fillna(self.data['YOJ'].mean())
self.data['DEROG'] = pd.to_numeric(self.data['DEROG'], errors='coerce')
self.data['DEROG'] = self.data['DEROG'].fillna(self.data['DEROG'].mean())
self.data['DELINQ'] = pd.to_numeric(self.data['DELINQ'], errors='coerce')
self.data['DELINQ'] = self.data['DELINQ'].fillna(self.data['DELINQ'].mean())
self.data['CLAGE'] = pd.to_numeric(self.data['CLAGE'], errors='coerce')
self.data['CLAGE'] = self.data['CLAGE'].fillna(self.data['CLAGE'].mean())
self.data['NINQ'] = pd.to_numeric(self.data['NINQ'], errors='coerce')
self.data['NINQ'] = self.data['NINQ'].fillna(self.data['NINQ'].mean())
self.data['CLNO'] = pd.to_numeric(self.data['CLNO'], errors='coerce')
self.data['CLNO'] = self.data['CLNO'].fillna(self.data['CLNO'].mean())
self.data['DEBTINC'] = pd.to_numeric(self.data['DEBTINC'], errors='coerce')
self.data['DEBTINC'] = self.data['DEBTINC'].fillna(self.data['DEBTINC'].mean())
if self.dataname == 'taiwancredit':
self.data = pd.read_csv('raw data\\credit scoring\\taiwancredit.csv')
self.data = self.data.rename(columns = {'default payment next month':'default'})
#convert SEX, EDUCATION and MARRIAGE to string (categorical) variables
self.data['SEX'] = self.data['SEX'].astype('str')
self.data['EDUCATION'] = self.data['EDUCATION'].astype('str')
self.data['MARRIAGE'] = self.data['MARRIAGE'].astype('str')
def categoricalwoe(self):
#coarse binning and WOE transformation
datawoe = self.data.copy()
for col in datawoe.columns:
if col == 'default':
continue
#first check whether the column is a nominal (categorical) variable
if datawoe[col].dtype == 'O':
for cat in datawoe[col].unique():
#compute the WOE of a single category
nob = max(1, sum((datawoe.default == 1) & (datawoe[col] == cat)))
tnob = sum(datawoe.default == 1)
nog = max(1, sum((datawoe.default == 0) & (datawoe[col] == cat)))
tnog = sum(datawoe.default == 0)
woei = np.log((nob/tnob)/(nog/tnog))
datawoe[col] = datawoe[col].replace({cat:woei})
return datawoe
def binandwoe(self, nclusters, cmethod):
#coarse binning and WOE transformation
#for continuous variables, quantile binning splits at equal quantiles, otherwise equal-width intervals are used
datawoe = self.data.copy()
for col in datawoe.columns:
if col == 'default':
continue
#first check whether the column is a nominal (categorical) variable
if datawoe[col].dtype == 'O':
for cat in datawoe[col].unique():
#compute the WOE of a single category
nob = max(1, sum((datawoe.default == 1) & (datawoe[col] == cat)))
tnob = sum(datawoe.default == 1)
nog = max(1, sum((datawoe.default == 0) & (datawoe[col] == cat)))
tnog = sum(datawoe.default == 0)
woei = np.log((nob/tnob)/(nog/tnog))
datawoe[col] = datawoe[col].replace({cat:woei})
else:
#coarse binning for continuous features
if cmethod == 'quantile':
arrayA = np.arange(0,100,100/nclusters)
arrayB = np.array([100]);
arrayA = np.concatenate((arrayA,arrayB))
breakpoints = np.unique(np.percentile(datawoe[col],arrayA))
if len(breakpoints) == 2:
breakpoints = np.array([breakpoints[0], np.mean(breakpoints), breakpoints[1]])
else:
minvalue = datawoe[col].min()
maxvalue = datawoe[col].max()
breakpoints = np.arange(minvalue, maxvalue, (maxvalue-minvalue)/nclusters)
breakpoints = np.append(breakpoints, maxvalue)
labels = np.arange(len(breakpoints) - 1)
datawoe[col] = pd.cut(datawoe[col],bins=breakpoints,right=True,labels=labels,include_lowest=True)
datawoe[col] = datawoe[col].astype('int64')
for cat in datawoe[col].unique():
#compute the WOE of a single category
nob = max(1, sum((datawoe.default == 1) & (datawoe[col] == cat)))
tnob = sum(datawoe.default == 1)
nog = max(1, sum((datawoe.default == 0) & (datawoe[col] == cat)))
tnog = sum(datawoe.default == 0)
woei = np.log((nob/tnob)/(nog/tnog))
datawoe[col] = datawoe[col].replace({cat:woei})
return datawoe
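# Illustrative usage sketch (assumes the German credit data file referenced in __init__
# is available at its expected path):
#
#     cs = CreditScore('german')
#     datawoe = cs.binandwoe(nclusters=5, cmethod='quantile')  # 5 quantile bins per numeric column
#     print(datawoe.head())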
def binandwoe_traintest(self, X_train, y_train, X_test, nclusters, cmethod):
#coarse binning and WOE transformation
#for continuous variables, 'quantile' splits at equal quantiles and 'equal' uses equal-width intervals
#X_train is binned and WOE-transformed first, then X_test is binned and transformed using the mapping learned from X_train
X_train = X_train.copy()
X_test = X_test.copy()
for col in X_train.columns:
if col == 'default':
continue
#coarse binning for continuous features
if X_train[col].dtype != 'O':
if cmethod == 'quantile':  # equal-quantile binning
arrayA = np.arange(0,100,100/nclusters)
arrayB = np.array([100]);
arrayA = np.concatenate((arrayA,arrayB))
breakpoints = np.unique(np.percentile(X_train[col],arrayA))
if len(breakpoints) == 2:
breakpoints = np.array([breakpoints[0], np.mean(breakpoints), breakpoints[1]])
elif cmethod == 'equal':  # equal-width binning
minvalue = X_train[col].min()
maxvalue = X_train[col].max()
breakpoints = np.arange(minvalue, maxvalue, (maxvalue-minvalue)/nclusters)
breakpoints = np.append(breakpoints, maxvalue)
elif cmethod == 'kmeans':  # k-means clustering-based binning
x = np.array(X_train[col]).reshape([X_train.shape[0],1])
cmodel = skcluster.KMeans(n_clusters=nclusters, random_state=0).fit(x)
elif cmethod.upper() == 'DBSCAN':  # DBSCAN clustering-based binning
x = np.array(X_train[col]).reshape([X_train.shape[0],1])
cmodel = skcluster.DBSCAN(min_samples=nclusters).fit(x)
elif cmethod.upper() == 'BIRCH':  # Birch clustering-based binning
x = np.array(X_train[col]).reshape([X_train.shape[0],1])
cmodel = skcluster.Birch(threshold=0.1,n_clusters=None).fit(x)
elif cmethod == 'MiniBatchKMeans':  # MiniBatchKMeans clustering-based binning
x = np.array(X_train[col]).reshape([X_train.shape[0],1])
cmodel = skcluster.MiniBatchKMeans(n_clusters=nclusters).fit(x)
if (cmethod == 'quantile') or (cmethod == 'equal'):
#cut into bins and assign the corresponding labels
labels = np.arange(len(breakpoints) - 1)
X_train[col] = pd.cut(X_train[col],bins=breakpoints,right=True,labels=labels,include_lowest=True)
X_train[col] = X_train[col].astype('object')
X_test[col] = pd.cut(X_test[col],bins=breakpoints,right=True,labels=labels,include_lowest=True)
X_test[col] = X_test[col].astype('object')
elif (cmethod == 'kmeans') or (cmethod.upper()=='DBSCAN') or (cmethod.upper() =='BIRCH'):
#assign labels according to the clustering result
X_train[col] = cmodel.labels_
X_train[col] = X_train[col].astype('object')
if cmethod.upper()=='DBSCAN':
X_test[col] = cmodel.fit_predict(np.array(X_test[col]).reshape([X_test.shape[0],1]))
X_test[col] = X_test[col].astype('object')
else:
X_test[col] = cmodel.predict(np.array(X_test[col]).reshape([X_test.shape[0],1]))
X_test[col] = X_test[col].astype('object')
#WOE transformation
#values that appear in the test set but not in the training set get a WOE of 0
xtrainunique = X_train[col].unique()
xtestunique = X_test[col].unique()
for cat in xtestunique:
if not any(xtrainunique == cat):
X_test[col] = X_test[col].replace({cat:0})
#apply the WOE transformation to the training data and the same mapping to the test data
for cat in xtrainunique:
#compute the WOE of a single category
nob = max(1, sum((y_train == 1) & (X_train[col] == cat)))
tnob = sum(y_train == 1)
nog = max(1, sum((y_train == 0) & (X_train[col] == cat)))
tnog = sum(y_train == 0)
woei = np.log((nob/tnob)/(nog/tnog))
X_train[col] = X_train[col].replace({cat:woei})
if any(xtestunique == cat):
X_test[col] = X_test[col].replace({cat:woei})
return X_train, X_test
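# Illustrative usage sketch (the train/test split below is an assumption, not part of
# this module): learn bins and WOE values on the training set, then apply the same
# mapping to a held-out test set.
#
#     from sklearn.model_selection import train_test_split
#     cs = CreditScore('german')
#     X = cs.data.loc[:, cs.data.columns != 'default']
#     y = cs.data['default']
#     X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
#     X_train_woe, X_test_woe = cs.binandwoe_traintest(X_train, y_train, X_test,
#                                                      nclusters=5, cmethod='kmeans')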
def dataencoder(self):
data = self.data
#create dummy (one-hot) variables
data_feature = data.loc[:, data.columns != 'default']
data_feature0 = data_feature.loc[:, data_feature.dtypes != 'object']
data_feature1 = pd.DataFrame()
for col in data_feature.columns:
if data_feature[col].dtype == 'O':
le = preprocessing.LabelEncoder()
temp = pd.DataFrame(le.fit_transform(data_feature[col]), columns=[col])
data_feature1 = pd.concat([data_feature1, temp], axis=1)
enc = preprocessing.OneHotEncoder()
data_feature1_enc = pd.DataFrame(enc.fit_transform(data_feature1).toarray())
data_feature_enc = pd.concat([data_feature0, data_feature1_enc], axis=1)
return(data_feature_enc)
def imbalanceddata (self, X_train, y_train, resmethod):
if resmethod == 'ClusterCentroids':
nm = imblearn.under_sampling.ClusterCentroids(random_state=0)
X_res, y_res = nm.fit_sample(X_train, y_train)
elif resmethod == 'CondensedNearestNeighbour':
nm = imblearn.under_sampling.CondensedNearestNeighbour(random_state=0)
X_res, y_res = nm.fit_sample(X_train, y_train)
elif resmethod == 'NearMiss':
nm = imblearn.under_sampling.NearMiss(random_state=0)
X_res, y_res = nm.fit_sample(X_train, y_train)
elif resmethod == 'RandomUnderSampler':
nm = imblearn.under_sampling.RandomUnderSampler(random_state=0)
X_res, y_res = nm.fit_sample(X_train, y_train)
elif resmethod == 'ADASYN':
nm = imblearn.over_sampling.ADASYN(random_state=0)
X_res, y_res = nm.fit_sample(X_train, y_train)
elif resmethod == 'RandomOverSampler':
nm = imblearn.over_sampling.RandomOverSampler(random_state=0)
X_res, y_res = nm.fit_sample(X_train, y_train)
elif resmethod == 'SMOTE':
nm = imblearn.over_sampling.SMOTE(random_state=0)
X_res, y_res = nm.fit_sample(X_train, y_train)
elif resmethod == 'SMOTEENN':
nm = imblearn.combine.SMOTEENN(random_state=0)
X_res, y_res = nm.fit_sample(X_train, y_train)
elif resmethod == 'SMOTETomek':
nm = imblearn.combine.SMOTETomek(random_state=0)
X_res, y_res = nm.fit_sample(X_train, y_train)
else:
X_res, y_res = X_train, y_train
return X_res, y_res
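# Illustrative usage sketch: the resampling branches above depend on imbalanced-learn,
# whose imports are commented out at the top of this module and would need to be
# enabled (import imblearn.under_sampling / over_sampling / combine) before use.
#
#     X_res, y_res = cs.imbalanceddata(X_train_woe, y_train, resmethod='RandomUnderSampler')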
def modelmetrics_binary(self, predresult):
#accuracy
scores = metrics.accuracy_score(predresult['target'], predresult['predicted'])
print('cross_validation scores: %s' %scores)
#confusion matrix (counts and column-normalized proportions)
confusion_matrix = pd.DataFrame(metrics.confusion_matrix(predresult['target'], predresult['predicted']), index=['real_negtive', 'real_postive'], columns=['pred_negtive', 'pred_postive'])
confusion_matrix_prob = confusion_matrix.copy()
confusion_matrix_prob.iloc[:, 0] = confusion_matrix_prob.iloc[:, 0] / confusion_matrix_prob.iloc[:, 0].sum()
confusion_matrix_prob.iloc[:, 1] = confusion_matrix_prob.iloc[:, 1] / confusion_matrix_prob.iloc[:, 1].sum()
print(confusion_matrix)
print(confusion_matrix_prob)
#precision and recall
precision = metrics.precision_score(predresult['target'], predresult['predicted'])
recall = metrics.recall_score(predresult['target'], predresult['predicted'])
print('precision scores: %s' %precision)
print('recall scores: %s' %recall)
###AUC and KS statistics
auc = metrics.roc_auc_score(predresult.target, predresult.score)
print('AUC: %s' %auc)
###plot the ROC curve
fpr, tpr, _ = metrics.roc_curve(predresult.target, predresult.score)
plt.figure()
lw = 2
plt.plot(fpr, tpr, color='darkorange',
lw=lw, label='ROC curve (area = %0.2f)' % auc)
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
def modelmetrics_scores(self, predresult):
###AUC and KS statistics
auc = metrics.roc_auc_score(predresult.target, predresult.probability)
print('AUC: %s' %auc)
##### KS statistic
G = predresult.loc[predresult.target == 0, 'probability']
B = predresult.loc[predresult.target == 1, 'probability']
ks,d = ss.ks_2samp(G,B)
print('ks: %s d:%s' %(ks,d))
###model performance metrics at each probability cutoff p
metrics_p = pd.DataFrame()
for p in [0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]:
predresult['predicted'] = (predresult.probability > p).astype(int)
pred_accuracy = sum(predresult.predicted == predresult.target)/predresult.shape[0]
confusion_matrix = pd.DataFrame(metrics.confusion_matrix(predresult['target'], predresult['predicted']), index=['real_negtive', 'real_postive'], columns=['pred_negtive', 'pred_postive'])
confusion_matrix_prob = confusion_matrix.copy()
confusion_matrix_prob.iloc[:, 0] = confusion_matrix_prob.iloc[:, 0] / confusion_matrix_prob.iloc[:, 0].sum()
confusion_matrix_prob.iloc[:, 1] = confusion_matrix_prob.iloc[:, 1] / confusion_matrix_prob.iloc[:, 1].sum()
# print(confusion_matrix)
# print(confusion_matrix_prob)
precision = metrics.precision_score(predresult['target'], predresult['predicted'])
recall = metrics.recall_score(predresult['target'], predresult['predicted'])
pass_rate = sum(predresult.predicted == 0)/predresult.shape[0]
temp = pd.DataFrame({'p0': p, 'accuracy': pred_accuracy, 'precision': precision,
'recall': recall, 'pass_rate': pass_rate, 'FalseNegative': confusion_matrix_prob.iloc[1, 0]}, index=[0])
temp = temp[['p0', 'accuracy', 'precision', 'recall', 'pass_rate', 'FalseNegative']]
metrics_p = pd.concat([metrics_p, temp], ignore_index = True)
print(metrics_p)
###plot the ROC curve
fpr, tpr, _ = metrics.roc_curve(predresult.target, predresult.probability)
plt.figure()
lw = 2
plt.plot(fpr, tpr, color='darkorange',
lw=lw, label='ROC curve (area = %0.2f)' % auc)
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
###plot the KS curve
data1 = np.sort(G)
data2 = np.sort(B)
n1 = data1.shape[0]
n2 = data2.shape[0]
data_all = np.sort(np.concatenate([data1, data2]))
cdf1 = np.searchsorted(data1, data_all, side='right') / (1.0*n1)
cdf2 = np.searchsorted(data2, data_all, side='right') / (1.0*n2)
plt.figure()
plt.plot(data_all,cdf1, color='darkorange',lw=2, label='KS: %0.2f)' % ks)
plt.plot(data_all,cdf2, color='red')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('VALUE')
plt.ylabel('STATS')
plt.title('KS-CURVE characteristic example')
plt.legend(loc="lower right")
plt.show()
def maxprofit_p0(self, predresult, riskcontrol_cost, lend_rate, borrow_rate):
###model performance metrics at each probability cutoff p0
profit_p0 = pd.DataFrame()
for p0 in range(1, 100):
predresult['predicted'] = (predresult.probability > p0/100).astype(int)
confusion_matrix = pd.DataFrame(metrics.confusion_matrix(predresult['target'], predresult['predicted']), index=['real_negtive', 'real_postive'], columns=['pred_negtive', 'pred_postive'])
confusion_matrix_prob = confusion_matrix.copy()
confusion_matrix_prob.iloc[:, 0] = confusion_matrix_prob.iloc[:, 0] / confusion_matrix_prob.iloc[:, 0].sum()
confusion_matrix_prob.iloc[:, 1] = confusion_matrix_prob.iloc[:, 1] / confusion_matrix_prob.iloc[:, 1].sum()
pass_rate = sum(predresult.predicted == 0)/predresult.shape[0]
if np.isnan(confusion_matrix_prob.iloc[1, 0]):
confusion_matrix_prob.iloc[1, 0] = 0
revenue = pass_rate * (1 - confusion_matrix_prob.iloc[1, 0]) * (lend_rate - borrow_rate)
loss = riskcontrol_cost + pass_rate * confusion_matrix_prob.iloc[1, 0] * (1 + borrow_rate)
tot_profit = revenue - loss
profit_rate = tot_profit / pass_rate
temp = pd.DataFrame({'p0': p0/100, 'tot_profit': tot_profit, 'profit_rate': profit_rate, 'revenue': revenue, 'loss': loss, 'pass_rate': pass_rate, 'FalseNegative': confusion_matrix_prob.iloc[1, 0]}, index=[0])
profit_p0 = pd.concat([profit_p0, temp], ignore_index = True)
profit_p0 = profit_p0[['p0', 'tot_profit', 'profit_rate', 'revenue', 'loss', 'pass_rate', 'FalseNegative']]
profit_p0 = profit_p0.sort_values(by='tot_profit', ascending=False)
print("Best P0:")
print(profit_p0.iloc[0, :])
return profit_p0
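# Illustrative usage sketch: predresult is expected to hold a 'target' column (0/1
# defaults) and a 'probability' column (predicted default probabilities); the cost and
# rate figures below are made-up examples rather than values from this project.
#
#     profit_table = cs.maxprofit_p0(predresult, riskcontrol_cost=0.01,
#                                    lend_rate=0.18, borrow_rate=0.08)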
def loopmodelmetrics_scores(self, predresult):
###AUC and KS statistics
auc = metrics.roc_auc_score(predresult.target, predresult.probability)
print('AUC: %s' %auc)
##### KS statistic
G = predresult.loc[predresult.target == 0, 'probability']
B = predresult.loc[predresult.target == 1, 'probability']
ks,d = ss.ks_2samp(G,B)
print('ks: %s d:%s' %(ks,d))
###model performance metrics at each probability cutoff p
metrics_p = pd.DataFrame()
for p in [0.05, 0.1, 0.2, 0.3, 0.4, 0.5]:
predresult['predicted'] = (predresult.probability > p).astype(int)
pred_accuracy = sum(predresult.predicted == predresult.target)/predresult.shape[0]
confusion_matrix = pd.DataFrame(metrics.confusion_matrix(predresult['target'], predresult['predicted']), index=['real_negtive', 'real_postive'], columns=['pred_negtive', 'pred_postive'])
confusion_matrix_prob = confusion_matrix.copy()
confusion_matrix_prob.iloc[:, 0] = confusion_matrix_prob.iloc[:, 0] / confusion_matrix_prob.iloc[:, 0].sum()
confusion_matrix_prob.iloc[:, 1] = confusion_matrix_prob.iloc[:, 1] / confusion_matrix_prob.iloc[:, 1].sum()
precision = metrics.precision_score(predresult['target'], predresult['predicted'])
recall = metrics.recall_score(predresult['target'], predresult['predicted'])
pass_rate = sum(predresult.predicted == 0)/predresult.shape[0]
temp = pd.DataFrame({'p0': p, 'accuracy': pred_accuracy, 'precision': precision,
'recall': recall, 'pass_rate': pass_rate, 'FalseNegative': confusion_matrix_prob.iloc[1, 0]}, index=[0])
temp = temp[['p0', 'accuracy', 'precision', 'recall', 'pass_rate', 'FalseNegative']]
metrics_p = pd.concat([metrics_p, temp], ignore_index = True)
print(metrics_p)
return auc, ks, metrics_p
| apache-2.0 |
liuwenf/moose | python/peacock/tests/postprocessor_tab/test_PostprocessorSelectPlugin.py | 5 | 3902 | #!/usr/bin/env python
import sys
import os
import unittest
import shutil
import time
from PyQt5 import QtCore, QtWidgets
from peacock.PostprocessorViewer.plugins.PostprocessorSelectPlugin import main
from peacock.utils import Testing
import mooseutils
class TestPostprocessorSelectPlugin(Testing.PeacockImageTestCase):
"""
Test class for the ArtistToggleWidget which toggles postprocessor lines.
"""
#: QApplication: The main App for QT, this must be static to work correctly.
qapp = QtWidgets.QApplication(sys.argv)
def setUp(self):
"""
Creates the GUI containing the ArtistGroupWidget and the matplotlib figure axes.
"""
# Filenames to load
self._filename = '{}_test.csv'.format(self.__class__.__name__)
self._filename2 = '{}_test2.csv'.format(self.__class__.__name__)
# Read the data
filenames = [self._filename, self._filename2]
self._control, self._widget, self._window = main(filenames, mooseutils.PostprocessorReader)
def copyfiles(self, partial=False):
"""
Move files into the temporary location.
"""
if partial:
shutil.copyfile('../input/white_elephant_jan_2016_partial.csv', self._filename)
else:
shutil.copyfile('../input/white_elephant_jan_2016.csv', self._filename)
shutil.copyfile('../input/postprocessor.csv', self._filename2)
for data in self._widget._data:
data.load()
def tearDown(self):
"""
Remove temporary files.
"""
if os.path.exists(self._filename):
os.remove(self._filename)
if os.path.exists(self._filename2):
os.remove(self._filename2)
def testEmpty(self):
"""
Test that an empty plot is possible.
"""
self.assertImage('testEmpty.png')
def testSelect(self):
"""
Test that plotting from multiple files works.
"""
self.copyfiles()
vars = ['air_temp_set_1', 'sincos']
for i in range(len(vars)):
self._control._groups[i]._toggles[vars[i]].CheckBox.setCheckState(QtCore.Qt.Checked)
self._control._groups[i]._toggles[vars[i]].CheckBox.clicked.emit(True)
self.assertImage('testSelect.png')
def testUpdateData(self):
"""
Test that a postprocessor data updates when file is changed.
"""
self.copyfiles(partial=True)
var = 'air_temp_set_1'
self._control._groups[0]._toggles[var].CheckBox.setCheckState(QtCore.Qt.Checked)
self._control._groups[0]._toggles[var].CheckBox.clicked.emit(True)
self.assertImage('testUpdateData0.png')
# Reload the data (this would be done via a Timer)
time.sleep(1) # need to wait a bit for the modified time to change
self.copyfiles()
self.assertImage('testUpdateData1.png')
def testRepr(self):
"""
Test python scripting.
"""
self.copyfiles()
vars = ['air_temp_set_1', 'sincos']
for i in range(len(vars)):
self._control._groups[i]._toggles[vars[i]].CheckBox.setCheckState(QtCore.Qt.Checked)
self._control._groups[i]._toggles[vars[i]].CheckBox.clicked.emit(True)
output, imports = self._control.repr()
self.assertIn("data = mooseutils.PostprocessorReader('TestPostprocessorSelectPlugin_test.csv')", output)
self.assertIn("x = data('time')", output)
self.assertIn("y = data('air_temp_set_1')", output)
self.assertIn("axes0.plot(x, y, marker='', linewidth=1, color=[0.2, 0.627, 0.173, 1.0], markersize=1, linestyle='-', label='air_temp_set_1')", output)
self.assertIn("data = mooseutils.PostprocessorReader('TestPostprocessorSelectPlugin_test2.csv')", output)
if __name__ == '__main__':
unittest.main(module=__name__, verbosity=2)
| lgpl-2.1 |
tardis-sn/tardis | tardis/plasma/base.py | 1 | 12461 | import os
import re
import logging
import tempfile
import fileinput
import networkx as nx
import pandas as pd
from tardis.plasma.exceptions import PlasmaMissingModule, NotInitializedModule
from tardis.plasma.properties.base import *
from tardis.io.util import PlasmaWriterMixin
logger = logging.getLogger(__name__)
class BasePlasma(PlasmaWriterMixin):
outputs_dict = {}
hdf_name = "plasma"
def __init__(self, plasma_properties, property_kwargs=None, **kwargs):
self.outputs_dict = {}
self.input_properties = []
self.plasma_properties = self._init_properties(
plasma_properties, property_kwargs, **kwargs
)
self._build_graph()
# self.write_to_tex('Plasma_Graph')
self.update(**kwargs)
def __getattr__(self, item):
if item in self.outputs_dict:
return self.get_value(item)
else:
super(BasePlasma, self).__getattribute__(item)
def __setattr__(self, key, value):
if key != "module_dict" and key in self.outputs_dict:
raise AttributeError(
"Plasma inputs can only be updated using " "the 'update' method"
)
else:
super(BasePlasma, self).__setattr__(key, value)
def __dir__(self):
attrs = [item for item in self.__dict__ if not item.startswith("_")]
attrs += [
item for item in self.__class__.__dict__ if not item.startswith("_")
]
attrs += self.outputs_dict.keys()
return attrs
@property
def plasma_properties_dict(self):
return {item.name: item for item in self.plasma_properties}
def get_value(self, item):
return getattr(self.outputs_dict[item], item)
def _build_graph(self):
"""
Builds the directed dependency graph using networkx
:param plasma_modules:
:return:
"""
self.graph = nx.DiGraph()
## Adding all nodes
self.graph.add_nodes_from(
[
(plasma_property.name, {})
for plasma_property in self.plasma_properties
]
)
# Flagging all input modules
self.input_properties = [
item
for item in self.plasma_properties
if not hasattr(item, "inputs")
]
for plasma_property in self.plasma_properties:
# Skipping any module that is an input module
if plasma_property in self.input_properties:
continue
for input in plasma_property.inputs:
if input not in self.outputs_dict:
raise PlasmaMissingModule(
f"Module {plasma_property.name} requires input "
f"{input} which has not been added"
f" to this plasma"
)
try:
position = self.outputs_dict[input].outputs.index(input)
label = self.outputs_dict[input].latex_name[position]
label = "$" + label + "$"
label = label.replace("\\", "\\\\")
except:
label = input.replace("_", "-")
self.graph.add_edge(
self.outputs_dict[input].name,
plasma_property.name,
label=label,
)
def _init_properties(
self, plasma_properties, property_kwargs=None, **kwargs
):
"""
Builds a dictionary with the plasma module names as keys
Parameters
----------
plasma_modules : list
list of Plasma properties
property_kwargs : dict
dict of plasma module : kwargs pairs. kwargs should be a dict
of arguments that will be passed to the __init__ method of
the respective plasma module.
kwargs : dictionary
input values for input properties. For example, t_rad=[5000, 6000,],
j_blues=[..]
"""
if property_kwargs is None:
property_kwargs = {}
plasma_property_objects = []
self.previous_iteration_properties = []
self.outputs_dict = {}
for plasma_property in plasma_properties:
if issubclass(plasma_property, PreviousIterationProperty):
current_property_object = plasma_property(
**property_kwargs.get(plasma_property, {})
)
current_property_object.set_initial_value(kwargs)
self.previous_iteration_properties.append(
current_property_object
)
elif issubclass(plasma_property, Input):
if not set(kwargs.keys()).issuperset(plasma_property.outputs):
missing_input_values = set(plasma_property.outputs) - set(
kwargs.keys()
)
raise NotInitializedModule(
f"Input {missing_input_values} required for "
f"plasma but not given when "
f"instantiating the "
f"plasma"
)
current_property_object = plasma_property(
**property_kwargs.get(plasma_property, {})
)
else:
current_property_object = plasma_property(
self, **property_kwargs.get(plasma_property, {})
)
for output in plasma_property.outputs:
self.outputs_dict[output] = current_property_object
plasma_property_objects.append(current_property_object)
return plasma_property_objects
def store_previous_properties(self):
for property in self.previous_iteration_properties:
p = property.outputs[0]
self.outputs_dict[p].set_value(
self.get_value(re.sub(r"^previous_", "", p))
)
def update(self, **kwargs):
for key in kwargs:
if key not in self.outputs_dict:
raise PlasmaMissingModule(
f"Trying to update property {key}" f" that is unavailable"
)
self.outputs_dict[key].set_value(kwargs[key])
for module_name in self._resolve_update_list(kwargs.keys()):
self.plasma_properties_dict[module_name].update()
def freeze(self, *args):
"""
Freeze plasma properties.
This method freezes plasma properties to prevent them from being
updated: the values of a frozen property are fixed in the plasma
calculation. This is useful for example for setting up test cases.
Parameters
----------
args : iterable of str
Names of plasma properties to freeze.
Examples
--------
>>> plasma.freeze('t_electrons')
"""
for key in args:
if key not in self.outputs_dict:
raise PlasmaMissingModule(
"Trying to freeze property {0}"
" that is unavailable".format(key)
)
self.outputs_dict[key].frozen = True
def thaw(self, *args):
"""
Thaw plasma properties.
This method thaws (unfreezes) plasma properties allowing them to be
updated again.
Parameters
----------
args : iterable of str
Names of plasma properties to unfreeze.
Examples
--------
>>> plasma.thaw('t_electrons')
"""
for key in args:
if key not in self.outputs_dict:
raise PlasmaMissingModule(
"Trying to thaw property {0}"
" that is unavailable".format(key)
)
self.outputs_dict[key].frozen = False
def _update_module_type_str(self):
for node in self.graph:
self.outputs_dict[node]._update_type_str()
def _resolve_update_list(self, changed_properties):
"""
Returns a list of all plasma modules which are affected by the
changed modules due to their dependency in the plasma_graph.
Parameters
----------
changed_modules : list
all modules changed in the plasma
Returns
-------
: list
all affected modules.
"""
descendants_ob = []
for plasma_property in changed_properties:
node_name = self.outputs_dict[plasma_property].name
descendants_ob += nx.descendants(self.graph, node_name)
descendants_ob = list(set(descendants_ob))
sort_order = list(nx.topological_sort(self.graph))
descendants_ob.sort(key=lambda val: sort_order.index(val))
logger.debug(
f'Updating modules in the following order: {"->".join(descendants_ob)}'
)
return descendants_ob
def write_to_dot(self, fname, latex_label=True):
# self._update_module_type_str()
try:
import pygraphviz
except:
logger.warn(
"pygraphviz missing. Plasma graph will not be " "generated."
)
return
print_graph = self.graph.copy()
print_graph = self.remove_hidden_properties(print_graph)
for node in print_graph:
print_graph.node[str(node)]["label"] = node
if hasattr(self.plasma_properties_dict[node], "latex_formula"):
formulae = self.plasma_properties_dict[node].latex_formula
for output in range(0, len(formulae)):
formula = formulae[output]
label = formula.replace("\\", "\\\\")
print_graph.node[str(node)]["label"] += "\\n$"
print_graph.node[str(node)]["label"] += label
print_graph.node[str(node)]["label"] += "$"
nx.drawing.nx_agraph.write_dot(print_graph, fname)
def write_to_tex(self, fname_graph):
try:
import dot2tex
except:
logger.warn(
"dot2tex missing. Plasma graph will not be " "generated."
)
return
temp_fname = tempfile.NamedTemporaryFile().name
self.write_to_dot(temp_fname)
dot_string = open(temp_fname).read()
open(fname_graph, "w").write(dot2tex.dot2tex(dot_string, texmode="raw"))
for line in fileinput.input(fname_graph, inplace=1):
print(
line.replace(
r"\documentclass{article}",
r"\documentclass[class=minimal,border=20pt]{standalone}",
),
end="",
)
for line in fileinput.input(fname_graph, inplace=1):
print(line.replace(r"\enlargethispage{100cm}", ""), end="")
def remove_hidden_properties(self, print_graph):
for item in self.plasma_properties_dict.values():
module = self.plasma_properties_dict[item.name].__class__
if issubclass(module, HiddenPlasmaProperty):
output = module.outputs[0]
for value in self.plasma_properties_dict.keys():
if output in getattr(
self.plasma_properties_dict[value], "inputs", []
):
for input in self.plasma_properties_dict[
item.name
].inputs:
try:
position = self.outputs_dict[
input
].outputs.index(input)
label = self.outputs_dict[input].latex_name[
position
]
label = "$" + label + "$"
label = label.replace("\\", "\\\\")
except:
label = input.replace("_", "-")
self.graph.add_edge(
self.outputs_dict[input].name,
value,
label=label,
)
print_graph.remove_node(str(item.name))
return print_graph
| bsd-3-clause |
Upward-Spiral-Science/ugrad-data-design-team-0 | reveal/histogramequalization/histeq.py | 1 | 3107 | ## source: www.janeriksolem.net
import os
import matplotlib.pyplot as plt
from PIL import Image
from pylab import *
from numpy import *
def get_imlist(path):
""" Returns a list of filenames for
all jpg images in a directory. """
return [os.path.join(path,f) for f in os.listdir(path) if f.endswith('.jpg')]
def compute_average(imlist):
""" Compute the average of a list of images. """
# open first image and make into array of type float
averageim = array(Image.open(imlist[0]), 'f')
skipped = 0
for imname in imlist[1:]:
try:
averageim += array(Image.open(imname))
except:
print(imname + "...skipped")
skipped += 1
averageim /= (len(imlist) - skipped)
# return average as uint8
return array(averageim, 'uint8')
def convert_to_grayscale(imlist):
""" Convert a set of images to grayscale. """
for imname in imlist:
im = Image.open(imname).convert("L")
im.save(imname)
def imresize(im,sz):
""" Resize an image array using PIL. """
pil_im = Image.fromarray(uint8(im))
return array(pil_im.resize(sz))
def histeq(im,nbr_bins=256):
""" Histogram equalization of a grayscale image. """
# get image histogram
imhist,bins = histogram(im.flatten(),nbr_bins,normed=True)
cdf = imhist.cumsum() # cumulative distribution function
cdf = 255 * cdf / cdf[-1] # normalize
# use linear interpolation of cdf to find new pixel values
im2 = interp(im.flatten(),bins[:-1],cdf)
return im2.reshape(im.shape), cdf
def plot_2D_boundary(plot_range,points,decisionfcn,labels,values=[0]):
""" Plot_range is (xmin,xmax,ymin,ymax), points is a list
of class points, decisionfcn is a function to evaluate,
labels is a list of labels that decisionfcn returns for each class,
values is a list of decision contours to show. """
clist = ['b','r','g','k','m','y'] # colors for the classes
# evaluate on a grid and plot contour of decision function
x = arange(plot_range[0],plot_range[1],.1)
y = arange(plot_range[2],plot_range[3],.1)
xx,yy = meshgrid(x,y)
xxx,yyy = xx.flatten(),yy.flatten() # lists of x,y in grid
zz = array(decisionfcn(xxx,yyy))
zz = zz.reshape(xx.shape)
# plot contour(s) at values
contour(xx,yy,zz,values)
# for each class, plot the points with '*' for correct, 'o' for incorrect
for i in range(len(points)):
d = decisionfcn(points[i][:,0],points[i][:,1])
correct_ndx = labels[i]==d
incorrect_ndx = labels[i]!=d
plot(points[i][correct_ndx,0],points[i][correct_ndx,1],'*',color=clist[i])
plot(points[i][incorrect_ndx,0],points[i][incorrect_ndx,1],'o',color=clist[i])
axis('equal')
##im = array(Image.open('black-and-white-tips.jpg').convert('L'))
im = array(Image.open('black-and-white-tips.jpg'))
im2,cdf = histeq(im)
plt.plot(im2)
##plt.plot(cdf)
##plt.savefig('black-and-white-tips2img.jpg')
plt.show()
| apache-2.0 |
jashwanth9/Expert-recommendation-system | code/anssamble_logistic.py | 1 | 3433 | import numpy as np
import cPickle as pickle
from sklearn import linear_model
def read_files(file_name):
with open(file_name) as question_info_file:
content = question_info_file.readlines()
formated_content = {}
list_keys = []
for i in content:
element = i.strip("\n").split("\t")
values = []
for i in range(1, len(element)):
temp_element = element[i].strip()
if temp_element == '/' or temp_element == '':
values.append([])
else:
values.append(map(int, temp_element.split('/')))
list_keys.append(element[0])
formated_content[element[0]] = values
return formated_content, list_keys
def norm_last3_feats(question_info_data, question_info_keys):
x_train = []
for i in question_info_keys:
p = question_info_data[i]
x_train.append(p[3] + p[4] + p[5])
mean_train = np.mean(x_train, axis=0)
std_train = np.std(x_train, axis=0)
x_train = (x_train - mean_train)/(std_train)
last3_feats = {}
for i in question_info_keys:
p = question_info_data[i]
x_t = p[3] + p[4] + p[5]
last3_feats[i] = (x_t - mean_train)/(std_train)
return last3_feats
def loadTrainingData(inpFile, labelFile, ques_3_data):
with open(labelFile) as invited_info_file:
content = invited_info_file.readlines()
with open(inpFile) as inp_info_file:
train_data = inp_info_file.readlines()
x_train = []
y_train = []
for i in range(len(content)):
element = content[i].strip("\n").split("\t")
inpele = train_data[i].strip("\n").split(",")
x_train.append(np.hstack((ques_3_data[inpele[0]], np.array(float(inpele[2].strip())))))
y_train.append(int(element[2].strip()))
return x_train, y_train
def loadTestData(testFile, labelFile, ques_3_data):
with open(labelFile) as invited_info_file:
content = invited_info_file.readlines()
testData = []
with open(testFile, 'r') as f1:
for line in f1:
testData.append(line.rstrip('\r\n').split(','))
x_test = []
for i in range(len(content)):
element = content[i].strip("\n").split(",")
x_test.append(np.hstack((ques_3_data[element[0]], np.array(float(element[2].strip())))))
return x_test, testData
def writeToOutputFile(x_test, y_pred, outFile, testData):
with open(outFile, 'w') as f1:
f1.write("qid,uid,label\n")
y_pred[y_pred<0] = 0
for i in range(len(x_test)):
f1.write("{0},{1},{2}\n".format(testData[i+1][0], testData[i+1][1], y_pred[i][1]))
if __name__ == '__main__':
question_info_data, question_info_keys = read_files('../train_data/question_info.txt')
# p = question_info_data[question_info_keys[0]]
# print p[3] + p[4] + p[5]
ques_3_data = norm_last3_feats(question_info_data, question_info_keys)
print ques_3_data[question_info_keys[0]]
x_train, y_train = loadTrainingData('../features/contentbased_char_tfidfrevtrain.csv', '../train_data/invited_info_train.txt', ques_3_data)
x_test, testData = loadTestData('../train_data/validate_nolabel.txt', '../features/content_char_tfidf_rev.csv', ques_3_data)
print('x_train = ', x_train[0])
print('y_train = ', y_train[0])
print('x_test = ', x_test[0])
lr = linear_model.LogisticRegression()
model = lr.fit(x_train, y_train)
print(model.coef_)
print(model.intercept_)
print(model.get_params())
y_pred = model.predict_proba(x_test)
writeToOutputFile(x_test, y_pred, '../validation/val_anssamble_logreg.csv', testData)
# weights - [ 5.17249459]] bias [-2.8807507] | apache-2.0 |
DistrictDataLabs/yellowbrick | tests/test_classifier/conftest.py | 1 | 3458 | # tests.test_classifier.conftest
# Provides fixtures for the classification tests module.
#
# Author: Benjamin Bengfort
# Created: Fri Mar 23 18:07:00 2018 -0400
#
# Copyright (C) 2018 The scikit-yb developers
# For license information, see LICENSE.txt
#
# ID: conftest.py [1e04216] [email protected] $
"""
Provides fixtures for the classification tests module.
"""
##########################################################################
## Imports
##########################################################################
import pytest
from tests.fixtures import Dataset, Split
from yellowbrick.exceptions import NotFitted
from sklearn.exceptions import NotFittedError
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split as tts
##########################################################################
## Assertion Helpers
##########################################################################
ATTRS = ["classes_", "class_count_", "score_"]
def assert_not_fitted(estimator, attrs=ATTRS, X_test=None):
"""
Check that the estimator is not fitted by ensuring it does not have
any of the attributes specified in attrs. If X_test is specified,
it is passed to predict, which must also raise a NotFitted exception.
"""
__traceback_hide__ = True
for attr in attrs:
msg = "model is fitted, has {} attribute".format(attr)
assert not hasattr(estimator, attr), msg
if X_test is not None:
with pytest.raises((NotFitted, NotFittedError)):
estimator.predict(X_test)
def assert_fitted(estimator, attrs=ATTRS, X_test=None):
"""
Check that the estimator is fitted by ensuring it does have the attributes
passed in attrs. If X_test is specified, it is passed to predict which
must not raise a NotFitted exception.
"""
__traceback_hide__ = True
for attr in attrs:
msg = "model is not fitted, does not have {} attribute".format(attr)
assert hasattr(estimator, attr), msg
if X_test is not None:
try:
estimator.predict(X_test)
except (NotFitted, NotFittedError):
pytest.fail("estimator not fitted raised from predict")
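# Illustrative usage sketch (GaussianNB and the reduced attribute list are example
# choices, not part of this module); X_train/X_test stand for any train/test split:
#
#     from sklearn.naive_bayes import GaussianNB
#     model = GaussianNB()
#     assert_not_fitted(model, attrs=["classes_"])
#     model.fit(X_train, y_train)
#     assert_fitted(model, attrs=["classes_"], X_test=X_test)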
##########################################################################
## Fixtures
##########################################################################
@pytest.fixture(scope="class")
def binary(request):
"""
Creates a random binary classification dataset fixture
"""
X, y = make_classification(
n_samples=500,
n_features=20,
n_informative=8,
n_redundant=2,
n_classes=2,
n_clusters_per_class=3,
random_state=87,
)
X_train, X_test, y_train, y_test = tts(X, y, test_size=0.2, random_state=93)
dataset = Dataset(Split(X_train, X_test), Split(y_train, y_test))
request.cls.binary = dataset
@pytest.fixture(scope="class")
def multiclass(request):
"""
Creates a random multiclass classification dataset fixture
"""
X, y = make_classification(
n_samples=500,
n_features=20,
n_informative=8,
n_redundant=2,
n_classes=6,
n_clusters_per_class=3,
random_state=87,
)
X_train, X_test, y_train, y_test = tts(X, y, test_size=0.2, random_state=93)
dataset = Dataset(Split(X_train, X_test), Split(y_train, y_test))
request.cls.multiclass = dataset
| apache-2.0 |
adykstra/mne-python | examples/connectivity/plot_mne_inverse_label_connectivity.py | 3 | 7519 | """
=========================================================================
Compute source space connectivity and visualize it using a circular graph
=========================================================================
This example computes the all-to-all connectivity between 68 regions in
source space based on dSPM inverse solutions and a FreeSurfer cortical
parcellation. The connectivity is visualized using a circular graph which
is ordered based on the locations of the regions in the axial plane.
"""
# Authors: Martin Luessi <[email protected]>
# Alexandre Gramfort <[email protected]>
# Nicolas P. Rougier (graph code borrowed from his matplotlib gallery)
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.datasets import sample
from mne.minimum_norm import apply_inverse_epochs, read_inverse_operator
from mne.connectivity import spectral_connectivity
from mne.viz import circular_layout, plot_connectivity_circle
print(__doc__)
###############################################################################
# Load our data
# -------------
#
# First we'll load the data we'll use in connectivity estimation. We'll use
# the sample MEG data provided with MNE.
data_path = sample.data_path()
subjects_dir = data_path + '/subjects'
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
fname_raw = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
fname_event = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
# Load data
inverse_operator = read_inverse_operator(fname_inv)
raw = mne.io.read_raw_fif(fname_raw)
events = mne.read_events(fname_event)
# Add a bad channel
raw.info['bads'] += ['MEG 2443']
# Pick MEG channels
picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=False, eog=True,
exclude='bads')
# Define epochs for left-auditory condition
event_id, tmin, tmax = 1, -0.2, 0.5
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=dict(mag=4e-12, grad=4000e-13,
eog=150e-6))
###############################################################################
# Compute inverse solutions and their connectivity
# ------------------------------------------------
#
# Next, we need to compute the inverse solution for this data. This will return
# the sources / source activity that we'll use in computing connectivity. We'll
# compute the connectivity in the alpha band of these sources. We can specify
# particular frequencies to include in the connectivity with the ``fmin`` and
# ``fmax`` flags. Notice from the status messages how mne-python:
#
# 1. reads an epoch from the raw file
# 2. applies SSP and baseline correction
# 3. computes the inverse to obtain a source estimate
# 4. averages the source estimate to obtain a time series for each label
# 5. includes the label time series in the connectivity computation
# 6. moves to the next epoch.
#
# This behaviour is because we are using generators. Since we only need to
# operate on the data one epoch at a time, using a generator allows us to
# compute connectivity in a computationally efficient manner where the amount
# of memory (RAM) needed is independent from the number of epochs.
# Compute inverse solution and for each epoch. By using "return_generator=True"
# stcs will be a generator object instead of a list.
snr = 1.0 # use lower SNR for single epochs
lambda2 = 1.0 / snr ** 2
method = "dSPM" # use dSPM method (could also be MNE or sLORETA)
stcs = apply_inverse_epochs(epochs, inverse_operator, lambda2, method,
pick_ori="normal", return_generator=True)
# Get labels for FreeSurfer 'aparc' cortical parcellation with 34 labels/hemi
labels = mne.read_labels_from_annot('sample', parc='aparc',
subjects_dir=subjects_dir)
label_colors = [label.color for label in labels]
# Average the source estimates within each label using sign-flips to reduce
# signal cancellations, also here we return a generator
src = inverse_operator['src']
label_ts = mne.extract_label_time_course(stcs, labels, src, mode='mean_flip',
return_generator=True)
fmin = 8.
fmax = 13.
sfreq = raw.info['sfreq'] # the sampling frequency
con_methods = ['pli', 'wpli2_debiased']
con, freqs, times, n_epochs, n_tapers = spectral_connectivity(
label_ts, method=con_methods, mode='multitaper', sfreq=sfreq, fmin=fmin,
fmax=fmax, faverage=True, mt_adaptive=True, n_jobs=1)
# con is a 3D array, get the connectivity for the first (and only) freq. band
# for each method
con_res = dict()
for method, c in zip(con_methods, con):
con_res[method] = c[:, :, 0]
###############################################################################
# Make a connectivity plot
# ------------------------
#
# Now, we visualize this connectivity using a circular graph layout.
# First, we reorder the labels based on their location in the left hemi
label_names = [label.name for label in labels]
lh_labels = [name for name in label_names if name.endswith('lh')]
# Get the y-location of the label
label_ypos = list()
for name in lh_labels:
idx = label_names.index(name)
ypos = np.mean(labels[idx].pos[:, 1])
label_ypos.append(ypos)
# Reorder the labels based on their location
lh_labels = [label for (yp, label) in sorted(zip(label_ypos, lh_labels))]
# For the right hemi
rh_labels = [label[:-2] + 'rh' for label in lh_labels]
# Save the plot order and create a circular layout
node_order = list()
node_order.extend(lh_labels[::-1]) # reverse the order
node_order.extend(rh_labels)
node_angles = circular_layout(label_names, node_order, start_pos=90,
group_boundaries=[0, len(label_names) / 2])
# Plot the graph using node colors from the FreeSurfer parcellation. We only
# show the 300 strongest connections.
plot_connectivity_circle(con_res['pli'], label_names, n_lines=300,
node_angles=node_angles, node_colors=label_colors,
title='All-to-All Connectivity left-Auditory '
'Condition (PLI)')
###############################################################################
# Make two connectivity plots in the same figure
# ----------------------------------------------
#
# We can also assign these connectivity plots to axes in a figure. Below we'll
# show the connectivity plot using two different connectivity methods.
fig = plt.figure(num=None, figsize=(8, 4), facecolor='black')
no_names = [''] * len(label_names)
for ii, method in enumerate(con_methods):
plot_connectivity_circle(con_res[method], no_names, n_lines=300,
node_angles=node_angles, node_colors=label_colors,
title=method, padding=0, fontsize_colorbar=6,
fig=fig, subplot=(1, 2, ii + 1))
plt.show()
###############################################################################
# Save the figure (optional)
# --------------------------
#
# By default matplotlib does not save using the facecolor, even though this was
# set when the figure was generated. If not set via savefig, the labels, title,
# and legend will be cut off from the output png file.
# fname_fig = data_path + '/MEG/sample/plot_inverse_connect.png'
# fig.savefig(fname_fig, facecolor='black')
| bsd-3-clause |
oshadura/GVgenetic | examples-ga/nsga.py | 1 | 4498 | import array
import random
import json
import numpy
from math import sqrt
from deap import algorithms
from deap import base
from deap import benchmarks
from deap.benchmarks.tools import diversity, convergence, hypervolume
from deap import creator
from deap import tools
creator.create("FitnessMin", base.Fitness, weights=(-1.0, -1.0))
creator.create("Individual", array.array, typecode='d', fitness=creator.FitnessMin)
toolbox = base.Toolbox()
# Problem definition
# Functions zdt1, zdt2, zdt3, zdt6 have bounds [0, 1]
BOUND_LOW, BOUND_UP = 0.0, 1.0
# Functions zdt4 has bounds x1 = [0, 1], xn = [-5, 5], with n = 2, ..., 10
# BOUND_LOW, BOUND_UP = [0.0] + [-5.0]*9, [1.0] + [5.0]*9
# Functions zdt1, zdt2, zdt3 have 30 dimensions, zdt4 and zdt6 have 10
NDIM = 30
def uniform(low, up, size=None):
try:
return [random.uniform(a, b) for a, b in zip(low, up)]
except TypeError:
return [random.uniform(a, b) for a, b in zip([low] * size, [up] * size)]
toolbox.register("attr_float", uniform, BOUND_LOW, BOUND_UP, NDIM)
toolbox.register("individual", tools.initIterate, creator.Individual, toolbox.attr_float)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
toolbox.register("evaluate", runtimeGV)
toolbox.register("mate", tools.cxSimulatedBinaryBounded, low=BOUND_LOW, up=BOUND_UP, eta=20.0)
toolbox.register("mutate", tools.mutPolynomialBounded, low=BOUND_LOW, up=BOUND_UP, eta=20.0, indpb=1.0/NDIM)
toolbox.register("select", tools.selNSGA2)
def main(seed=None):
random.seed(seed)
NGEN = 250
MU = 100
CXPB = 0.9
stats = tools.Statistics(lambda ind: ind.fitness.values)
stats.register("avg", numpy.mean, axis=0)
stats.register("std", numpy.std, axis=0)
stats.register("min", numpy.min, axis=0)
stats.register("max", numpy.max, axis=0)
logbook = tools.Logbook()
logbook.header = "gen", "evals", "std", "min", "avg", "max"
pop = toolbox.population(n=MU)
# Evaluate the individuals with an invalid fitness
invalid_ind = [ind for ind in pop if not ind.fitness.valid]
fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
for ind, fit in zip(invalid_ind, fitnesses):
ind.fitness.values = fit
# This is just to assign the crowding distance to the individuals
# no actual selection is done
pop = toolbox.select(pop, len(pop))
record = stats.compile(pop)
logbook.record(gen=0, evals=len(invalid_ind), **record)
print(logbook.stream)
# Begin the generational process
for gen in range(1, NGEN):
# Vary the population
offspring = tools.selTournamentDCD(pop, len(pop))
offspring = [toolbox.clone(ind) for ind in offspring]
for ind1, ind2 in zip(offspring[::2], offspring[1::2]):
if random.random() <= CXPB:
toolbox.mate(ind1, ind2)
toolbox.mutate(ind1)
toolbox.mutate(ind2)
del ind1.fitness.values, ind2.fitness.values
# Evaluate the individuals with an invalid fitness
invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
for ind, fit in zip(invalid_ind, fitnesses):
ind.fitness.values = fit
# Select the next generation population
pop = toolbox.select(pop + offspring, MU)
record = stats.compile(pop)
logbook.record(gen=gen, evals=len(invalid_ind), **record)
print(logbook.stream)
print("Final population hypervolume is %f" % hypervolume(pop, [11.0, 11.0]))
return pop, logbook
if __name__ == "__main__":
# with open("pareto_front/zdt1_front.json") as optimal_front_data:
# optimal_front = json.load(optimal_front_data)
# Use 500 of the 1000 points in the json file
# optimal_front = sorted(optimal_front[i] for i in range(0, len(optimal_front), 2))
pop, stats = main()
# pop.sort(key=lambda x: x.fitness.values)
print(stats)
print("Convergence: ", convergence(pop, optimal_front))
print("Diversity: ", diversity(pop, optimal_front[0], optimal_front[-1]))
import matplotlib.pyplot as plt
import numpy
front = numpy.array([ind.fitness.values for ind in pop])
optimal_front = numpy.array(optimal_front)
plt.scatter(optimal_front[:,0], optimal_front[:,1], c="r")
plt.scatter(front[:,0], front[:,1], c="b")
plt.axis("tight")
plt.show()
| gpl-2.0 |
pm8k/Spomato | setup.py | 1 | 1053 | """Author: Matthew Russell
Setup file for Spomato
"""
from setuptools import setup, find_packages
def readme():
"""Opens the readme file and returns it as a string
Returns
-------
string:
A string representation of the readme file
"""
with open('README.md') as file:
return file.read()
MAJOR = 0
MINOR = 2
PATCH = 0
VERSION = '.'.join([str(v) for v in [MAJOR, MINOR, PATCH]])
setup(name='spomato',
version=VERSION,
description='Tomato Timer with Spotify',
url='https://github.com/pm8k/spomato',
download_url='https://github.com/pm8k/spomato/archive/{V}.tar.gz'.format(V=VERSION),
long_description=readme(),
author='Matthew Russell',
author_email='[email protected]',
license='MIT',
keywords=['python', 'spotify', 'spotipy', 'tomato', 'timer', 'music'],
py_modules=['spomato'],
packages=['spomato'],
install_requires=[
'pandas',
'spotipy'
],
include_package_data=True,
zip_safe=False)
| mit |
tonysyu/deli | deli/layout/bounding_box.py | 1 | 4967 | import numpy as np
from matplotlib.transforms import Bbox
from traits.api import Event, HasStrictTraits, Instance, Property
__all__ = ['BoundingBox']
def calc_bounds(x, current_bounds):
x_min, x_max = current_bounds
x_lo = min(np.min(x), x_min)
x_hi = max(np.max(x), x_max)
if x_lo < x_min or x_hi > x_max:
return (x_lo, x_hi)
else:
return None
class Signal(object):
""" Signal that calls all connected handlers when fired.
This event signal is connected to `MPLBbox`, which is not a HasTraits
class, and thus, cannot fire Traits events.
"""
def __init__(self):
self._handlers = []
def connect(self, handler):
self._handlers.append(handler)
def fire(self, *args):
for handler in self._handlers:
handler(*args)
class MPLBbox(Bbox):
""" Bounding box represented by two (x, y) pairs.
Subclass Matplotlib's `Bbox` to fire an event when the bounds are changed.
"""
def __init__(self, points):
self.changed = Signal()
super(MPLBbox, self).__init__(points)
def invalidate(self):
super(MPLBbox, self).invalidate()
self.changed.fire()
@classmethod
def from_rect(cls, rect):
""" Return bounding box from rect given as (x0, y0, width, height).
Override this since matplotlib returns `Bbox`, not subclass, instance.
"""
x0, y0, width, height = rect
return cls.from_extents(x0, y0, x0 + width, y0 + height)
@classmethod
def from_extents(cls, x0, y0, x1, y1):
""" Return bounding box from extents given as (x0, y0, x1, y1).
Override this since matplotlib returns `Bbox`, not subclass, instance.
"""
points = np.array([[x0, y0],
[x1, y1]], dtype=np.float_)
return cls(points)
def BboxProperty(attr_name, readonly=False):
""" Property trait that accesses an attr on a BoundingBox's `_bbox` trait.
Parameters
----------
attr_name : str
Name of attribute on `BoundingBox._bbox` (i.e. matplotlib's `Bbox`).
readonly : bool
If True, no setter is defined for this property.
"""
def _bbox_getter(attr_name):
def get_attr(self):
return getattr(self._bbox, attr_name)
return get_attr
def _bbox_setter(attr_name):
def set_attr(self, value):
return setattr(self._bbox, attr_name, value)
return set_attr
if readonly:
return Property(fget=_bbox_getter(attr_name))
return Property(fget=_bbox_getter(attr_name),
fset=_bbox_setter(attr_name))
class BoundingBox(HasStrictTraits):
""" Bounding box represented by two (x, y) pairs.
This is a traits-wrapper around matplotlib's `Bbox`. Whenever the
underlying bounding-box is changed, the `updated` event is fired.
"""
_bbox = Instance(MPLBbox)
updated = Event
@classmethod
def from_rect(cls, rect):
return cls.from_mpl_bbox(MPLBbox.from_rect(rect))
@classmethod
def from_size(cls, size):
rect = [0, 0] + list(size)
return cls.from_mpl_bbox(MPLBbox.from_rect(rect))
@classmethod
def from_extents(cls, x0, y0, x1, y1):
return cls.from_mpl_bbox(MPLBbox.from_extents(x0, y0, x1, y1))
@classmethod
def from_mpl_bbox(cls, bbox):
instance = cls(_bbox=bbox)
bbox.changed.connect(instance._bbox_updated)
return instance
def copy(self):
return self.__class__.from_rect(self.rect)
def _bbox_updated(self):
"""Callback method for observer pattern."""
self.updated = True
# -------------------------------------------------------------------------
# Bounds Accessors
# -------------------------------------------------------------------------
x0 = BboxProperty('x0')
x1 = BboxProperty('x1')
y0 = BboxProperty('y0')
y1 = BboxProperty('y1')
x_limits = BboxProperty('intervalx')
y_limits = BboxProperty('intervaly')
rect = BboxProperty('bounds')
width = BboxProperty('width', readonly=True)
height = BboxProperty('height', readonly=True)
size = Property
def update_from_x_data(self, x_data):
x_span = calc_bounds(x_data, self.x_limits)
if x_span is not None:
self.x_limits = x_span
def update_from_y_data(self, y_data):
y_span = calc_bounds(y_data, self.y_limits)
if y_span is not None:
self.y_limits = y_span
def update_from_extents(self, x_min, y_min, x_max, y_max):
x0 = min(x_min, self.x_limits[0])
x1 = max(x_max, self.x_limits[1])
y0 = min(y_min, self.y_limits[0])
y1 = max(y_max, self.y_limits[1])
self.rect = (x0, y0, x1 - x0, y1 - y0)
def _get_size(self):
return (self.width, self.height)
def _set_size(self, size):
self.rect = (self.x0, self.y0) + tuple(size)
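# Minimal usage sketch (illustrative addition, not part of the original
# module): the `updated` event fires whenever the underlying bbox changes.
if __name__ == '__main__':
    bbox = BoundingBox.from_rect((0.0, 0.0, 10.0, 10.0))
    bbox.on_trait_change(lambda: None, 'updated')     # handler runs on any change
    bbox.update_from_x_data(np.array([-5.0, 15.0]))   # widens x_limits to (-5, 15)
    print(bbox.x_limits)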
| bsd-3-clause |
ArnossArnossi/checkmate | checkmate/contrib/plugins/git/test/lib/test_repository.py | 3 | 4705 | """
This file is part of checkmate, a meta code checker written in Python.
Copyright (C) 2015 Andreas Dewes, QuantifiedCode UG
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import pytest
from ...lib.repository import Repository
from .. import test_repository_directory
import tempfile
import os
import os.path
import subprocess
import datetime
@pytest.fixture(scope = "function")
def tmpdir(request):
tmpdir = tempfile.mkdtemp()
def finalizer():
subprocess.call(["rm","-rf",tmpdir])
request.addfinalizer(finalizer)
return tmpdir
@pytest.fixture(scope = "function")
def blank_repository(request,tmpdir):
repository = Repository(path = tmpdir)
return repository
@pytest.fixture(scope = "function")
def initialized_repository(request,blank_repository,test_repository_directory):
blank_repository.init()
blank_repository.add_remote(url = test_repository_directory,name = "my_origin")
blank_repository.pull(remote = "my_origin",branch = "master")
return blank_repository
def test_init(blank_repository):
blank_repository.init()
assert os.path.exists(blank_repository.path+"/.git")
assert os.path.isdir(blank_repository.path+"/.git")
def test_add_remote(blank_repository,test_repository_directory):
blank_repository.init()
blank_repository.add_remote(url = test_repository_directory,name = "my_origin")
assert blank_repository.get_remotes() == ["my_origin"]
def test_pull_master_from_origin(initialized_repository):
initialized_repository.pull("master","my_origin")
assert set(['README.md','d3py','setup.py','tests'])\
.issubset(set(os.listdir(initialized_repository.path)))
def test_get_files_in_commit(initialized_repository):
commit_sha = 'e29a09687b91381bf96034fcf1921956177c2d54'
files_in_commit = initialized_repository.get_files_in_commit(commit_sha)
valid_files_in_commit = set([u'd3py/geoms/bar.py',
u'd3py/templates.py',
u'd3py/geoms/area.py',
u'examples/d3py_line.py',
u'd3py/geoms/geom.py',
u'd3py/__init__.py',
u'examples/d3py_area.py',
u'tests/test_javascript.py',
u'examples/d3py_scatter.py',
u'd3py/geoms/__init__.py',
u'examples/d3py_bar.py',
u'd3py/geoms/xaxis.py',
u'.gitignore',
u'd3py/d3.js',
u'examples/d3py_vega_scatter.py',
u'd3py/networkx_figure.py',
u'README.md',
u'examples/d3py_multiline.py',
u'examples/d3py_vega_line.py',
u'setup.py',
u'd3py/javascript.py',
u'd3py/css.py',
u'd3py/geoms/yaxis.py',
u'tests/test_figure.py',
u'd3py/geoms/line.py',
u'd3py/vega.py',
u'd3py/d3py_template.html',
u'd3py/pandas_figure.py',
u'examples/d3py_graph.py',
u'd3py/geoms/Readme.md',
u'examples/d3py_vega_area.py',
u'examples/d3py_vega_bar.py',
u'd3py/geoms/point.py',
u'd3py/HTTPHandler.py',
u'd3py/test.py',
u'd3py/vega_template.html',
u'd3py/figure.py',
u'd3py/geoms/graph.py'])
assert set([f['path'] for f in files_in_commit]) == valid_files_in_commit | agpl-3.0 |
pyrrho314/recipesystem | trunk/gempy/scripts/cleanir.py | 1 | 47708 | #!/usr/bin/env python
#
# 2007 Jul 8 - Andrew W Stephens - alpha version
# 2007 Jul 9 - AWS - beta version
# 2007 Jul 10 - AWS - move most operations to cleanquad function
# 2007 Jul 11 - AWS - use stddev to decide whether to keep orig quad
# 2007 Jul 14 - AWS - generalized code to allow different pattern sizes
# 2007 Jul 18 - AWS - fix bug generating index arrays
# 2007 Jul 20 - AWS - add quadrant bias-level normalization
# 2007 Jul 23 - AWS - add force option
# 2007 Aug 06 - AWS - f/6 spectroscopy mode: use top & bottom for pattern
# 2007 Aug 22 - AWS - add -a flag to use all pixels
# 2008 Jan 11 - AWS - check for available image extensions
# 2008 Feb 05 - AWS - don't close input file until the end (req'd if next>2)
# 2008 Oct 02 - AWS - don't write pattern unless given the -p flag
# 2009 May 03 - AWS - use conformant default output file name
# 2009 May 13 - AWS - verify FITS header (old images have unquoted release date)
# 2009 May 22 - AWS - output full-frame pattern
# 2009 May 22 - AWS - improve quadrant bias normalization
# 2009 May 23 - AWS - add optional sky frame
# 2009 May 26 - AWS - add user-supplied bias offset correction
# 2009 Jul 04 - AWS - do not try to bias correct spectral flats
# 2009 Oct 24 - AWS - add basic row filtering
# 2009 Nov 06 - AWS - ignore bad pixels flagged in DQ extension
# 2009 Nov 08 - AWS - use mode for quadrant bias level normalization
# 2009 Nov 12 - AWS - use sigma-clipped stddev to judge quality of bias normalization
# 2009 Nov 17 - AWS - fit a Gaussian to the sky pixel distribution for bias norm.
# 2010 Feb 02 - AWS - sky subtract before quadrant normalization
# 2010 Feb 18 - AWS - add sigma-clipping to row filtering
# 2010 Apr 09 - AWS - only check for gcal status if OBSTYPE = FLAT
# 2010 Apr 10 - AWS - accept list input
# 2010 Apr 13 - AWS - minor tweak of the spectroscopic regions
# 2010 Jul 11 - AWS - allow images sizes which are multiples of the pattern size
# 2010 Oct 08 - AWS - option to read in bad pixel mask (e.g. object mask from nisky)
# 2010 Oct 10 - AWS - change the -g flag to take arguments
# 2010 Oct 11 - AWS - pad GNIRS images (2 row at the top)
# 2010 Oct 12 - AWS - GNIRS row filtering using an 8-pixel wide kernel
# 2010 Dec 21 - AWS - add grid filter
# 2010 Dec 28 - AWS - select GNIRS pattern region based on camera & slit
# 2011 Feb 03 - AWS - use extension 2 for nsprepared GNIRS data
# 2011 Feb 05 - AWS - add input glob expansion
# 2011 May 05 - AWS - output 32-bit files
# To Do:
# GNIRS: Mask out padding when a DQ or pixel mask is available
# Detect and mask out objects before doing any calculations
# check if run before
# properly handle images < 1024 x 1024
# Specification of image section to use to calculate pattern
# Specification of image section affected by pattern
# Look at stddev of each row to identify which have pattern noise
#-----------------------------------------------------------------------
import datetime
import getopt
import glob
import matplotlib.pyplot as pyplot
import numpy
import os
import pyfits
from scipy.optimize import leastsq
import string
import sys
version = '2011 May 5'
#-----------------------------------------------------------------------
def usage():
print ''
print 'NAME'
print ' cleanir.py - filter pattern noise out of NIRI and GNIRS frames\n'
print 'SYNOPSIS'
print ' cleanir.py [options] infile/list\n'
print 'DESCRIPTION'
print ' This script assumes that the NIRI/GNIRS pattern noise in a quadrant'
print ' can be represented by a fixed pattern which is repeated over the'
print ' entire quadrant. The default size for this pattern is 16 pixels'
print ' wide and 4 pixels high (which may be changed via the -x and -y'
print ' flags). The pattern is determined for each quadrant by taking the'
print ' median of the pixel value distribution at each position in the'
print ' pattern. Once the median pattern has been determined for a'
print ' quadrant it is replicated to cover the entire quadrant and'
print ' subtracted, and the mean of the pattern is added back to preserve'
print ' flux. The standard deviation of all the pixels in the quadrant'
print ' is compared to that before the pattern subtraction, and if no'
print ' reduction was achieved the subtraction is undone. The pattern'
print ' subtraction may be forced via the -f flag. This process is'
print ' repeated for all four quadrants and the cleaned frame is written'
print ' to c<infile> (or the file specified with the -o flag). The'
print ' pattern derived for each quadrant may be saved with the -p flag.'
print ''
print ' Pattern noise is often accompanied by an offset in the bias'
print ' values between the four quadrants. One may want to use the'
print ' -q flag to try to remove this offset. This attempts to match'
print ' the iteratively determined median value of each quadrant.'
print ' This method works best with sky subtraction (i.e. with the -s'
print ' flag), and does not work well if there are large extended objects'
print ' in the frame. By default the median is determined from the'
print ' entire frame, although the -c flag will only use a central'
print ' portion of the image. Note that the derived quadrant offsets'
print ' will be applied to the output pattern file.'
print ''
print ' Removing the pattern from spectroscopy is more difficult because'
print ' of many vertical sky lines. By default f/6 spectroscopy with the'
print ' 2-pixel or blue slits (which do not fill the detector), uses the'
print ' empty regions at the bottom (1-272) and top (720-1024) of the'
print ' array for measuring the pattern. This is not possible for other'
print ' modes of spectroscopy where the spectrum fills the detector.'
print ' For these modes it is best to do sky subtraction before pattern'
print ' removal. The quickest method is to pass a sky frame (or an offset'
print ' frame) via the -s flag. The manual method is to generate and'
print ' subtract the sky, determine and save the pattern via the -p flag,'
print ' then subtract the pattern from the original image. One may use'
print ' the -a flag to force using all of the pixels for the pattern'
print ' determination.'
print ''
print ' Note that you may use glob expansion in infile, however, the'
print ' entire string must then be quoted or any pattern matching'
print ' characters (*,?) must be escaped with a backslash.'
print ''
print 'OPTIONS'
print ' -a : use all pixels for pattern determination'
print ' -b <badpixelmask> : specify a bad pixel mask (overrides DQ plane)'
print ' -c <frac> : use central <fraction> of image for bias adjustment [1]'
print ' -d <dir> : specify an input data directory'
print ' -f : force cleaning of all quads even if stddev does not decrease'
print ' -g # : graph results (0=none, 1=pattern, 2=offsets, 3=both)'
print ' -m : use median instead of fitting a Gaussian'
print ' -o <file> : write output to <file> (instead of c<infile>)'
print ' -p <file> : write full-frame pattern to <file>'
print ' -q : adjust quadrant offsets'
print ' -r : row filtering (useful for GNIRS XD spectra)'
print ' -s <sky> : sky frame to help in pattern recognition'
print ' -t : apply test grid filter before normalizing quadrants'
print ' -v : verbose debugging output'
print ' -x <size> : set pattern x size in pix [16]'
print ' -y <size> : set pattern y size in pix [4]\n'
print 'VERSION'
print ' ', version
print ''
raise SystemExit
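# Typical invocations (illustrative only; file names are hypothetical, flags
# as documented in usage() above):
#   cleanir.py N20070710S0123.fits                      -> writes cN20070710S0123.fits
#   cleanir.py -q -c 0.5 N20070710S0123.fits            -> also normalizes quadrant bias levels
#   cleanir.py -s sky.fits -p pattern.fits -o clean.fits raw.fits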
#-----------------------------------------------------------------------
def main():
global allpixels, applygridfilter, bad, badmask
global bias1, bias2, bias3, bias4, biasadjust
global cfrac, force, graph, median, output
global patternfile, datadir, rowfilter, savepattern
global skyfile, skysub, subtractrowmedian, pxsize, pysize, verbose
try:
opts, args = getopt.getopt(sys.argv[1:], 'ab:c:d:fg:hmo:p:qrs:tx:y:v', ['q1=','q2=','q3=','q4='])
except getopt.GetoptError, err:
print str(err)
usage()
sys.exit(2)
if (len(args)) != 1:
usage()
nargs = len(sys.argv[1:])
nopts = len(opts)
allpixels = False
applygridfilter = False
bad = -9.e6 # value assigned to bad pixels
badmask = 'DQ'
bias1 = 0.0
bias2 = 0.0
bias3 = 0.0
bias4 = 0.0
biasadjust = False
cfrac = 1.0 # use whole image
force = False
graph = 0
median = False
output = 'default'
patternfile = ''
datadir = ''
rowfilter = False
savepattern = False
skyfile = ''
skysub = False
subtractrowmedian = False
pxsize = 16
pysize = 4
verbose = False
for o, a in opts:
if o in ('-a'): # force using all pixels for pattern determination
allpixels = True
elif o in ('-b'):
badmask = a
elif o in ('-c'): # use central fraction for bias normalization
cfrac = float(a)
elif o in ('-d'): # input data directory
datadir = a
elif o in ('-f'): # force pattern subtraction in every quadrant
force = True
elif o in ('-g'): # graph results
graph = int(a)
elif o in ('-o'): # specify cleaned output file
output = a
elif o in ('-m'):
median = True
elif o in ('-p'): # write pattern file
patternfile = a
savepattern = True
elif o in ('-q'): # try to adjust quadrant bias values
biasadjust = True
elif o in ('--q1'): # bias offset for quadrant 1
bias1 = float(a)
elif o in ('--q2'):
bias2 = float(a)
elif o in ('--q3'):
bias3 = float(a)
elif o in ('--q4'):
bias4 = float(a)
elif o in ('-r'): # row filtering
rowfilter = True
elif o in ('-s'): # sky frame
skyfile = a
skysub = True
elif o in ('-t'): # test grid filter
applygridfilter = True
elif o in ('-x'): # specify pattern x-dimension
pxsize = int(a)
elif o in ('-y'): # specify pattern y-dimension
pysize = int(a)
elif o in ('-v'): # verbose debugging output
verbose = True
else:
assert False, "unhandled option"
inputfile = args[0]
files = glob.glob(inputfile)
if (verbose):
print '...input = ', inputfile
print '...files = ', files
print ''
for f in files:
if IsFits(f):
cleanir(f)
else: # file list
print 'Expanding ' + f + '...\n'
inlist = open(f,'r')
for line in inlist:
cleanir(line.strip())
inlist.close()
#-----------------------------------------------------------------------
def IsFits(infile):
global datadir
# If the file exists and has a .fits extension assume that it is FITS:
if os.path.exists(datadir + infile):
if infile.endswith('.fits'):
fits = True
else:
fits = False
elif os.path.exists(infile): # Check for lists in the CWD
if infile.endswith('.fits'):
fits = True
else:
fits = False
else: # assume it is a FITS image missing the .fits extension
fits = True
return fits
#-----------------------------------------------------------------------
def IterStat(vector, lowsigma=3, highsigma=3):
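    # Iterative sigma-clipping: recompute the median and standard deviation
    # using only values within [median - lowsigma*stddev, median + highsigma*stddev]
    # until at most one point is rejected per pass (or stddev reaches zero),
    # then return the clipped (median, stddev).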
global verbose
median = numpy.median(vector)
stddev = numpy.std(vector)
minval = median - lowsigma * stddev
maxval = median + highsigma * stddev
num = numpy.size(vector)
dn = 1000
while (dn > 1 and stddev > 0 ):
tmpvec = vector[(vector>minval) & (vector<maxval)]
median = numpy.median(tmpvec)
stddev = numpy.std(tmpvec)
dn = num - numpy.size(tmpvec)
num = numpy.size(tmpvec)
if (verbose):
print ' ...median=',median,' stddev=',stddev,' min=',minval,' max=',maxval,' N=',num,' dN=',dn
minval = median - lowsigma * stddev
maxval = median + highsigma * stddev
return (median, stddev)
#-----------------------------------------------------------------------
def CleanQuad(quad,patternin):
# quad = quadrant to be pattern-subtracted
# patternin = region to use for pattern determination
global qxsize, qysize # quadrant size
global pxsize, pysize # pattern size
if (verbose):
print ' ...mean of input quadrant =', numpy.mean(quad)
print ' ...median of input quadrant =', numpy.median(quad)
# create arrays of indices which correspond to the pattern tiled over
# the region of the input quadrant to be used for pattern determination
inpx = len(patternin[0])
inpy = len(patternin)
if (verbose):
print ' ...size of pattern determination region =',inpx,'x',inpy
indx = numpy.tile(numpy.arange(0,inpx,pxsize), inpy/pysize)
indy = numpy.arange(0,inpy,pysize).repeat(inpx/pxsize)
if (verbose):
print ' ...indx:', indx
print ' ...indy:', indy
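    # e.g. for a 1024x1024 region with the default 16x4 pattern, indx repeats
    # [0, 16, ..., 1008] once per tile row and indy repeats each element of
    # [0, 4, ..., 1020] once per tile column, giving one (indx, indy) origin
    # for every tile of the pattern across the region.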
# create blank pattern array:
pattern = numpy.zeros(pysize*pxsize).reshape(pysize,pxsize)
origstddev = numpy.std(quad)
print ' ...standard deviation of input quadrant =%9.3f' % origstddev
# find median pattern across quadrant:
if (graph > 0):
binwidth = 0.5
binmin = inputmedian - 3. * inputstddev
binmax = inputmedian + 3. * inputstddev
bins = numpy.arange( binmin, binmax, binwidth )
bincenters = bins[1:bins.size] - binwidth/2.
iplot = 0
for iy in range(0, pysize):
for ix in range(0, pxsize):
tmpdata = patternin[indy+iy,indx+ix]
pattern[iy,ix] = numpy.median(tmpdata[tmpdata!=bad]) # filter out bad pix
if (graph==1 or graph==3):
iplot += 1
hist,bins = numpy.histogram(tmpdata, bins=bins)
plot = pyplot.subplot(pysize,pxsize,iplot)
pyplot.plot(bincenters, hist, linestyle='', marker='.')
pyplot.axvline(x=pattern[iy,ix], ls='--', color='green')
if ix != 0:
plot.set_yticklabels([])
if (verbose):
print '...pattern:', pattern
if (graph==1 or graph==3):
print ('...graphing results...')
pyplot.subplots_adjust(left=0.05, bottom=0.05, right=0.95, top=0.95, wspace=0., hspace=0.2)
pyplot.show()
# tile pattern over quadrant:
quadpattern = numpy.tile(pattern, (qysize/pysize, qxsize/pxsize))
quadpattern -= numpy.mean(pattern) # set the mean value to zero
#print ' ...mean of pattern = ', numpy.mean(quadpattern)
cleanedquad = quad - quadpattern # subtract pattern
cleanstddev = numpy.std(cleanedquad) # calculate standard deviation
print ' ...standard deviation of cleaned quadrant = %.3f' % cleanstddev
if (force):
print ' ...forcing pattern subtraction'
else:
# has subtracting the pattern reduced the standard deviation?
if ( origstddev - cleanstddev > 0.01 ):
print ' ...improvement!'
else:
print ' ...no significant improvement so using the original quadrant'
cleanedquad = quad # reset quadrant pixels to original values
quadpattern = quadpattern * 0 # set pattern to zeros
return cleanedquad, quadpattern
#-----------------------------------------------------------------------
def CleanRow(row, sample, value):
# row = row to be pattern-subtracted
# sample = sample used to measure pattern
# value = desired final median value of row
indx = numpy.arange(0,len(sample),8)
pattern = numpy.zeros(8)
for ix in range(0, 8):
tmpdata = sample[indx+ix]
tmpdata = tmpdata[tmpdata!=bad] # filter out bad pix
# pattern[ix] = numpy.median(tmpdata)
median,stddev = IterStat(tmpdata)
pattern[ix] = median
if (verbose):
print '...pattern:', pattern
# repeat the pattern over the row and subtract:
rowpattern = numpy.tile(pattern, len(row)/8)
cleanedrow = row - rowpattern + value
#median,stddev = IterStat(cleanedrow, lowsigma=3, highsigma=1)
#cleanedrow = cleanedrow + (value - median)
return cleanedrow
#-----------------------------------------------------------------------
def ApplyRowFilter(quad, patternin):
# quad = quadrant to be pattern-subtracted
# patternin = region to use for pattern determination
global qxsize, qysize # quadrant size
quadmedian,quadstddev = IterStat(patternin) # iterative median
print '...median of input sample quadrant =', quadmedian, '+/-', quadstddev
for iy in range(0,qysize): # this is not correct, but will work for GNIRS
if (verbose):
print '...row =', iy
quad[iy] = CleanRow(quad[iy], patternin[iy], quadmedian)
#quad[iy] = CleanRow(quad[iy], patternin[iy], inputmedian)
return quad
#-----------------------------------------------------------------------
def gaussian(t,p): # p[0] = mu p[1] = sigma p[2] = peak
return(p[2] * numpy.exp( -(t - p[0])**2 / (2 * p[1]**2) ))
def residuals(p,data,t):
err = data - gaussian(t,p)
return err
def NormQuadMedian(quad):
global bins, bincenters, inputmean, inputmedian, inputstddev
global lsigma, hsigma, bad
hist,bins = numpy.histogram(quad, bins=bins)
if (verbose):
print '...calculating median using low-sigma =',lsigma,' and high-sigma =',hsigma
mincts = inputmedian - lsigma * inputstddev
maxcts = inputmedian + hsigma * inputstddev
if (verbose):
print '...input median=',inputmedian,' min=',mincts,' max=',maxcts
flatquad = quad[quad != bad] # flatten array and filter out bad pix
npix = numpy.size(flatquad)
dn = 100
while (npix > 10000 and dn > 10):
tmpquad = flatquad[(flatquad>mincts) & (flatquad<maxcts)]
median = numpy.median(tmpquad)
stddev = numpy.std(tmpquad)
mincts = median - lsigma * stddev
maxcts = median + hsigma * stddev
dn = npix - numpy.size(tmpquad)
npix = numpy.size(tmpquad)
if (verbose):
print '...median=',median,' stddev=',stddev,' min=',mincts,' max=',maxcts,' npix=',npix,' dn=',dn
offset = inputmedian - median
print ' ...offset = %.3f' % offset
return hist, median, offset
#-----------------------------------------------------------------------
def NormQuadGauss(quad):
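    # Estimate the quadrant bias by fitting a Gaussian to the histogram of
    # pixel values around the mode; returns the histogram, the fitted centre,
    # the offset (input mean - centre) to apply, and the fitted curve for plotting.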
global bins, bincenters, inputmean, inputmedian, inputstddev
hist,bins = numpy.histogram(quad, bins=bins)
mode = bins[ hist.argmax() ]
peak = hist.max()
fitsigma = 1.0 # this should probably be a command-line parameter
mincts = mode - fitsigma * inputstddev
maxcts = mode + fitsigma * inputstddev
t = bincenters[ (bincenters>mincts) & (bincenters<maxcts) ]
data = hist[ (bincenters>mincts) & (bincenters<maxcts) ]
p0 = [mode, inputstddev, peak]
print ' ...initial parameter guesses = %.3f %.3f %.0f' % (p0[0],p0[1],p0[2])
pbest = leastsq(residuals, p0, args=(data,t), full_output=1)
p = pbest[0]
print ' ...best fit parameters = %.3f %.3f %.0f' % (p[0],p[1],p[2])
offset = inputmean - p[0]
print ' ...offset = %.3f' % offset
xfit = numpy.linspace(mincts, maxcts, 100)
yfit = gaussian(xfit, p)
return hist, p[0], offset, xfit, yfit
#-----------------------------------------------------------------------
def GridFilter(img):
global qxsize, qysize # quadrant size
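    # Collapse the quadrant into a single gsize x gsize cell: for each
    # within-cell position take the median over all the gsize-sized blocks
    # tiling the quadrant (same idea as the pattern estimate, on a larger cell).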
gsize = 64
indx = numpy.tile(numpy.arange(0,qxsize-gsize+1,gsize), qysize/gsize)
indy = numpy.arange(0,qysize-gsize+1,gsize).repeat(qxsize/gsize)
tmpimg = numpy.zeros((gsize, gsize))
for iy in range(0, gsize):
for ix in range(0, gsize):
tmpdata = img[indy+iy,indx+ix]
tmpimg[iy,ix] = numpy.median(tmpdata)
return tmpimg
#-----------------------------------------------------------------------
def cleanir(inputfile):
global allpixels, badmask, biasadjust, cfrac, force, median, rowfilter, skysub, verbose
global bias1, bias2, bias3, bias4
global inputmedian, inputstddev
global datadir, output, pattern, patternfile, skyfile, pxsize, pysize, qxsize, qysize
    global bins, bincenters, inputmean, inputmedian, inputstddev, lsigma, hsigma
print 'CLEANIR v.', version
havedq = False # we have DQ information
if (verbose):
print '...inputfile =', inputfile
print '...allpixels =', allpixels
print '...badmask =', badmask
print '...biasadjust =', biasadjust
print '...bias1 =', bias1
print '...bias2 =', bias2
print '...bias3 =', bias3
print '...bias4 =', bias4
print '...cfrac =', cfrac
print '...datadir =', datadir
print '...force =', force
print '...median =', median
print '...output =', output
print '...patternfile =', patternfile
print '...row filter =', rowfilter
print '...skyfile =', skyfile
print '...skysub =', skysub
print '...pxsize =', pxsize
print '...pysize =', pysize
if not inputfile.endswith('.fits'):
inputfile = inputfile + '.fits'
if (output == 'default'):
outputfile = 'c' + os.path.basename(inputfile)
else:
outputfile = output
if ( not outputfile.endswith('.fits') ):
outputfile = outputfile + '.fits'
if (datadir != ''):
inputfile = datadir + '/' + inputfile
print 'Removing pattern noise from', inputfile
if (savepattern):
if ( not patternfile.endswith('.fits') ):
patternfile = patternfile + '.fits'
if (skysub):
if ( not skyfile.endswith('.fits') ):
skyfile = skyfile + '.fits'
if not os.path.exists(inputfile): # check whether input file exists
print 'ERROR: ', inputfile, 'does not exist'
sys.exit(2)
if os.path.exists(outputfile): # check whether output file exists
print '...removing old', outputfile
os.remove(outputfile)
if (savepattern):
if os.path.exists(patternfile): # check whether pattern file exists
print '...removing old', patternfile
os.remove(patternfile)
if (skysub):
if not os.path.exists(skyfile): # check whether sky file exists
print skyfile, 'does not exist'
sys.exit(2)
if (cfrac < 0.1):
print 'ERROR: central fraction must be >= 0.1'
sys.exit(2)
if (cfrac > 1):
print 'ERROR: central fraction must be <= 1'
sys.exit(2)
if (verbose):
print '...reading', inputfile
hdulist = pyfits.open(inputfile)
if (verbose):
print '...hdulist:', hdulist.info()
if (verbose):
print '...verifying FITS header...'
hdulist.verify('fix')
else:
hdulist.verify('silentfix')
next = len(hdulist)
if (verbose):
print '...number of extensions = ', next
if ( next == 1 ):
sci = 0
elif ( next < 5 ):
sci = 1
else:
sci = 2
if (verbose):
print '...assuming the science data are in extension', sci
image = numpy.array(hdulist[sci].data)
if (verbose):
print '...SCI: ', image
print image.dtype.name
try:
naxis1,naxis2 = hdulist[sci].header['naxis1'], hdulist[sci].header['naxis2']
except:
print 'ERROR: cannot get the dimensions of extension ', sci
pyfits.info(inputfile)
sys.exit(2)
print '...image dimensions = ', naxis1, 'x', naxis2
try:
instrument = hdulist[0].header['INSTRUME']
if (verbose):
print '...instrument =', instrument
except:
print 'WARNING: cannot determine instrument'
instrument = 'INDEF'
allpixels = True
try:
nscut = hdulist[0].header['NSCUT']
nscut = True
except:
nscut = False
if (verbose):
print '...nscut =', nscut
if instrument == 'GNIRS':
print '...padding the top of GNIRS image...'
pad = numpy.zeros((2,naxis1), dtype=numpy.float32) # create 2D array of padding
image = numpy.append(image,pad,axis=0) # append the padding array to the end
if (verbose):
print '...new image: ', image
naxis2 = naxis2 + 2
print '...image dimensions = ', naxis1, 'x', naxis2
print '...pattern size =', pxsize, 'x', pysize
qxsize = naxis1 / 2 # quadrant x size
qysize = naxis2 / 2 # quadrant y size
if qxsize%pxsize != 0 or qysize%pysize != 0:
print 'ERROR: quadrant size is not a multiple of the pattern size'
sys.exit(2)
if pxsize > qxsize or pysize > qysize:
print 'ERROR: pattern size is larger than the quadrant size!'
sys.exit(2)
#-----------------------------------------------------------------------
if badmask == 'DQ':
if (verbose):
print '...reading data quality extension...'
try:
dq = numpy.array(hdulist['DQ'].data)
havedq = True
if (verbose):
print '...DQ: ', dq
if ( numpy.size(dq[dq>0]) > numpy.size(dq)/2 ):
print 'WARNING:', numpy.size(dq[dq>0]), 'pixels are flagged as bad in the DQ plane!'
except:
print '...no DQ data found'
# dq = numpy.zeros(naxis2*naxis1,int)
# dq.resize(naxis2,naxis1)
elif os.path.exists(badmask): # bad pixel mask specified on the command line
if (verbose):
print '...reading bad pixel mask', badmask
if badmask.endswith('.pl'):
if (verbose):
print '...converting pl to fits'
# fitsfile = inputfile.replace('.pl', '.fits')
tmpbadmask = 'cleanir-badpixelmask.fits'
if os.path.exists(tmpbadmask):
os.remove(tmpbadmask)
from pyraf import iraf
iraf.imcopy(badmask, tmpbadmask)
badmask = tmpbadmask
badmaskhdu = pyfits.open(badmask)
if (verbose):
badmaskhdu.info()
dq = numpy.array(badmaskhdu[0].data)
havedq = True
if badmask.endswith('.pl'):
os.remove(tmpbadmask)
if (verbose):
print '...DQ: ', dq
badmaskhdu.close()
else:
print 'WARNING: ', badmask, 'does not exist'
print 'Turning off quadrant normalization'
biasadjust = False
#-----------------------------------------------------------------------
if (biasadjust):
try:
obstype = hdulist[0].header['OBSTYPE']
except:
print 'WARNING: cannot determine obstype'
obstype = 'INDEF'
if (verbose):
print '...obstype =', obstype
if (obstype == 'FLAT'):
try:
gcalshutter = hdulist[0].header['GCALSHUT']
except:
print 'WARNING: cannot determine GCAL shutter status'
if (verbose):
print '...gcal shutter =', gcalshutter
if (gcalshutter == 'OPEN'):
print '...this is a lamps-on flat, so turning off bias normalization...'
biasadjust = False
# Bias level adjustment should probably only be done on flat-fielded data.
#-----------------------------------------------------------------------
if (skysub):
print '...reading sky', skyfile
sky = pyfits.open(skyfile)
print '...verifying sky...'
if (verbose):
sky.verify('fix')
else:
sky.verify('silentfix')
skyimage = numpy.array(sky[sci].data)
if instrument == 'GNIRS':
print '...padding the top of the GNIRS sky...'
skyimage = numpy.append(skyimage,pad,axis=0) # append the padding array to the end
# NEED ERROR CHECKING HERE! (extensions, image size, filter, instrument, etc.)
#-----------------------------------------------------------------------
if (subtractrowmedian):
        print '...subtracting the median of each row...'
imagemean = numpy.mean(image)
for iy in range(0, naxis2):
tmprow = image[iy,:]
if (verbose):
print '...row ', iy
median,stddev = IterStat(tmprow) # iterative median
image[iy,:] -= median
image += ( imagemean - image.mean() ) # reset image to the original mean value
# image[iy,:] -= numpy.median(image[iy,:]) # simple row-filtering over the whole image
# Median filter each quadrant:
# image[iy,0:naxis1/2] -= numpy.median(image[iy,0:naxis1/2])
# image[iy,naxis1/2:naxis1] -= numpy.median(image[iy,naxis1/2:naxis1])
#-----------------------------------------------------------------------
# Set regions to be used for pattern determination:
# +-------+
# | 1 | 2 |
# +---+---+
# | 3 | 4 |
# +---+---+
if instrument == 'NIRI':
camera = 'INDEF'
decker = 'INDEF'
try:
fpmask = hdulist[0].header['FPMASK']
except:
print 'WARNING: cannot find FPMASK header keyword'
print ' Assuming that this is imaging...'
fpmask = 'f6-cam_G5208'
elif instrument == 'GNIRS':
fpmask = 'INDEF'
try:
camera = hdulist[0].header['CAMERA']
except:
print 'WARNING: cannot find CAMERA header keyword'
camera = 'INDEF'
try:
decker = hdulist[0].header['DECKER']
except:
print 'WARNING: cannot find DECKER header keyword'
decker = 'INDEF'
else:
fpmask = 'INDEF'
camera = 'INDEF'
decker = 'INDEF'
allpixels = True
if (verbose):
print '...fpmask = ', fpmask
print '...camera = ', camera
print '...decker = ', decker
if allpixels:
print '...using whole image for pattern determination'
q1x1,q1x2, q1y1,q1y2 = 0,qxsize, qysize,naxis2 # quad 1
q2x1,q2x2, q2y1,q2y2 = qxsize,naxis1, qysize,naxis2 # quad 2
q3x1,q3x2, q3y1,q3y2 = 0,qxsize, 0,qysize # quad 3
q4x1,q4x2, q4y1,q4y2 = qxsize,naxis1, 0,qysize # quad 4
lsigma = 3.0
hsigma = 1.0 # set a very small upper threshold to reject stars
elif fpmask == 'f6-2pixBl_G5214' or \
fpmask == 'f6-4pixBl_G5215' or \
fpmask == 'f6-6pixBl_G5216' or \
fpmask == 'f6-2pix_G5211':
print '...using region above and below slit (y<=272 and y>=728) for pattern determination'
q1x1,q1x2, q1y1,q1y2 = 0,qxsize, 728,naxis2
q2x1,q2x2, q2y1,q2y2 = qxsize,naxis1, 728,naxis2
q3x1,q3x2, q3y1,q3y2 = 0,qxsize, 0,272
q4x1,q4x2, q4y1,q4y2 = qxsize,naxis1, 0,272
lsigma = 3.0
hsigma = 3.0
elif fpmask == 'f6-4pix_G5212' or \
fpmask == 'f6-6pix_G5213' or \
fpmask == 'f32-6pix_G5229' or \
fpmask == 'f32-9pix_G5230':
print '...using whole image for pattern determination'
print 'WARNING: Sky lines may be altered by pattern removal!'
q1x1,q1x2, q1y1,q1y2 = 0,qxsize, qysize,naxis2
q2x1,q2x2, q2y1,q2y2 = qxsize,naxis1, qysize,naxis2
q3x1,q3x2, q3y1,q3y2 = 0,qxsize, 0,qysize
q4x1,q4x2, q4y1,q4y2 = qxsize,naxis1, 0,qysize
lsigma = 3.0
hsigma = 3.0
elif 'Short' in camera and decker != 'SC_XD':
print '...using x<=160 and x>=864 for pattern determination'
q1x1,q1x2, q1y1,q1y2 = 0,160, qysize,naxis2
q2x1,q2x2, q2y1,q2y2 = 864,naxis1, qysize,naxis2
q3x1,q3x2, q3y1,q3y2 = 0,160, 0,qysize
q4x1,q4x2, q4y1,q4y2 = 864,naxis1, 0,qysize
lsigma = 3.0
hsigma = 3.0
else:
print '...using whole image for pattern determination'
q1x1,q1x2, q1y1,q1y2 = 0,qxsize, qysize,naxis2 # quad 1
q2x1,q2x2, q2y1,q2y2 = qxsize,naxis1, qysize,naxis2 # quad 2
q3x1,q3x2, q3y1,q3y2 = 0,qxsize, 0,qysize # quad 3
q4x1,q4x2, q4y1,q4y2 = qxsize,naxis1, 0,qysize # quad 4
lsigma = 3.0
hsigma = 1.0 # set a very small upper threshold to reject stars
patternin = image.copy()
patternin1 = patternin[q1y1:q1y2, q1x1:q1x2]
patternin2 = patternin[q2y1:q2y2, q2x1:q2x2]
patternin3 = patternin[q3y1:q3y2, q3x1:q3x2]
patternin4 = patternin[q4y1:q4y2, q4x1:q4x2]
#-------------------------------------------------------------------
# Subtract sky frame
if (skysub):
print '...subtracting sky...'
patternin1 -= skyimage[q1y1:q1y2, q1x1:q1x2]
patternin2 -= skyimage[q2y1:q2y2, q2x1:q2x2]
patternin3 -= skyimage[q3y1:q3y2, q3x1:q3x2]
patternin4 -= skyimage[q4y1:q4y2, q4x1:q4x2]
#-------------------------------------------------------------------
# Flag pixels with bad DQ
if (havedq):
print '...flagging bad pixels...'
dq1 = dq[q1y1:q1y2, q1x1:q1x2]
dq2 = dq[q2y1:q2y2, q2x1:q2x2]
dq3 = dq[q3y1:q3y2, q3x1:q3x2]
dq4 = dq[q4y1:q4y2, q4x1:q4x2]
patternin1[dq1==1] = bad
patternin2[dq2==1] = bad
patternin3[dq3==1] = bad
patternin4[dq4==1] = bad
#-------------------------------------------------------------------
# Calculate means and medians for reference:
inputmean = numpy.mean(image)
print '...mean of input image = %.3f' % inputmean
if (biasadjust or graph>1):
#inputmedian, inputstddev = IterStat(image) # sigma-clipped
        allpatternin = numpy.concatenate((patternin1,patternin2,patternin3,patternin4))
allpatternin = allpatternin[allpatternin!=bad] # filter out bad values
inputmedian, inputstddev = IterStat(allpatternin)
print '...sigma-clipped median = %.3f' % inputmedian
print '...sigma-clipped stddev = %.3f' % inputstddev
#-------------------------------------------------------------------
# calculate and subtract pattern:
quads = image.copy()
quad1 = quads[qysize:naxis2, 0:qxsize]
quad2 = quads[qysize:naxis2, qxsize:naxis1]
quad3 = quads[ 0:qysize, 0:qxsize]
quad4 = quads[ 0:qysize, qxsize:naxis1]
print '...upper left quadrant:'
clean1, pattern1 = CleanQuad(quad1,patternin1)
print '...upper right quadrant:'
clean2, pattern2 = CleanQuad(quad2,patternin2)
print '...lower left quadrant:'
clean3, pattern3 = CleanQuad(quad3,patternin3)
print '...lower right quadrant:'
clean4, pattern4 = CleanQuad(quad4,patternin4)
if (verbose):
print '...reassembling new image...'
newimage = image.copy()
newimage[qysize:naxis2, 0:qxsize] = clean1
newimage[qysize:naxis2, qxsize:naxis1] = clean2
newimage[ 0:qysize, 0:qxsize] = clean3
newimage[ 0:qysize, qxsize:naxis1] = clean4
if (verbose):
print '...updating header...'
timestamp = datetime.datetime.now().strftime("%Y.%m.%d %H:%M:%S")
if (verbose):
print '...time stamp =', timestamp
hdulist[0].header.add_history('Cleaned with cleanir.py ' + timestamp)
#-----------------------------------------------------------------------
# Use the cleaned image from here on out
patternin = newimage.copy()
if (skysub):
print '...subtracting sky...'
patternin -= skyimage
patternin1 = patternin[q1y1:q1y2, q1x1:q1x2]
patternin2 = patternin[q2y1:q2y2, q2x1:q2x2]
patternin3 = patternin[q3y1:q3y2, q3x1:q3x2]
patternin4 = patternin[q4y1:q4y2, q4x1:q4x2]
#-----------------------------------------------------------------------
# GNIRS 8-pixel row filtering
# Go through each row of each quadrant and generate an 8-pixel wide kernel,
# subtract it, and then add back the previously measured quadrant mean.
if rowfilter:
print '...filtering rows...'
print '...upper left quadrant:'
clean1 = ApplyRowFilter(newimage[qysize:naxis2, 0:qxsize], patternin1)
print '...upper right quadrant:'
clean2 = ApplyRowFilter(newimage[qysize:naxis2, qxsize:naxis1], patternin2)
print '...lower left quadrant:'
clean3 = ApplyRowFilter(newimage[ 0:qysize, 0:qxsize], patternin3)
print '...lower right quadrant:'
clean4 = ApplyRowFilter(newimage[ 0:qysize, qxsize:naxis1], patternin4)
if (verbose):
print '...reassembling image...'
newimage[qysize:naxis2, 0:qxsize] = clean1
newimage[qysize:naxis2, qxsize:naxis1] = clean2
newimage[ 0:qysize, 0:qxsize] = clean3
newimage[ 0:qysize, qxsize:naxis1] = clean4
# Use the cleaned image from here on out
patternin = newimage.copy()
if (skysub):
print '...subtracting sky...'
patternin -= skyimage
patternin1 = patternin[q1y1:q1y2, q1x1:q1x2]
patternin2 = patternin[q2y1:q2y2, q2x1:q2x2]
patternin3 = patternin[q3y1:q3y2, q3x1:q3x2]
patternin4 = patternin[q4y1:q4y2, q4x1:q4x2]
#-------------------------------------------------------------------
# Normalize each quadrant:
if (biasadjust):
print '...normalizing the bias level of each quadrant...'
# And apply the measured offset to the pattern output
if (havedq): # Flag pixels with bad DQ
print '...flagging bad pixels...'
dq1 = dq[qysize:(1+cfrac)*qysize, (1-cfrac)*qxsize:qxsize]
dq2 = dq[qysize:(1+cfrac)*qysize, qxsize:(1+cfrac)*qxsize]
dq3 = dq[(1-cfrac)*qysize:qysize, (1-cfrac)*qxsize:qxsize]
dq4 = dq[(1-cfrac)*qysize:qysize, qxsize:(1+cfrac)*qxsize]
patternin1[dq1==1] = bad
patternin2[dq2==1] = bad
patternin3[dq3==1] = bad
patternin4[dq4==1] = bad
binmin = inputmedian - 5. * inputstddev
binmax = inputmedian + 5. * inputstddev
binwidth = 1.0
if (binmax - binmin) / binwidth < 50: # if too few bins the least-squares minimization will fail
binwidth = (binmax - binmin) / 50.
if (verbose):
print '...median =', inputmedian,' stddev =', inputstddev
bins = numpy.arange( binmin, binmax, binwidth )
bincenters = bins[1:bins.size] - binwidth/2.
print '...binning into', bins.size, 'bins from', binmin, 'to', binmax
if (applygridfilter):
print '...applying grid filter to each quadrant...'
patternin1 = GridFilter(patternin1)
patternin2 = GridFilter(patternin2)
patternin3 = GridFilter(patternin3)
patternin4 = GridFilter(patternin4)
if (median):
fit = False
print '...Using median for quadrant normalization.'
print '...upper left quadrant:'
hist1,center1,offset1 = NormQuadMedian(patternin1)
print '...upper right quadrant:'
hist2,center2,offset2 = NormQuadMedian(patternin2)
print '...lower left quadrant:'
hist3,center3,offset3 = NormQuadMedian(patternin3)
print '...lower right quadrant:'
hist4,center4,offset4 = NormQuadMedian(patternin4)
else:
fit = True
print '...upper left quadrant:'
hist1,center1,offset1,xfit1,yfit1 = NormQuadGauss(patternin1)
print '...upper right quadrant:'
hist2,center2,offset2,xfit2,yfit2 = NormQuadGauss(patternin2)
print '...lower left quadrant:'
hist3,center3,offset3,xfit3,yfit3 = NormQuadGauss(patternin3)
print '...lower right quadrant:'
hist4,center4,offset4,xfit4,yfit4 = NormQuadGauss(patternin4)
newimage[qysize:naxis2, 0:qxsize] += offset1
newimage[qysize:naxis2, qxsize:naxis1] += offset2
newimage[0:qysize, 0:qxsize] += offset3
newimage[0:qysize, qxsize:naxis1] += offset4
pattern1 -= offset1
pattern2 -= offset2
pattern3 -= offset3
pattern4 -= offset4
print '...checking quality of bias normalization...'
newmedian, newstddev = IterStat(newimage)
if ( inputstddev - newstddev > 0.001 ):
print ' ...sigma-clipped stddev has decreased from %.3f to %.3f' % (inputstddev, newstddev)
offset = inputmean - numpy.mean(newimage)
print '...adjusting whole image by %.3f to match input image...' % offset
newimage += offset
else:
print ' ...sigma-clipped stddev has not significantly improved: %.3f -> %.3f' % (inputstddev, newstddev)
print ' ...undoing quadrant bias offsets...'
outimage = newimage
            newimage[qysize:naxis2, 0:qxsize] -= offset1
            newimage[qysize:naxis2, qxsize:naxis1] -= offset2
            newimage[0:qysize, 0:qxsize] -= offset3
            newimage[0:qysize, qxsize:naxis1] -= offset4
pattern1 += offset1
pattern2 += offset2
pattern3 += offset3
pattern4 += offset4
#-------------------------------------------------------------------
if (graph>1): # 2x2 grid of pixel distributions, fits & estimated sky values
print ('...graphing pixel distributions in each quadrant...')
xlimits = numpy.array([binmin, binmax])
plot = pyplot.subplot(2,2,1)
pyplot.plot(bincenters, hist1, linestyle='', marker='.')
if fit:
pyplot.plot(xfit1, yfit1, linestyle='-', color='red', linewidth=2)
pyplot.xlim(xlimits)
pyplot.axvline(x=center1, ls='--', color='green')
pyplot.text(0.05, 0.85, 'mean = %.2f' % center1, horizontalalignment='left', transform=plot.transAxes)
pyplot.text(0.95, 0.85, 'delta = %.2f' % offset1, horizontalalignment='right', transform=plot.transAxes)
pyplot.title('Quadrant 1')
plot = pyplot.subplot(2,2,2)
pyplot.plot(bincenters, hist2, linestyle='', marker='.')
if fit:
pyplot.plot(xfit2, yfit2, linestyle='-', color='red', linewidth=2)
pyplot.xlim(xlimits)
pyplot.axvline(x=center2, ls='--', color='green')
pyplot.text(0.05, 0.85, 'mean = %.2f' % center2, horizontalalignment='left', transform=plot.transAxes)
pyplot.text(0.95, 0.85, 'delta = %.2f' % offset2, horizontalalignment='right', transform=plot.transAxes)
pyplot.title('Quadrant 2')
plot = pyplot.subplot(2,2,3)
pyplot.plot(bincenters, hist3, linestyle='', marker='.')
if fit:
pyplot.plot(xfit3, yfit3, linestyle='-', color='red', linewidth=2)
pyplot.xlim(xlimits)
pyplot.axvline(x=center3, ls='--', color='green')
pyplot.text(0.05, 0.85, 'mean = %.2f' % center3, horizontalalignment='left', transform=plot.transAxes)
pyplot.text(0.95, 0.85, 'delta = %.2f' % offset3, horizontalalignment='right', transform=plot.transAxes)
pyplot.title('Quadrant 3')
plot = pyplot.subplot(2,2,4)
pyplot.plot(bincenters, hist4, linestyle='', marker='.')
if fit:
pyplot.plot(xfit4, yfit4, linestyle='-', color='red', linewidth=2)
pyplot.xlim(xlimits)
pyplot.axvline(x=center4, ls='--', color='green')
pyplot.text(0.05, 0.85, 'mean = %.2f' % center4, horizontalalignment='left', transform=plot.transAxes)
pyplot.text(0.95, 0.85, 'delta = %.2f' % offset4, horizontalalignment='right', transform=plot.transAxes)
pyplot.title('Quadrant 4')
pyplot.subplots_adjust(left=0.05, bottom=0.05, right=0.95,
top=0.95, wspace=0.2, hspace=0.2)
# top label = inputfile
pyplot.show()
#-------------------------------------------------------------------
# Apply manual bias correction if supplied:
if (bias1 != 0.0 or bias2 != 0.0 or bias3 != 0.0 or bias4 != 0.0):
print '...applying user-supplied bias offset...'
newimage[qysize:naxis2, 0:qxsize] += bias1
newimage[qysize:naxis2, qxsize:naxis1] += bias2
newimage[0:qysize, 0:qxsize] += bias3
newimage[0:qysize, qxsize:naxis1] += bias4
pattern1 -= bias1
pattern2 -= bias2
pattern3 -= bias3
pattern4 -= bias4
if (verbose):
print '...mean of input image = %.3f' % inputmean
print '...mean of output image = %.3f' % numpy.mean(newimage)
print '...median of output image = %.3f' % numpy.median(newimage)
#-------------------------------------------------------------------
# Write cleaned output image
if instrument == 'GNIRS':
print '...removing GNIRS padding...'
# remove 2-pixel padding on top of image:
# syntax: delete(array, [rows to delete], axis=0)
newimage = numpy.delete(newimage, [naxis2-1,naxis2-2], axis=0)
print '...writing', outputfile
hdulist[sci].data = newimage
hdulist.writeto(outputfile)
#-------------------------------------------------------------------
# Write pattern image
if (savepattern):
print '...assembling and writing pattern image...'
# create blank pattern array:
fullpattern = numpy.zeros(naxis2*naxis1).reshape(naxis2,naxis1)
# assemble the quadrants into a full pattern image:
fullpattern[qysize:naxis2, 0:qxsize] = pattern1
fullpattern[qysize:naxis2, qxsize:naxis1] = pattern2
fullpattern[ 0:qysize, 0:qxsize] = pattern3
fullpattern[ 0:qysize, qxsize:naxis1] = pattern4
# normalize to zero:
fullpattern -= fullpattern.mean()
print '...writing', patternfile
hdu = pyfits.PrimaryHDU(fullpattern)
hdu.writeto(patternfile)
#-------------------------------------------------------------------
# Close file
hdulist.close()
print ' '
#-----------------------------------------------------------------------
if __name__ == '__main__':
main()
#-----------------------------------------------------------------------
| mpl-2.0 |
carlesfernandez/gnss-sdr | src/algorithms/libs/volk_gnsssdr_module/volk_gnsssdr/apps/plot_best_vs_generic.py | 2 | 1744 | #!/usr/bin/env python
#
# GNSS-SDR is a Global Navigation Satellite System software-defined receiver.
# This file is part of GNSS-SDR.
#
# Copyright (C) 2010-2020 (see AUTHORS file for a list of contributors)
# SPDX-License-Identifier: GPL-3.0-or-later
# This script is used to compare the generic kernels to the highest performing kernel, for each operation
# Run:
# ./volk_gnsssdr_profile -j volk_gnsssdr_results.json
# Then run this script under python3
import matplotlib.pyplot as plt
import numpy as np
import json
filename = 'volk_gnsssdr_results.json'
operations = []
metrics = []
with open(filename) as json_file:
data = json.load(json_file)
for test in data['volk_gnsssdr_tests']:
        if ('generic' in test['results']) or ('u_generic' in test['results']): # some don't have a generic kernel
operations.append(test['name'][13:]) # remove volk_gnsssdr_ prefix that they all have
extension_performance = []
for key, val in test['results'].items():
if key not in ['generic', 'u_generic']: # exclude generic results, when trying to find fastest time
extension_performance.append(val['time'])
try:
generic_time = test['results']['generic']['time']
except:
generic_time = test['results']['u_generic']['time']
metrics.append(extension_performance[np.argmin(extension_performance)]/generic_time)
plt.bar(np.arange(len(metrics)), metrics)
plt.hlines(1.0, -1, len(metrics), colors='r', linestyles='dashed')
plt.axis([-1, len(metrics), 0, 2])
plt.xticks(np.arange(len(operations)), operations, rotation=90)
plt.ylabel('Time taken of fastest kernel relative to generic kernel')
plt.show()
| gpl-3.0 |
ina-foss/ID-Fits | lib/stats.py | 1 | 1275 | # ID-Fits
# Copyright (c) 2015 Institut National de l'Audiovisuel, INA, All rights reserved.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3.0 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library.
import numpy as np
import pylab
from matplotlib import pyplot as plt
def plotROC(rocs, labels, title=None, show_grid=True):
prev_figsize = pylab.rcParams['figure.figsize']
pylab.rcParams['figure.figsize'] = (8.0, 6.0)
for roc,label in zip(rocs,labels):
plt.plot(roc[0], roc[1], label=label)
plt.xlabel("False positive rate")
plt.ylabel("True positive rate")
plt.legend(loc="lower right")
if title:
plt.title(title)
if show_grid:
plt.grid()
pylab.rcParams['figure.figsize'] = prev_figsize
| lgpl-3.0 |
NelisVerhoef/scikit-learn | examples/covariance/plot_sparse_cov.py | 300 | 5078 | """
======================================
Sparse inverse covariance estimation
======================================
Using the GraphLasso estimator to learn a covariance and sparse precision
from a small number of samples.
To estimate a probabilistic model (e.g. a Gaussian model), estimating the
precision matrix, that is the inverse covariance matrix, is as important
as estimating the covariance matrix. Indeed a Gaussian model is
parametrized by the precision matrix.
To be in favorable recovery conditions, we sample the data from a model
with a sparse inverse covariance matrix. In addition, we ensure that the
data is not too correlated (limiting the largest coefficient of the
precision matrix) and that there are no small coefficients in the
precision matrix that cannot be recovered. Finally, with a small
number of observations, it is easier to recover a correlation matrix
than a covariance matrix, so we scale the time series.
Here, the number of samples is slightly larger than the number of
dimensions, thus the empirical covariance is still invertible. However,
as the observations are strongly correlated, the empirical covariance
matrix is ill-conditioned and as a result its inverse --the empirical
precision matrix-- is very far from the ground truth.
If we use l2 shrinkage, as with the Ledoit-Wolf estimator, the small
number of samples forces us to shrink a lot. As a result, the
Ledoit-Wolf precision is fairly close to the ground truth precision,
i.e. not far from being diagonal, but its off-diagonal structure is lost.
The l1-penalized estimator can recover part of this off-diagonal
structure. It learns a sparse precision. It is not able to
recover the exact sparsity pattern: it detects too many non-zero
coefficients. However, the highest non-zero coefficients of the l1
estimate correspond to the non-zero coefficients in the ground truth.
Finally, the coefficients of the l1 precision estimate are biased toward
zero: because of the penalty, they are all smaller than the corresponding
ground truth value, as can be seen on the figure.
Note that, the color range of the precision matrices is tweaked to
improve readability of the figure. The full range of values of the
empirical precision is not displayed.
The alpha parameter of the GraphLasso, which sets the sparsity of the model, is
set by internal cross-validation in the GraphLassoCV. As can be
seen on figure 2, the grid to compute the cross-validation score is
iteratively refined in the neighborhood of the maximum.
"""
print(__doc__)
# author: Gael Varoquaux <[email protected]>
# License: BSD 3 clause
# Copyright: INRIA
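# For reference (standard graphical-lasso objective, stated here for context
# and not taken from this example): the sparse precision matrix Theta is
# estimated by maximizing
#     log det(Theta) - tr(S Theta) - alpha * ||Theta||_1
# over positive-definite matrices, where S is the empirical covariance and
# alpha controls the sparsity of the estimate.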
import numpy as np
from scipy import linalg
from sklearn.datasets import make_sparse_spd_matrix
from sklearn.covariance import GraphLassoCV, ledoit_wolf
import matplotlib.pyplot as plt
##############################################################################
# Generate the data
n_samples = 60
n_features = 20
prng = np.random.RandomState(1)
prec = make_sparse_spd_matrix(n_features, alpha=.98,
smallest_coef=.4,
largest_coef=.7,
random_state=prng)
cov = linalg.inv(prec)
d = np.sqrt(np.diag(cov))
cov /= d
cov /= d[:, np.newaxis]
prec *= d
prec *= d[:, np.newaxis]
X = prng.multivariate_normal(np.zeros(n_features), cov, size=n_samples)
X -= X.mean(axis=0)
X /= X.std(axis=0)
##############################################################################
# Estimate the covariance
emp_cov = np.dot(X.T, X) / n_samples
model = GraphLassoCV()
model.fit(X)
cov_ = model.covariance_
prec_ = model.precision_
lw_cov_, _ = ledoit_wolf(X)
lw_prec_ = linalg.inv(lw_cov_)
##############################################################################
# Plot the results
plt.figure(figsize=(10, 6))
plt.subplots_adjust(left=0.02, right=0.98)
# plot the covariances
covs = [('Empirical', emp_cov), ('Ledoit-Wolf', lw_cov_),
('GraphLasso', cov_), ('True', cov)]
vmax = cov_.max()
for i, (name, this_cov) in enumerate(covs):
plt.subplot(2, 4, i + 1)
plt.imshow(this_cov, interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s covariance' % name)
# plot the precisions
precs = [('Empirical', linalg.inv(emp_cov)), ('Ledoit-Wolf', lw_prec_),
('GraphLasso', prec_), ('True', prec)]
vmax = .9 * prec_.max()
for i, (name, this_prec) in enumerate(precs):
ax = plt.subplot(2, 4, i + 5)
plt.imshow(np.ma.masked_equal(this_prec, 0),
interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s precision' % name)
ax.set_axis_bgcolor('.7')
# plot the model selection metric
plt.figure(figsize=(4, 3))
plt.axes([.2, .15, .75, .7])
plt.plot(model.cv_alphas_, np.mean(model.grid_scores, axis=1), 'o-')
plt.axvline(model.alpha_, color='.5')
plt.title('Model selection')
plt.ylabel('Cross-validation score')
plt.xlabel('alpha')
plt.show()
| bsd-3-clause |
vermouthmjl/scikit-learn | examples/ensemble/plot_forest_importances.py | 168 | 1793 | """
=========================================
Feature importances with forests of trees
=========================================
This example shows the use of forests of trees to evaluate the importance of
features on an artificial classification task. The red bars are the feature
importances of the forest, along with their inter-tree variability.
As expected, the plot suggests that 3 features are informative, while the
remaining are not.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.ensemble import ExtraTreesClassifier
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
n_classes=2,
random_state=0,
shuffle=False)
# Build a forest and compute the feature importances
forest = ExtraTreesClassifier(n_estimators=250,
random_state=0)
forest.fit(X, y)
importances = forest.feature_importances_
std = np.std([tree.feature_importances_ for tree in forest.estimators_],
axis=0)
indices = np.argsort(importances)[::-1]
# Print the feature ranking
print("Feature ranking:")
for f in range(X.shape[1]):
print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]))
# Plot the feature importances of the forest
plt.figure()
plt.title("Feature importances")
plt.bar(range(X.shape[1]), importances[indices],
color="r", yerr=std[indices], align="center")
plt.xticks(range(X.shape[1]), indices)
plt.xlim([-1, X.shape[1]])
plt.show()
| bsd-3-clause |
meduz/scikit-learn | examples/linear_model/plot_polynomial_interpolation.py | 168 | 2088 | #!/usr/bin/env python
"""
========================
Polynomial interpolation
========================
This example demonstrates how to approximate a function with a polynomial of
degree n_degree by using ridge regression. Concretely, from n_samples 1d
points, it suffices to build the Vandermonde matrix, which is n_samples x
n_degree+1 and has the following form:
[[1, x_1, x_1 ** 2, x_1 ** 3, ...],
[1, x_2, x_2 ** 2, x_2 ** 3, ...],
...]
Intuitively, this matrix can be interpreted as a matrix of pseudo features (the
points raised to some power). The matrix is akin to (but different from) the
matrix induced by a polynomial kernel.
This example shows that you can do non-linear regression with a linear model,
using a pipeline to add non-linear features. Kernel methods extend this idea
and can induce very high (even infinite) dimensional feature spaces.
"""
print(__doc__)
# Author: Mathieu Blondel
# Jake Vanderplas
# License: BSD 3 clause
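# Note: for a degree-n fit, make_pipeline(PolynomialFeatures(n), Ridge()) below
# builds the Vandermonde-style feature matrix described in the docstring and
# fits ridge regression on it.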
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import Ridge
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
def f(x):
""" function to approximate by polynomial interpolation"""
return x * np.sin(x)
# generate points used to plot
x_plot = np.linspace(0, 10, 100)
# generate points and keep a subset of them
x = np.linspace(0, 10, 100)
rng = np.random.RandomState(0)
rng.shuffle(x)
x = np.sort(x[:20])
y = f(x)
# create matrix versions of these arrays
X = x[:, np.newaxis]
X_plot = x_plot[:, np.newaxis]
colors = ['teal', 'yellowgreen', 'gold']
lw = 2
plt.plot(x_plot, f(x_plot), color='cornflowerblue', linewidth=lw,
label="ground truth")
plt.scatter(x, y, color='navy', s=30, marker='o', label="training points")
for count, degree in enumerate([3, 4, 5]):
model = make_pipeline(PolynomialFeatures(degree), Ridge())
model.fit(X, y)
y_plot = model.predict(X_plot)
plt.plot(x_plot, y_plot, color=colors[count], linewidth=lw,
label="degree %d" % degree)
plt.legend(loc='lower left')
plt.show()
| bsd-3-clause |
eg-zhang/scikit-learn | sklearn/datasets/lfw.py | 141 | 19372 | """Loader for the Labeled Faces in the Wild (LFW) dataset
This dataset is a collection of JPEG pictures of famous people collected
over the internet; all details are available on the official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. The typical task is called
Face Verification: given a pair of two pictures, a binary classifier
must predict whether the two images are from the same person.
An alternative task, Face Recognition or Face Identification, is:
given the picture of the face of an unknown person, identify the name
of the person by referring to a gallery of previously seen pictures of
identified persons.
Both Face Verification and Face Recognition are tasks that are typically
performed on the output of a model trained to perform Face Detection. The
most popular model for Face Detection is called Viola-Jones and is
implemented in the OpenCV library. The LFW faces were extracted by this face
detector from various online websites.
"""
# Copyright (c) 2011 Olivier Grisel <[email protected]>
# License: BSD 3 clause
from os import listdir, makedirs, remove
from os.path import join, exists, isdir
from sklearn.utils import deprecated
import logging
import numpy as np
try:
import urllib.request as urllib # for backwards compatibility
except ImportError:
import urllib
from .base import get_data_home, Bunch
from ..externals.joblib import Memory
from ..externals.six import b
logger = logging.getLogger(__name__)
BASE_URL = "http://vis-www.cs.umass.edu/lfw/"
ARCHIVE_NAME = "lfw.tgz"
FUNNELED_ARCHIVE_NAME = "lfw-funneled.tgz"
TARGET_FILENAMES = [
'pairsDevTrain.txt',
'pairsDevTest.txt',
'pairs.txt',
]
def scale_face(face):
"""Scale back to 0-1 range in case of normalization for plotting"""
scaled = face - face.min()
scaled /= scaled.max()
return scaled
#
# Common private utilities for data fetching from the original LFW website
# local disk caching, and image decoding.
#
def check_fetch_lfw(data_home=None, funneled=True, download_if_missing=True):
"""Helper function to download any missing LFW data"""
data_home = get_data_home(data_home=data_home)
lfw_home = join(data_home, "lfw_home")
if funneled:
archive_path = join(lfw_home, FUNNELED_ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw_funneled")
archive_url = BASE_URL + FUNNELED_ARCHIVE_NAME
else:
archive_path = join(lfw_home, ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw")
archive_url = BASE_URL + ARCHIVE_NAME
if not exists(lfw_home):
makedirs(lfw_home)
for target_filename in TARGET_FILENAMES:
target_filepath = join(lfw_home, target_filename)
if not exists(target_filepath):
if download_if_missing:
url = BASE_URL + target_filename
logger.warning("Downloading LFW metadata: %s", url)
urllib.urlretrieve(url, target_filepath)
else:
raise IOError("%s is missing" % target_filepath)
if not exists(data_folder_path):
if not exists(archive_path):
if download_if_missing:
logger.warning("Downloading LFW data (~200MB): %s", archive_url)
urllib.urlretrieve(archive_url, archive_path)
else:
raise IOError("%s is missing" % target_filepath)
import tarfile
logger.info("Decompressing the data archive to %s", data_folder_path)
tarfile.open(archive_path, "r:gz").extractall(path=lfw_home)
remove(archive_path)
return lfw_home, data_folder_path
def _load_imgs(file_paths, slice_, color, resize):
"""Internally used to load images"""
# Try to import imread and imresize from PIL. We do this here to prevent
# the whole sklearn.datasets module from depending on PIL.
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
from scipy.misc import imresize
except ImportError:
raise ImportError("The Python Imaging Library (PIL)"
" is required to load data from jpeg files")
# compute the portion of the images to load to respect the slice_ parameter
# given by the caller
default_slice = (slice(0, 250), slice(0, 250))
if slice_ is None:
slice_ = default_slice
else:
slice_ = tuple(s or ds for s, ds in zip(slice_, default_slice))
h_slice, w_slice = slice_
h = (h_slice.stop - h_slice.start) // (h_slice.step or 1)
w = (w_slice.stop - w_slice.start) // (w_slice.step or 1)
if resize is not None:
resize = float(resize)
h = int(resize * h)
w = int(resize * w)
# allocate some contiguous memory to host the decoded image slices
n_faces = len(file_paths)
if not color:
faces = np.zeros((n_faces, h, w), dtype=np.float32)
else:
faces = np.zeros((n_faces, h, w, 3), dtype=np.float32)
# iterate over the collected file path to load the jpeg files as numpy
# arrays
for i, file_path in enumerate(file_paths):
if i % 1000 == 0:
logger.info("Loading face #%05d / %05d", i + 1, n_faces)
# Checks if jpeg reading worked. Refer to issue #3594 for more
# details.
img = imread(file_path)
        if img.ndim == 0:
raise RuntimeError("Failed to read the image file %s, "
"Please make sure that libjpeg is installed"
% file_path)
face = np.asarray(img[slice_], dtype=np.float32)
face /= 255.0 # scale uint8 coded colors to the [0.0, 1.0] floats
if resize is not None:
face = imresize(face, resize)
if not color:
# average the color channels to compute a gray levels
            # representation
face = face.mean(axis=2)
faces[i, ...] = face
return faces
#
# Task #1: Face Identification on picture with names
#
def _fetch_lfw_people(data_folder_path, slice_=None, color=False, resize=None,
min_faces_per_person=0):
"""Perform the actual data loading for the lfw people dataset
This operation is meant to be cached by a joblib wrapper.
"""
    # scan the data folder content to retain people with more than
# `min_faces_per_person` face pictures
person_names, file_paths = [], []
for person_name in sorted(listdir(data_folder_path)):
folder_path = join(data_folder_path, person_name)
if not isdir(folder_path):
continue
paths = [join(folder_path, f) for f in listdir(folder_path)]
n_pictures = len(paths)
if n_pictures >= min_faces_per_person:
person_name = person_name.replace('_', ' ')
person_names.extend([person_name] * n_pictures)
file_paths.extend(paths)
n_faces = len(file_paths)
if n_faces == 0:
raise ValueError("min_faces_per_person=%d is too restrictive" %
min_faces_per_person)
target_names = np.unique(person_names)
target = np.searchsorted(target_names, person_names)
faces = _load_imgs(file_paths, slice_, color, resize)
# shuffle the faces with a deterministic RNG scheme to avoid having
# all faces of the same person in a row, as it would break some
# cross validation and learning algorithms such as SGD and online
# k-means that make an IID assumption
indices = np.arange(n_faces)
np.random.RandomState(42).shuffle(indices)
faces, target = faces[indices], target[indices]
return faces, target, target_names
def fetch_lfw_people(data_home=None, funneled=True, resize=0.5,
min_faces_per_person=0, color=False,
slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) people dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Recognition (or Identification): given the
picture of a face, find the name of the person given a training set
(gallery).
The original images are 250 x 250 pixels, but the default slice and resize
    arguments reduce them to 62 x 47.
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
funneled : boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize : float, optional, default 0.5
        Ratio used to resize each face picture.
    min_faces_per_person : int, optional, default 0
The extracted dataset will only retain pictures of people that have at
least `min_faces_per_person` different pictures.
color : boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
        one more dimension than the shape with color = False.
slice_ : optional
Provide a custom 2D slice (height, width) to extract the
        'interesting' part of the jpeg files and avoid using statistical
correlation from the background
download_if_missing : optional, True by default
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
dataset : dict-like object with the following attributes:
dataset.data : numpy array of shape (13233, 2914)
Each row corresponds to a ravelled face image of original size 62 x 47
pixels. Changing the ``slice_`` or resize parameters will change the shape
of the output.
dataset.images : numpy array of shape (13233, 62, 47)
Each row is a face image corresponding to one of the 5749 people in
the dataset. Changing the ``slice_`` or resize parameters will change the shape
of the output.
dataset.target : numpy array of shape (13233,)
Labels associated to each face image. Those labels range from 0-5748
and correspond to the person IDs.
dataset.DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading LFW people faces from %s', lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_people)
# load and memoize the pairs as np arrays
faces, target, target_names = load_func(
data_folder_path, resize=resize,
min_faces_per_person=min_faces_per_person, color=color, slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=faces.reshape(len(faces), -1), images=faces,
target=target, target_names=target_names,
DESCR="LFW faces dataset")
#
# Task #2: Face Verification on pairs of face pictures
#
def _fetch_lfw_pairs(index_file_path, data_folder_path, slice_=None,
color=False, resize=None):
"""Perform the actual data loading for the LFW pairs dataset
This operation is meant to be cached by a joblib wrapper.
"""
# parse the index file to find the number of pairs to be able to allocate
# the right amount of memory before starting to decode the jpeg files
with open(index_file_path, 'rb') as index_file:
split_lines = [ln.strip().split(b('\t')) for ln in index_file]
pair_specs = [sl for sl in split_lines if len(sl) > 2]
n_pairs = len(pair_specs)
    # iterating over the metadata lines for each pair to find the filename to
# decode and load in memory
target = np.zeros(n_pairs, dtype=np.int)
file_paths = list()
for i, components in enumerate(pair_specs):
if len(components) == 3:
target[i] = 1
pair = (
(components[0], int(components[1]) - 1),
(components[0], int(components[2]) - 1),
)
elif len(components) == 4:
target[i] = 0
pair = (
(components[0], int(components[1]) - 1),
(components[2], int(components[3]) - 1),
)
else:
raise ValueError("invalid line %d: %r" % (i + 1, components))
for j, (name, idx) in enumerate(pair):
try:
person_folder = join(data_folder_path, name)
except TypeError:
person_folder = join(data_folder_path, str(name, 'UTF-8'))
filenames = list(sorted(listdir(person_folder)))
file_path = join(person_folder, filenames[idx])
file_paths.append(file_path)
pairs = _load_imgs(file_paths, slice_, color, resize)
shape = list(pairs.shape)
n_faces = shape.pop(0)
shape.insert(0, 2)
shape.insert(0, n_faces // 2)
pairs.shape = shape
return pairs, target, np.array(['Different persons', 'Same person'])
@deprecated("Function 'load_lfw_people' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_people(download_if_missing=False) instead.")
def load_lfw_people(download_if_missing=False, **kwargs):
"""Alias for fetch_lfw_people(download_if_missing=False)
Check fetch_lfw_people.__doc__ for the documentation and parameter list.
"""
return fetch_lfw_people(download_if_missing=download_if_missing, **kwargs)
def fetch_lfw_pairs(subset='train', data_home=None, funneled=True, resize=0.5,
color=False, slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) pairs dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Verification: given a pair of two pictures,
a binary classifier must predict whether the two images are from
the same person.
In the official `README.txt`_ this task is described as the
"Restricted" task. As I am not sure as to implement the
"Unrestricted" variant correctly, I left it as unsupported for now.
.. _`README.txt`: http://vis-www.cs.umass.edu/lfw/README.txt
The original images are 250 x 250 pixels, but the default slice and resize
    arguments reduce them to 62 x 47.
Read more in the :ref:`User Guide <labeled_faces_in_the_wild>`.
Parameters
----------
subset : optional, default: 'train'
Select the dataset to load: 'train' for the development training
set, 'test' for the development test set, and '10_folds' for the
official evaluation set that is meant to be used with a 10-folds
cross validation.
data_home : optional, default: None
Specify another download and cache folder for the datasets. By
default all scikit learn data is stored in '~/scikit_learn_data'
subfolders.
funneled : boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize : float, optional, default 0.5
        Ratio used to resize each face picture.
color : boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
        one more dimension than the shape with color = False.
slice_ : optional
Provide a custom 2D slice (height, width) to extract the
        'interesting' part of the jpeg files and avoid using statistical
correlation from the background
download_if_missing : optional, True by default
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
The data is returned as a Bunch object with the following attributes:
data : numpy array of shape (2200, 5828)
Each row corresponds to 2 ravel'd face images of original size 62 x 47
pixels. Changing the ``slice_`` or resize parameters will change the shape
of the output.
pairs : numpy array of shape (2200, 2, 62, 47)
Each row has 2 face images corresponding to same or different person
from the dataset containing 5749 people. Changing the ``slice_`` or resize
parameters will change the shape of the output.
target : numpy array of shape (13233,)
Labels associated to each pair of images. The two label values being
different persons or the same person.
DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading %s LFW pairs from %s', subset, lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_pairs)
# select the right metadata file according to the requested subset
label_filenames = {
'train': 'pairsDevTrain.txt',
'test': 'pairsDevTest.txt',
'10_folds': 'pairs.txt',
}
if subset not in label_filenames:
raise ValueError("subset='%s' is invalid: should be one of %r" % (
subset, list(sorted(label_filenames.keys()))))
index_file_path = join(lfw_home, label_filenames[subset])
# load and memoize the pairs as np arrays
pairs, target, target_names = load_func(
index_file_path, data_folder_path, resize=resize, color=color,
slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=pairs.reshape(len(pairs), -1), pairs=pairs,
target=target, target_names=target_names,
DESCR="'%s' segment of the LFW pairs dataset" % subset)
@deprecated("Function 'load_lfw_pairs' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_pairs(download_if_missing=False) instead.")
def load_lfw_pairs(download_if_missing=False, **kwargs):
"""Alias for fetch_lfw_pairs(download_if_missing=False)
Check fetch_lfw_pairs.__doc__ for the documentation and parameter list.
"""
return fetch_lfw_pairs(download_if_missing=download_if_missing, **kwargs)
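# A minimal usage sketch (not part of the original module). It assumes the LFW
# archives are already cached under the scikit-learn data home, or that they can
# be downloaded (~200MB); guarded so that importing this module stays
# side-effect free.
if __name__ == "__main__":
    people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)
    print("people data shape: %s" % (people.data.shape,))
    print("number of identities: %d" % len(people.target_names))
    pairs = fetch_lfw_pairs(subset='train')
    print("pairs array shape: %s" % (pairs.pairs.shape,))
    print("pair target names: %s" % (pairs.target_names,))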
| bsd-3-clause |
cbertinato/pandas | pandas/core/indexes/timedeltas.py | 1 | 26674 | """ implement the TimedeltaIndex """
from datetime import datetime
import warnings
import numpy as np
from pandas._libs import (
NaT, Timedelta, index as libindex, join as libjoin, lib)
from pandas.util._decorators import Appender, Substitution
from pandas.core.dtypes.common import (
_TD_DTYPE, ensure_int64, is_float, is_integer, is_list_like, is_scalar,
is_timedelta64_dtype, is_timedelta64_ns_dtype, pandas_dtype)
import pandas.core.dtypes.concat as _concat
from pandas.core.dtypes.missing import isna
from pandas.core.accessor import delegate_names
from pandas.core.arrays import datetimelike as dtl
from pandas.core.arrays.timedeltas import TimedeltaArray, _is_convertible_to_td
from pandas.core.base import _shared_docs
import pandas.core.common as com
from pandas.core.indexes.base import Index, _index_shared_docs
from pandas.core.indexes.datetimelike import (
DatetimeIndexOpsMixin, DatetimelikeDelegateMixin, maybe_unwrap_index,
wrap_arithmetic_op)
from pandas.core.indexes.numeric import Int64Index
from pandas.core.ops import get_op_result_name
from pandas.tseries.frequencies import to_offset
def _make_wrapped_arith_op(opname):
meth = getattr(TimedeltaArray, opname)
def method(self, other):
result = meth(self._data, maybe_unwrap_index(other))
return wrap_arithmetic_op(self, other, result)
method.__name__ = opname
return method
class TimedeltaDelegateMixin(DatetimelikeDelegateMixin):
# Most attrs are dispatched via datetimelike_{ops,methods}
# Some are "raw" methods, the result is not not re-boxed in an Index
# We also have a few "extra" attrs, which may or may not be raw,
    # which we don't want to expose in the .dt accessor.
_delegate_class = TimedeltaArray
_delegated_properties = (TimedeltaArray._datetimelike_ops + [
'components',
])
_delegated_methods = TimedeltaArray._datetimelike_methods + [
'_box_values',
]
_raw_properties = {
'components',
}
_raw_methods = {
'to_pytimedelta',
}
@delegate_names(TimedeltaArray,
TimedeltaDelegateMixin._delegated_properties,
typ="property")
@delegate_names(TimedeltaArray,
TimedeltaDelegateMixin._delegated_methods,
typ="method", overwrite=False)
class TimedeltaIndex(DatetimeIndexOpsMixin, dtl.TimelikeOps, Int64Index,
TimedeltaDelegateMixin):
"""
Immutable ndarray of timedelta64 data, represented internally as int64, and
which can be boxed to timedelta objects
Parameters
----------
data : array-like (1-dimensional), optional
Optional timedelta-like data to construct index with
    unit : str, optional
        The unit of the data (D, h, m, s, ms, us, ns), used when the data is
        given as an integer or float.
freq : string or pandas offset object, optional
One of pandas date offset strings or corresponding objects. The string
'infer' can be passed in order to set the frequency of the index as the
inferred frequency upon creation
copy : bool
Make a copy of input ndarray
start : starting value, timedelta-like, optional
If data is None, start is used as the start point in generating regular
timedelta data.
.. deprecated:: 0.24.0
periods : int, optional, > 0
Number of periods to generate, if generating index. Takes precedence
over end argument
.. deprecated:: 0.24.0
end : end time, timedelta-like, optional
If periods is none, generated index will extend to first conforming
time on or just past end argument
        .. deprecated:: 0.24.0
closed : string or None, default None
Make the interval closed with respect to the given frequency to
the 'left', 'right', or both sides (None)
        .. deprecated:: 0.24.0
name : object
Name to be stored in the index
Attributes
----------
days
seconds
microseconds
nanoseconds
components
inferred_freq
Methods
-------
to_pytimedelta
to_series
round
floor
ceil
to_frame
mean
See Also
--------
Index : The base pandas Index type.
Timedelta : Represents a duration between two dates or times.
DatetimeIndex : Index of datetime64 data.
PeriodIndex : Index of Period data.
timedelta_range : Create a fixed-frequency TimedeltaIndex.
Notes
-----
To learn more about the frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
Creating a TimedeltaIndex based on `start`, `periods`, and `end` has
been deprecated in favor of :func:`timedelta_range`.
"""
_typ = 'timedeltaindex'
_join_precedence = 10
def _join_i8_wrapper(joinf, **kwargs):
return DatetimeIndexOpsMixin._join_i8_wrapper(
joinf, dtype='m8[ns]', **kwargs)
_inner_indexer = _join_i8_wrapper(libjoin.inner_join_indexer_int64)
_outer_indexer = _join_i8_wrapper(libjoin.outer_join_indexer_int64)
_left_indexer = _join_i8_wrapper(libjoin.left_join_indexer_int64)
_left_indexer_unique = _join_i8_wrapper(
libjoin.left_join_indexer_unique_int64, with_indexers=False)
_engine_type = libindex.TimedeltaEngine
_comparables = ['name', 'freq']
_attributes = ['name', 'freq']
_is_numeric_dtype = True
_infer_as_myclass = True
_freq = None
_box_func = TimedeltaArray._box_func
_bool_ops = TimedeltaArray._bool_ops
_object_ops = TimedeltaArray._object_ops
_field_ops = TimedeltaArray._field_ops
_datetimelike_ops = TimedeltaArray._datetimelike_ops
_datetimelike_methods = TimedeltaArray._datetimelike_methods
_other_ops = TimedeltaArray._other_ops
# -------------------------------------------------------------------
# Constructors
def __new__(cls, data=None, unit=None, freq=None, start=None, end=None,
periods=None, closed=None, dtype=_TD_DTYPE, copy=False,
name=None, verify_integrity=None):
if verify_integrity is not None:
warnings.warn("The 'verify_integrity' argument is deprecated, "
"will be removed in a future version.",
FutureWarning, stacklevel=2)
else:
verify_integrity = True
if data is None:
freq, freq_infer = dtl.maybe_infer_freq(freq)
warnings.warn("Creating a TimedeltaIndex by passing range "
"endpoints is deprecated. Use "
"`pandas.timedelta_range` instead.",
FutureWarning, stacklevel=2)
result = TimedeltaArray._generate_range(start, end, periods, freq,
closed=closed)
return cls._simple_new(result._data, freq=freq, name=name)
if is_scalar(data):
raise TypeError('{cls}() must be called with a '
'collection of some kind, {data} was passed'
.format(cls=cls.__name__, data=repr(data)))
if unit in {'Y', 'y', 'M'}:
warnings.warn("M and Y units are deprecated and "
"will be removed in a future version.",
FutureWarning, stacklevel=2)
if isinstance(data, TimedeltaArray):
if copy:
data = data.copy()
return cls._simple_new(data, name=name, freq=freq)
if (isinstance(data, TimedeltaIndex) and
freq is None and name is None):
if copy:
return data.copy()
else:
return data._shallow_copy()
# - Cases checked above all return/raise before reaching here - #
tdarr = TimedeltaArray._from_sequence(data, freq=freq, unit=unit,
dtype=dtype, copy=copy)
return cls._simple_new(tdarr._data, freq=tdarr.freq, name=name)
@classmethod
def _simple_new(cls, values, name=None, freq=None, dtype=_TD_DTYPE):
# `dtype` is passed by _shallow_copy in corner cases, should always
# be timedelta64[ns] if present
if not isinstance(values, TimedeltaArray):
values = TimedeltaArray._simple_new(values, dtype=dtype,
freq=freq)
else:
if freq is None:
freq = values.freq
assert isinstance(values, TimedeltaArray), type(values)
assert dtype == _TD_DTYPE, dtype
assert values.dtype == 'm8[ns]', values.dtype
tdarr = TimedeltaArray._simple_new(values._data, freq=freq)
result = object.__new__(cls)
result._data = tdarr
result.name = name
# For groupby perf. See note in indexes/base about _index_data
result._index_data = tdarr._data
result._reset_identity()
return result
# -------------------------------------------------------------------
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if isinstance(state, dict):
super().__setstate__(state)
else:
raise Exception("invalid pickle state")
_unpickle_compat = __setstate__
def _maybe_update_attributes(self, attrs):
""" Update Index attributes (e.g. freq) depending on op """
freq = attrs.get('freq', None)
if freq is not None:
# no need to infer if freq is None
attrs['freq'] = 'infer'
return attrs
# -------------------------------------------------------------------
# Rendering Methods
@property
def _formatter_func(self):
from pandas.io.formats.format import _get_format_timedelta64
return _get_format_timedelta64(self, box=True)
def _format_native_types(self, na_rep='NaT', date_format=None, **kwargs):
from pandas.io.formats.format import Timedelta64Formatter
return Timedelta64Formatter(values=self,
nat_rep=na_rep,
justify='all').get_result()
# -------------------------------------------------------------------
# Wrapping TimedeltaArray
__mul__ = _make_wrapped_arith_op("__mul__")
__rmul__ = _make_wrapped_arith_op("__rmul__")
__floordiv__ = _make_wrapped_arith_op("__floordiv__")
__rfloordiv__ = _make_wrapped_arith_op("__rfloordiv__")
__mod__ = _make_wrapped_arith_op("__mod__")
__rmod__ = _make_wrapped_arith_op("__rmod__")
__divmod__ = _make_wrapped_arith_op("__divmod__")
__rdivmod__ = _make_wrapped_arith_op("__rdivmod__")
__truediv__ = _make_wrapped_arith_op("__truediv__")
__rtruediv__ = _make_wrapped_arith_op("__rtruediv__")
# Compat for frequency inference, see GH#23789
_is_monotonic_increasing = Index.is_monotonic_increasing
_is_monotonic_decreasing = Index.is_monotonic_decreasing
_is_unique = Index.is_unique
@property
def _box_func(self):
return lambda x: Timedelta(x, unit='ns')
def __getitem__(self, key):
result = self._data.__getitem__(key)
if is_scalar(result):
return result
return type(self)(result, name=self.name)
# -------------------------------------------------------------------
@Appender(_index_shared_docs['astype'])
def astype(self, dtype, copy=True):
dtype = pandas_dtype(dtype)
if is_timedelta64_dtype(dtype) and not is_timedelta64_ns_dtype(dtype):
# Have to repeat the check for 'timedelta64' (not ns) dtype
# so that we can return a numeric index, since pandas will return
# a TimedeltaIndex when dtype='timedelta'
result = self._data.astype(dtype, copy=copy)
if self.hasnans:
return Index(result, name=self.name)
return Index(result.astype('i8'), name=self.name)
return DatetimeIndexOpsMixin.astype(self, dtype, copy=copy)
def _union(self, other, sort):
if len(other) == 0 or self.equals(other) or len(self) == 0:
return super()._union(other, sort=sort)
if not isinstance(other, TimedeltaIndex):
try:
other = TimedeltaIndex(other)
except (TypeError, ValueError):
pass
this, other = self, other
if this._can_fast_union(other):
return this._fast_union(other)
else:
result = Index._union(this, other, sort=sort)
if isinstance(result, TimedeltaIndex):
if result.freq is None:
result.freq = to_offset(result.inferred_freq)
return result
def join(self, other, how='left', level=None, return_indexers=False,
sort=False):
"""
See Index.join
"""
if _is_convertible_to_index(other):
try:
other = TimedeltaIndex(other)
except (TypeError, ValueError):
pass
return Index.join(self, other, how=how, level=level,
return_indexers=return_indexers,
sort=sort)
def intersection(self, other, sort=False):
"""
Specialized intersection for TimedeltaIndex objects.
May be much faster than Index.intersection
Parameters
----------
other : TimedeltaIndex or array-like
sort : False or None, default False
Sort the resulting index if possible.
.. versionadded:: 0.24.0
.. versionchanged:: 0.24.1
Changed the default to ``False`` to match the behaviour
from before 0.24.0.
.. versionchanged:: 0.25.0
The `sort` keyword is added
Returns
-------
y : Index or TimedeltaIndex
"""
return super().intersection(other, sort=sort)
def _wrap_joined_index(self, joined, other):
name = get_op_result_name(self, other)
if (isinstance(other, TimedeltaIndex) and self.freq == other.freq and
self._can_fast_union(other)):
joined = self._shallow_copy(joined, name=name)
return joined
else:
return self._simple_new(joined, name)
def _can_fast_union(self, other):
if not isinstance(other, TimedeltaIndex):
return False
freq = self.freq
if freq is None or freq != other.freq:
return False
if not self.is_monotonic or not other.is_monotonic:
return False
if len(self) == 0 or len(other) == 0:
return True
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
right_start = right[0]
left_end = left[-1]
# Only need to "adjoin", not overlap
return (right_start == left_end + freq) or right_start in left
def _fast_union(self, other):
if len(other) == 0:
return self.view(type(self))
if len(self) == 0:
return other.view(type(self))
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
left_end = left[-1]
right_end = right[-1]
# concatenate
if left_end < right_end:
loc = right.searchsorted(left_end, side='right')
right_chunk = right.values[loc:]
dates = _concat._concat_compat((left.values, right_chunk))
return self._shallow_copy(dates)
else:
return left
def _maybe_promote(self, other):
if other.inferred_type == 'timedelta':
other = TimedeltaIndex(other)
return self, other
def get_value(self, series, key):
"""
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
"""
if _is_convertible_to_td(key):
key = Timedelta(key)
return self.get_value_maybe_box(series, key)
try:
return com.maybe_box(self, Index.get_value(self, series, key),
series, key)
except KeyError:
try:
loc = self._get_string_slice(key)
return series[loc]
except (TypeError, ValueError, KeyError):
pass
try:
return self.get_value_maybe_box(series, key)
except (TypeError, ValueError, KeyError):
raise KeyError(key)
def get_value_maybe_box(self, series, key):
if not isinstance(key, Timedelta):
key = Timedelta(key)
values = self._engine.get_value(com.values_from_object(series), key)
return com.maybe_box(self, values, series, key)
def get_loc(self, key, method=None, tolerance=None):
"""
Get integer location for requested label
Returns
-------
loc : int
"""
if is_list_like(key) or (isinstance(key, datetime) and key is not NaT):
# GH#20464 datetime check here is to ensure we don't allow
# datetime objects to be incorrectly treated as timedelta
# objects; NaT is a special case because it plays a double role
# as Not-A-Timedelta
raise TypeError
if isna(key):
key = NaT
if tolerance is not None:
# try converting tolerance now, so errors don't get swallowed by
# the try/except clauses below
tolerance = self._convert_tolerance(tolerance, np.asarray(key))
if _is_convertible_to_td(key):
key = Timedelta(key)
return Index.get_loc(self, key, method, tolerance)
try:
return Index.get_loc(self, key, method, tolerance)
except (KeyError, ValueError, TypeError):
try:
return self._get_string_slice(key)
except (TypeError, KeyError, ValueError):
pass
try:
stamp = Timedelta(key)
return Index.get_loc(self, stamp, method, tolerance)
except (KeyError, ValueError):
raise KeyError(key)
def _maybe_cast_slice_bound(self, label, side, kind):
"""
If label is a string, cast it to timedelta according to resolution.
Parameters
----------
label : object
side : {'left', 'right'}
kind : {'ix', 'loc', 'getitem'}
Returns
-------
label : object
"""
assert kind in ['ix', 'loc', 'getitem', None]
if isinstance(label, str):
parsed = Timedelta(label)
lbound = parsed.round(parsed.resolution_string)
if side == 'left':
return lbound
else:
return (lbound + to_offset(parsed.resolution_string) -
Timedelta(1, 'ns'))
elif ((is_integer(label) or is_float(label)) and
not is_timedelta64_dtype(label)):
self._invalid_indexer('slice', label)
return label
def _get_string_slice(self, key):
if is_integer(key) or is_float(key) or key is NaT:
self._invalid_indexer('slice', key)
loc = self._partial_td_slice(key)
return loc
def _partial_td_slice(self, key):
# given a key, try to figure out a location for a partial slice
if not isinstance(key, str):
return key
raise NotImplementedError
@Substitution(klass='TimedeltaIndex')
@Appender(_shared_docs['searchsorted'])
def searchsorted(self, value, side='left', sorter=None):
if isinstance(value, (np.ndarray, Index)):
value = np.array(value, dtype=_TD_DTYPE, copy=False)
else:
value = Timedelta(value).asm8.view(_TD_DTYPE)
return self.values.searchsorted(value, side=side, sorter=sorter)
def is_type_compatible(self, typ):
return typ == self.inferred_type or typ == 'timedelta'
@property
def inferred_type(self):
return 'timedelta64'
@property
def is_all_dates(self):
return True
def insert(self, loc, item):
"""
Make new Index inserting new item at location
Parameters
----------
loc : int
item : object
            if not either a Timedelta-like or a numpy integer-like, the
            returned Index dtype will be object rather than timedelta64.
Returns
-------
new_index : Index
"""
# try to convert if possible
if _is_convertible_to_td(item):
try:
item = Timedelta(item)
except Exception:
pass
elif is_scalar(item) and isna(item):
# GH 18295
item = self._na_value
freq = None
if isinstance(item, Timedelta) or (is_scalar(item) and isna(item)):
# check freq can be preserved on edge cases
if self.freq is not None:
if ((loc == 0 or loc == -len(self)) and
item + self.freq == self[0]):
freq = self.freq
elif (loc == len(self)) and item - self.freq == self[-1]:
freq = self.freq
item = Timedelta(item).asm8.view(_TD_DTYPE)
try:
new_tds = np.concatenate((self[:loc].asi8, [item.view(np.int64)],
self[loc:].asi8))
return self._shallow_copy(new_tds, freq=freq)
except (AttributeError, TypeError):
# fall back to object index
if isinstance(item, str):
return self.astype(object).insert(loc, item)
raise TypeError(
"cannot insert TimedeltaIndex with incompatible label")
def delete(self, loc):
"""
Make a new TimedeltaIndex with passed location(s) deleted.
Parameters
----------
loc: int, slice or array of ints
Indicate which sub-arrays to remove.
Returns
-------
new_index : TimedeltaIndex
"""
new_tds = np.delete(self.asi8, loc)
freq = 'infer'
if is_integer(loc):
if loc in (0, -len(self), -1, len(self) - 1):
freq = self.freq
else:
if is_list_like(loc):
loc = lib.maybe_indices_to_slice(
ensure_int64(np.array(loc)), len(self))
if isinstance(loc, slice) and loc.step in (1, None):
if (loc.start in (0, None) or loc.stop in (len(self), None)):
freq = self.freq
return TimedeltaIndex(new_tds, name=self.name, freq=freq)
TimedeltaIndex._add_comparison_ops()
TimedeltaIndex._add_numeric_methods_unary()
TimedeltaIndex._add_logical_methods_disabled()
TimedeltaIndex._add_datetimelike_methods()
def _is_convertible_to_index(other):
"""
return a boolean whether I can attempt conversion to a TimedeltaIndex
"""
if isinstance(other, TimedeltaIndex):
return True
elif (len(other) > 0 and
other.inferred_type not in ('floating', 'mixed-integer', 'integer',
'mixed-integer-float', 'mixed')):
return True
return False
def timedelta_range(start=None, end=None, periods=None, freq=None,
name=None, closed=None):
"""
Return a fixed frequency TimedeltaIndex, with day as the default
frequency
Parameters
----------
start : string or timedelta-like, default None
Left bound for generating timedeltas
end : string or timedelta-like, default None
Right bound for generating timedeltas
periods : integer, default None
Number of periods to generate
freq : string or DateOffset, default 'D'
Frequency strings can have multiples, e.g. '5H'
name : string, default None
Name of the resulting TimedeltaIndex
closed : string, default None
Make the interval closed with respect to the given frequency to
the 'left', 'right', or both sides (None)
Returns
-------
rng : TimedeltaIndex
Notes
-----
Of the four parameters ``start``, ``end``, ``periods``, and ``freq``,
exactly three must be specified. If ``freq`` is omitted, the resulting
``TimedeltaIndex`` will have ``periods`` linearly spaced elements between
``start`` and ``end`` (closed on both sides).
To learn more about the frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
Examples
--------
>>> pd.timedelta_range(start='1 day', periods=4)
TimedeltaIndex(['1 days', '2 days', '3 days', '4 days'],
dtype='timedelta64[ns]', freq='D')
The ``closed`` parameter specifies which endpoint is included. The default
behavior is to include both endpoints.
>>> pd.timedelta_range(start='1 day', periods=4, closed='right')
TimedeltaIndex(['2 days', '3 days', '4 days'],
dtype='timedelta64[ns]', freq='D')
The ``freq`` parameter specifies the frequency of the TimedeltaIndex.
Only fixed frequencies can be passed, non-fixed frequencies such as
'M' (month end) will raise.
>>> pd.timedelta_range(start='1 day', end='2 days', freq='6H')
TimedeltaIndex(['1 days 00:00:00', '1 days 06:00:00', '1 days 12:00:00',
'1 days 18:00:00', '2 days 00:00:00'],
dtype='timedelta64[ns]', freq='6H')
Specify ``start``, ``end``, and ``periods``; the frequency is generated
automatically (linearly spaced).
>>> pd.timedelta_range(start='1 day', end='5 days', periods=4)
TimedeltaIndex(['1 days 00:00:00', '2 days 08:00:00', '3 days 16:00:00',
'5 days 00:00:00'],
dtype='timedelta64[ns]', freq=None)
"""
if freq is None and com._any_none(periods, start, end):
freq = 'D'
freq, freq_infer = dtl.maybe_infer_freq(freq)
tdarr = TimedeltaArray._generate_range(start, end, periods, freq,
closed=closed)
return TimedeltaIndex._simple_new(tdarr._data, freq=tdarr.freq, name=name)
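# A minimal usage sketch (not part of the original module), illustrating the
# insert/delete behaviour documented above; it uses only names defined or
# imported in this file and is guarded so that importing pandas is unaffected.
if __name__ == "__main__":
    tdi = timedelta_range(start='1 day', periods=4, freq='D')
    print(tdi.insert(0, Timedelta('0 days')))   # freq preserved at the left edge
    print(tdi.delete(-1))                       # freq preserved when trimming an end
    print(tdi.insert(2, 'not a timedelta'))     # falls back to an object-dtype Index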
| bsd-3-clause |
zmlabe/IceVarFigs | Scripts/SeaIce/plot_SIV_v2_PIOMAS.py | 1 | 5352 | """
Plots PIOMAS daily Sea Ice Volume for 1979-2019
Website : http://psc.apl.uw.edu/research/projects/arctic-sea-ice-volume-
anomaly/data/
Author : Zachary M. Labe
Date : 7 November 2019
"""
### Import modules
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as c
import datetime
import cmocean
### Directory and time
directorydata = './Data/'
directoryfigure = './Figures/'
year,day,volume = np.loadtxt(directorydata +
'PIOMAS.vol.daily.1979.2018.Current.v2.1.dat.gz',
skiprows=1,unpack=True)
### Current time
day = list(map(int,day))
now = datetime.datetime.now()
currentmn = str(now.month)
currentdy = str(now.day)
currentyr = str(now.year)
years = np.arange(1979,2020,1)
currenttime = currentmn + '_' + currentdy + '_' + currentyr
currentdoy = now.timetuple().tm_yday
month = datetime.date(int(currentyr),int(currentmn)-1,int(currentdy)).strftime('%B')
### Reshape sea ice volumes arrays
currentyear = volume.copy()[-day[-1]:]
volumen = volume[:-day[-1]]
volumen = np.reshape(volumen,(volumen.shape[0]//365,365))
### Calculate mean volume
mean = np.nanmean(volumen,axis=0)
### x-coordinates
doy = np.arange(0,np.nanmax(day))
### Calculate minimum
minsiv = np.nanmin(volumen[:,day[-1]-1])
minyear = np.where(volumen[:,day[-1]-1] == minsiv)[0]
timeyr = years[minyear][0]
### Make plot
plt.rc('savefig', facecolor='black')
plt.rc('axes', edgecolor='darkgrey')
plt.rc('xtick', color='darkgrey')
plt.rc('ytick', color='darkgrey')
plt.rc('axes', labelcolor='darkgrey')
plt.rc('axes', facecolor='black')
plt.rc('text',usetex=True)
plt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']})
### Return information
print('\n' 'PIOMAS -- Sea Ice Volume --', now.strftime("%Y-%m-%d %H:%M"), '\n' '\n')
print('Completed: Reading Data!')
print('Completed: Reshaping Data!' '\n' '\n')
print('Current Sea Ice Volume = %s [x1000 km^3]' % currentyear[-1])
print('Lowest previous record = %s --> %s [x1000 km^3]' % (timeyr,minsiv))
### Plot Arctic sea ice volume
fig = plt.figure()
ax = plt.subplot(111)
### Adjust axes in time series plots
def adjust_spines(ax, spines):
for loc, spine in ax.spines.items():
if loc in spines:
spine.set_position(('outward', 5))
else:
spine.set_color('none')
if 'left' in spines:
ax.yaxis.set_ticks_position('left')
else:
ax.yaxis.set_ticks([])
if 'bottom' in spines:
ax.xaxis.set_ticks_position('bottom')
else:
ax.xaxis.set_ticks([])
ax.tick_params('both',length=5.5,width=2,which='major')
adjust_spines(ax, ['left','bottom'])
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
ax.spines['left'].set_linewidth(2)
ax.spines['bottom'].set_linewidth(2)
### Labeling (subject to change!)
color=iter(cmocean.cm.balance(np.linspace(0.05,0.95,volumen.shape[0])))
for i in range(volumen.shape[0]):
if i == 333:
c = 'm'
l = 1.2
plt.plot(doy,volumen[i,:],c=c,zorder=3,linewidth=l,label='Year 2012')
elif i == 393:
c = 'gold'
l = 1.2
plt.plot(doy,volumen[i,:],c=c,zorder=3,linewidth=l,label='Year 2018')
else:
c=next(color)
l = 1.5
plt.plot(doy,volumen[i,:],c=c,zorder=1,linewidth=l,alpha=1)
if any([i==1,i==11,i==21,i==31]):
if i == 31:
plt.text(376,np.nanmean(volumen[i:i+9,-1],axis=0),r'\textbf{%ss}*' % years[i],
color=c,fontsize=9,ha='center',va='center')
else:
plt.text(374,np.nanmean(volumen[i:i+10,-1],axis=0),r'\textbf{%ss}' % years[i],
color=c,fontsize=9,ha='center',va='center')
### Plot Figure
plt.plot(doy[:day[-1]],currentyear,color='gold',linewidth=1.6,
label='Year 2019',zorder=6)
plt.scatter(day[-1]-1,currentyear[-1],
s=24,color='gold',zorder=4,marker='o')
### Organize axes
plt.ylabel(r'\textbf{Volume [$\times$1000 km$^{3}$]}',fontsize=16,
color='darkgrey')
xlabels = [r'Jan',r'Feb',r'Mar',r'Apr',r'May',r'Jun',r'Jul',
r'Aug',r'Sep',r'Oct',r'Nov',r'Dec',r'Jan']
plt.xticks(np.arange(0,361,30),xlabels,rotation=0,fontsize=9)
plt.xlim([0,360])
plt.yticks(np.arange(0,int(np.nanmax(volume)+2),3),
map(str,np.arange(0,
int(np.nanmax(volume)+2),3)),fontsize=10)
plt.ylim([0,int(np.nanmax(volume))])
### Squeeze figure
plt.subplots_adjust(bottom=0.15)
### Source data
plt.text(0.,1.7,r'\textbf{DATA:} PIOMAS v2.1 (Zhang and Rothrock, 2003)',
fontsize=5,rotation='horizontal',ha='left',color='darkgrey')
plt.text(0.,0.9,r'\textbf{SOURCE:} http://psc.apl.uw.edu/research/projects/arctic-sea-ice-volume-anomaly/',
fontsize=5,rotation='horizontal',ha='left',color='darkgrey')
plt.text(0.,0.1,r'\textbf{GRAPHIC:} Zachary Labe (@ZLabe)',
fontsize=5,rotation='horizontal',ha='left',color='darkgrey')
plt.text(day[-1]-14,currentyear[-1]-3.5,r'\textbf{%s}' % currentyr,fontsize=12,color='gold')
plt.title(r'\textbf{ARCTIC SEA ICE (1979-%s)}' % currentyr,
fontsize=25,color='w')
### Save figure
plt.savefig(directoryfigure + 'SIV_PIOMAS_September_v2.png',dpi=900)
print('\n' '\n' 'Completed: Figure plotted!')
| mit |
fivejjs/ibis | scripts/airline.py | 7 | 1268 | import ibis
import os
import pandas
def wrangle_csvs():
years = range(1987, 2009)
for year in years:
path = '%d.csv.bz2' % year
outpath = os.path.expanduser('~/data/%d_clean.csv' % year)
print 'Working on %s' % path
df = pandas.read_csv(path, compression='bz2')
df.to_csv(outpath, header=False, index=False,
float_format='%g', na_rep='\N')
schema = ibis.schema([
('year', 'int32'),
('month', 'int8'),
('day', 'int8'),
('dayofweek', 'int8'),
('dep_time', 'int32'),
('crs_dep_time', 'int32'),
('arr_time', 'int32'),
('crs_arr_time', 'int32'),
('carrier', 'string'),
('flight_num', 'int32'),
('tail_num', 'int32'),
('actual_elapsed_time', 'int32'),
('crs_elapsed_time', 'int32'),
('airtime', 'int32'),
('arrdelay', 'int32'),
('depdelay', 'int32'),
('origin', 'string'),
('dest', 'string'),
    ('distance', 'int32'),
('taxi_in', 'int32'),
('taxi_out', 'int32'),
('cancelled', 'int8'),
('cancellation_code', 'string'),
('diverted', 'int8'),
('carrier_delay', 'int32'),
('weather_delay', 'int32'),
('nas_delay', 'int32'),
('security_delay', 'int32'),
('late_aircraft_delay', 'int32')
])
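# A minimal sketch (not part of the original script) showing how the helper above
# might be invoked; it assumes the per-year '<year>.csv.bz2' files sit in the
# working directory and that '~/data/' exists for the cleaned output.
if __name__ == '__main__':
    wrangle_csvs()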
| apache-2.0 |
virneo/opencog | opencog/embodiment/Monitor/monitor_widget.py | 17 | 6067 | #
# Widgets each is in charge of a mind agent within OAC.
# The main job of a widget is getting data from Plaza within OAC and drawing graphs
#
# @author: Zhenhua Cai, [email protected]
# @date: 2011-11-18
#
# @note: I borrowed some code from
# http://matplotlib.sourceforge.net/examples/user_interfaces/embedding_in_qt4.html
#
from matplotlib.figure import Figure
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from PyQt4 import QtGui, QtCore
import zmq
import json
from common import *
import matplotlib as mpl
mpl.rcParams['lines.linewidth'] = 1.0
mpl.rcParams['font.size'] = 8.0
mpl.rcParams['axes.titlesize'] = 'large'
mpl.rcParams['legend.fancybox'] = True
mpl.rcParams['legend.fontsize'] = 'large'
mpl.rcParams['legend.shadow'] = False
mpl.rcParams['legend.numpoints'] = 1
mpl.rcParams['figure.facecolor'] = 'white'
# for black background
#mpl.rcParams['axes.facecolor'] = 'black'
#mpl.rcParams['axes.edgecolor'] = 'white'
mpl.rcParams['figure.subplot.left'] = 0.05 # the left side of the subplots of the figure
mpl.rcParams['figure.subplot.right'] = 0.95 # the right side of the subplots of the figure
mpl.rcParams['figure.subplot.bottom'] = 0.05 # the bottom of the subplots of the figure
mpl.rcParams['figure.subplot.top'] = 0.90 # the top of the subplots of the figure
class MonitorWidget(FigureCanvas):
""" Qt4 backend of matplot, which provides a canvas for plotting.
The actual plotting is done within the MonitorThread class automatically.
"""
clicked = QtCore.pyqtSignal()
def __init__(self, publish_endpoint, filter_key,
parent=None, width=5, height=4, dpi=100):
self.filter_key = filter_key
self.tick_interval = 0.01 # real time (in sec) = timestamp * tick_interval
# Initialize figure canvas
self.figure = Figure(figsize=(width, height), dpi=dpi)
self.axes = self.figure.add_subplot(111)
self.axes.hold(True) # Axes would be cleared each time plot() is called
FigureCanvas.__init__(self, self.figure)
self.setParent(parent)
FigureCanvas.setSizePolicy(self,
QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Expanding
)
FigureCanvas.updateGeometry(self)
# Initialize variables related to graph
self.max_data_len = 50
# timestamps can be erratic, so we can't assume they'll be
# evenly spaced
self.max_time_period = 30000
self.has_initialized = False
# expected format for data to be plotted is:
# { "timestamp": [1,2,...]
# "label1" : [val1,val2,...],
# "label2" : [val1,val2,...],
# }
self.data_dict = {}
self.legend_list = []
# Create and start ZeroMQ subscriber thread
self.zmq_subscriber_thread = ZmqSubscriberThread(self,
publish_endpoint,
filter_key)
self.zmq_subscriber_thread.start()
# Initialize data, legend and table title of recording file
def initialize_data(self, json_dict):
self.initialTimeStamp = json_dict["timestamp"]
f = file(self.filter_key, "w")
f.write("time\t\t")
for k, v in json_dict.iteritems():
self.data_dict[k] = [v]
if k != "timestamp":
self.legend_list.append(k)
f.write(k+"\t\t")
f.write("\n")
f.close()
self.has_initialized = True
# Update data list and also record them in the external file named after filter_key
def update_data(self, json_dict):
self.latestTimeStamp = json_dict["timestamp"]
forgetFirst = False
f = file(self.filter_key, "a")
elapsed_time = (self.latestTimeStamp - self.initialTimeStamp) * self.tick_interval
f.write(str(elapsed_time) + "\t\t")
# forget old data
if self.latestTimeStamp - self.data_dict["timestamp"][0] > self.max_time_period or \
len(self.data_dict["timestamp"]) > self.max_data_len:
forgetFirst = True
for k, v in json_dict.iteritems():
self.data_dict[k].append(v)
if forgetFirst: self.data_dict[k].pop(0)
if k != "timestamp": f.write(str(v)+"\t\t")
f.write("\n")
f.close()
# Draw the graph on the widget
def draw_graph(self):
self.axes.clear()
max_t = max(self.data_dict["timestamp"])
t_minus = [(x - self.latestTimeStamp)*self.tick_interval for x in self.data_dict["timestamp"]]
for k in self.data_dict:
if k == "timestamp": continue
self.axes.plot(t_minus, self.data_dict[k], '-+')
leg = self.axes.legend(self.legend_list,
'upper left',
shadow=True)
self.axes.set_title(self.zmq_subscriber_thread.filter_key)
self.axes.grid(True)
self.axes.set_xlim(-self.max_time_period*self.tick_interval,0)
self.axes.set_ylim(0,1)
self.draw()
@pyqtSlot(dict)
def handle_data_update(self, json_dict):
"""
Process the data in json format
"""
if not self.has_initialized:
self.initialize_data(json_dict)
self.update_data(json_dict)
        # Draw the graph only when no other graph is being rendered.
        # In principle, the global lock is not necessary,
        # however drawing the graph is very CPU consuming, so
        # introducing this limit may make the GUI more responsive
if self.isVisible():
# glb.gui_read_write_lock.lockForWrite()
self.draw_graph()
# glb.gui_read_write_lock.unlock()
def mousePressEvent(self, event):
if event.button() == QtCore.Qt.LeftButton:
self.clicked.emit()
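# A minimal sketch (not part of the original module) of the JSON payload layout
# that handle_data_update() above expects: a "timestamp" key plus one entry per
# plotted quantity. The key names below are made up purely for illustration.
if __name__ == "__main__":
    sampleMessage = {"timestamp": 123456,
                     "Energy": 0.80,
                     "Integrity": 0.95}
    print json.dumps(sampleMessage)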
| agpl-3.0 |
rtidatascience/connected-nx-tutorial | notebooks/helpers.py | 1 | 1190 | import networkx as nx
from seaborn import color_palette, set_style, palplot
def create_color_map(G, attribute, seaborn_palette="colorblind"):
"""Return a list of hex color mappings for node attributes"""
attributes = [G.node[label][attribute] for label in G.nodes()]
# get the set of possible attributes
attributes_unique = list(set(attributes))
num_values = len(attributes_unique)
# generate color palette from seaborn
palette = color_palette(seaborn_palette, num_values).as_hex()
# create a mapping of attribute to color
color_map = dict(zip(attributes_unique, palette))
# map the attribute for each node to the color it represents
node_colors = [color_map[attribute] for attribute in attributes]
return node_colors, color_map, palette
def map_communities(G, communities):
"""Return a mapping of community membership from a community set tuple"""
community_map = {}
for node in G.nodes():
for i, comm in enumerate(communities):
if node in comm:
community_map[node] = i
if community_map.get(node, None) is None:
community_map[node] = None
return community_map
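# A minimal usage sketch (not part of the original module): the karate club graph
# ships with networkx and its nodes carry a 'club' attribute, and the two-set
# community split below is written by hand so that no particular community
# detection algorithm is assumed.
if __name__ == "__main__":
    G = nx.karate_club_graph()
    node_colors, color_map, palette = create_color_map(G, 'club')
    print(color_map)
    communities = (set(n for n in G.nodes() if n < 17),
                   set(n for n in G.nodes() if n >= 17))
    print(map_communities(G, communities))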
| mit |
walterreade/scikit-learn | examples/ensemble/plot_gradient_boosting_regularization.py | 355 | 2843 | """
================================
Gradient Boosting regularization
================================
Illustration of the effect of different regularization strategies
for Gradient Boosting. The example is taken from Hastie et al 2009.
The loss function used is binomial deviance. Regularization via
shrinkage (``learning_rate < 1.0``) improves performance considerably.
In combination with shrinkage, stochastic gradient boosting
(``subsample < 1.0``) can produce more accurate models by reducing the
variance via bagging.
Subsampling without shrinkage usually does poorly.
Another strategy to reduce the variance is by subsampling the features
analogous to the random splits in Random Forests
(via the ``max_features`` parameter).
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn import datasets
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X = X.astype(np.float32)
# map labels from {-1, 1} to {0, 1}
labels, y = np.unique(y, return_inverse=True)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
original_params = {'n_estimators': 1000, 'max_leaf_nodes': 4, 'max_depth': None, 'random_state': 2,
'min_samples_split': 5}
plt.figure()
for label, color, setting in [('No shrinkage', 'orange',
{'learning_rate': 1.0, 'subsample': 1.0}),
('learning_rate=0.1', 'turquoise',
{'learning_rate': 0.1, 'subsample': 1.0}),
('subsample=0.5', 'blue',
{'learning_rate': 1.0, 'subsample': 0.5}),
('learning_rate=0.1, subsample=0.5', 'gray',
{'learning_rate': 0.1, 'subsample': 0.5}),
('learning_rate=0.1, max_features=2', 'magenta',
{'learning_rate': 0.1, 'max_features': 2})]:
params = dict(original_params)
params.update(setting)
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
# compute test set deviance
test_deviance = np.zeros((params['n_estimators'],), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
# clf.loss_ assumes that y_test[i] in {0, 1}
test_deviance[i] = clf.loss_(y_test, y_pred)
plt.plot((np.arange(test_deviance.shape[0]) + 1)[::5], test_deviance[::5],
'-', color=color, label=label)
plt.legend(loc='upper left')
plt.xlabel('Boosting Iterations')
plt.ylabel('Test Set Deviance')
plt.show()
| bsd-3-clause |
mattyowl/fitScalingRelation | fitScalingRelation/fitScalingRelationLib.py | 1 | 62047 | """
The MCMC fitting code used in Hilton et al. (2012), in a more general purpose form
Copyright 2015 Matt Hilton ([email protected])
This file is part of fitScalingRelation.
fitScalingRelation is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
fitScalingRelation is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with fitScalingRelation. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import sys
import math
import string
from astLib import *
import pylab as plt
import numpy as np
import astropy.table as atpy
import popen2
from scipy import stats
from scipy import special
from scipy import interpolate
from scipy import ndimage
import pyximport; pyximport.install()
import cythonScalingRelation as csr
import time
import pickle
import matplotlib
import IPython
np.random.seed()
plt.matplotlib.interactive(False)
# For some unknown reason, mathtext in matplotlib is behaving weirdly since Ubuntu 16.10 upgrade
#try:
#plt.matplotlib.rc('text', usetex=True)
#except:
#pass
#-------------------------------------------------------------------------------------------------------------
# Adopt Ed's cosmology
#astCalc.OMEGA_M0=0.27
#astCalc.OMEGA_L=0.73
#-------------------------------------------------------------------------------------------------------------
def ask_for( key ):
s = raw_input( "ParametersDict: enter value for '%s': " % key )
try:
val = eval(s)
except NameError:
# allow people to enter unquoted strings
val = s
return val
class ParametersDict( dict ):
def __getitem__( self, key ):
if key not in self:
print "ParametersDict: parameter '%s' not found" % key
val = ask_for( key )
print "ParametersDict: setting '%s' = %s" % (key,repr(val))
dict.__setitem__( self, key, val )
return dict.__getitem__( self, key )
def read_from_file( self, filename ):
f = open( filename )
old = ''
for line in f:
line = line.strip()
if len(line) == 0 or line[0] == '#':
continue
s = line.split('#')
line = s[0]
#if line[-1] == '\\':
#s = line.split('\\')
#if len(s) > 1:
#old = string.join([old, s[0]])
#continue
#else:
#line = string.join([old, s[0]])
#old = ''
##IPython.embed()
##sys.exit()
s = line.split('=')
if len(s) != 2:
print "Error parsing line:"
print line
IPython.embed()
sys.exit()
continue
try:
key = s[0].strip()
val = eval(s[1].strip()) # XXX:make safer
except:
raise Exception, "can't parse line: %s" % (line)
self[key] = val
f.close()
def write_to_file( self, filename, mode = 'w' ):
f = open( filename, mode )
keys = self.keys()
keys.sort()
for key in keys:
f.write( "%s = %s\n" % (key,repr(self[key])) )
f.close()
def cmp( self, otherDict ):
diff = []
ks = self.keys()
for k in ks:
try:
                if otherDict[k] == self[k]:
continue
diff += [k]
break
except KeyError:
diff += [k]
return otherDict
#-------------------------------------------------------------------------------------------------------------
def selectStartParsFromPriors(settingsDict):
"""Choose random starting values for the MCMC from the priors we're placing on the parameters.
"""
variables=settingsDict['variables']
pars=np.zeros(len(variables))
for i in range(len(variables)):
v=variables[i]
if settingsDict['%sFit' % (v)] == 'fixed':
pars[i]=settingsDict['%s0' % (v)]
else:
pars[i]=np.random.uniform(settingsDict['prior_%s_MIN' % (v)], settingsDict['prior_%s_MAX' % (v)])
# This makes sure that if we're testing by swapping axes, we can use the same prior ranges
if 'swapAxes' in settingsDict.keys() and settingsDict['swapAxes'] == True:
b=1.0/pars[1]
a=-pars[0]/pars[1]
pars[0]=a
pars[1]=b
return pars
#-------------------------------------------------------------------------------------------------------------
def getPPrior(pPars, settingsDict):
"""Gets prior probability.
"""
variables=settingsDict['variables']
# This makes sure that if we're testing by swapping axes, we can use the same prior ranges
if 'swapAxes' in settingsDict.keys() and settingsDict['swapAxes'] == True:
b=1.0/pPars[1]
a=-pPars[0]/pPars[1]
pPars[0]=a
pPars[1]=b
priors=np.zeros(len(variables))
for i in range(len(variables)):
v=variables[i]
if pPars[i] > settingsDict['prior_%s_MIN' % (v)] and pPars[i] < settingsDict['prior_%s_MAX' % (v)]:
priors[i]=1.0
else:
priors[i]=0.0
# Fixed parameters must surely be within the priors...
if settingsDict['%sFit' % (v)] == 'fixed':
priors[i]=1.0
pPrior=np.product(priors)
return pPrior
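#-------------------------------------------------------------------------------------------------------------
def exampleFitSettings():
    """Illustrative only (not part of the original library): returns a minimal settingsDict of the
    kind selectStartParsFromPriors() and getPPrior() above rely on - a 'variables' list, a '<v>Fit'
    entry per variable ('fixed' or otherwise), a '<v>0' value for fixed variables, and
    'prior_<v>_MIN' / 'prior_<v>_MAX' bounds. The variable names used here are assumptions.
    """
    return {'variables': ['A', 'B', 'S'],
            'AFit': 'free', 'prior_A_MIN': -1.0, 'prior_A_MAX': 1.0,
            'BFit': 'free', 'prior_B_MIN': 0.0, 'prior_B_MAX': 5.0,
            'SFit': 'fixed', 'S0': 0.1, 'prior_S_MIN': 0.0, 'prior_S_MAX': 1.0}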
#-------------------------------------------------------------------------------------------------------------
def byteSwapArr(arr):
"""FITS is big-endian, but cython likes native-endian arrays (little-endian for x86)... so, byteswap
if we need.
"""
if arr.dtype.byteorder == '>':
arr=arr.byteswap().newbyteorder('=')
return arr
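#-------------------------------------------------------------------------------------------------------------
def byteSwapArrExample():
    """Illustrative only (not part of the original library): shows the situation byteSwapArr() is
    meant to handle - columns read from FITS tables are often big-endian, while the cython routines
    want native byte order. Not called anywhere.
    """
    bigEndianArr=np.arange(5, dtype='>f8')  # big-endian, as FITS table columns often are
    nativeArr=byteSwapArr(bigEndianArr)     # same values, native byte order
    return bigEndianArr.dtype.byteorder, nativeArr.dtype.byteorder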
#-------------------------------------------------------------------------------------------------------------
def sampleGetter(settingsDict, sampleDef, outDir):
"""Loads in catalogue in .fits table format, and add columns xToFit, yToFit, xErrToFit, yErrToFit,
which are fed into the MCMCFit routine. Applies any asked for scalings and cuts according to the
contents of settingsDict and sampleDef.
"""
# Stuff we need from settings...
xColumnName=settingsDict['xColumnName']
xPlusErrColumnName=settingsDict['xPlusErrColumnName']
xMinusErrColumnName=settingsDict['xMinusErrColumnName']
yColumnName=settingsDict['yColumnName']
yPlusErrColumnName=settingsDict['yPlusErrColumnName']
yMinusErrColumnName=settingsDict['yMinusErrColumnName']
xPivot=settingsDict['xPivot']
yPivot=settingsDict['yPivot']
xTakeLog10=settingsDict['xTakeLog10']
yTakeLog10=settingsDict['yTakeLog10']
redshiftColumnName=settingsDict['redshiftColumnName']
xScaleFactor=settingsDict['xScaleFactor']
yScaleFactor=settingsDict['yScaleFactor']
yScaleFactorPower=settingsDict['yScaleFactorPower']
newTab=atpy.Table().read(settingsDict['inFileName'])
# Make a new table here with cuts applied
# NOTE: we really need a better way of labelling constraints
for key in sampleDef:
if key not in ['label', 'plotLabel']:
if key[-4:] == '_MIN':
col=key[:-4]
newTab=newTab[np.where(newTab[col] > sampleDef[key])]
elif key[-4:] == '_MAX':
col=key[:-4]
newTab=newTab[np.where(newTab[col] < sampleDef[key])]
else:
if type(sampleDef[key]) != list:
newTab=newTab[np.where(newTab[key] == sampleDef[key])]
else:
print "Need to add more sampleDef key handling code"
IPython.embed()
sys.exit()
if len(newTab) == 0:
print "Hmm... all objects cut? empty newTab"
IPython.embed()
sys.exit()
# Value added useful columns
Ez=[]
for row in newTab:
Ez.append(astCalc.Ez(row[redshiftColumnName]))
newTab.add_column(atpy.Column(Ez, 'E(z)'))
# Add columns we will fit to, scaling and applying log10 as necessary
# We apply pivots here also (undo them, if necessary, elsewhere)
stab=newTab
# We should probably make this default
if xPivot == "median":
xPivot=np.median(newTab[xColumnName])
settingsDict['xPivot']=xPivot
if yPivot == "median":
yPivot=np.median(newTab[yColumnName])
settingsDict['yPivot']=yPivot
if yScaleFactor == "E(z)":
yScaling=np.power(stab["E(z)"], yScaleFactorPower)
elif yScaleFactor == None:
yScaling=np.ones(len(stab))
else:
raise Exception, "didn't understand yScaleFactor"
if xTakeLog10 == True:
xToFit=np.log10(stab[xColumnName]/xPivot)
xErrToFitPlus=np.log10((stab[xColumnName]+stab[xPlusErrColumnName])/xPivot)-xToFit
xErrToFitMinus=xToFit-np.log10((stab[xColumnName]-stab[xMinusErrColumnName])/xPivot)
else:
xToFit=stab[xColumnName]
xErrToFitPlus=stab[xPlusErrColumnName]
xErrToFitMinus=stab[xMinusErrColumnName]
if yTakeLog10 == True:
yToFit=np.log10(yScaling*stab[yColumnName]/yPivot)
yErrToFitPlus=np.log10(yScaling*(stab[yColumnName]+stab[yPlusErrColumnName])/yPivot)-yToFit
yErrToFitMinus=yToFit-np.log10(yScaling*(stab[yColumnName]-stab[yMinusErrColumnName])/yPivot)
else:
yToFit=stab[yColumnName]
yErrToFitPlus=stab[yPlusErrColumnName]
yErrToFitMinus=stab[yMinusErrColumnName]
# Swap
if xToFit.dtype.byteorder == '>':
xToFit=xToFit.byteswap().newbyteorder('=')
stab.add_column(atpy.Column(xToFit, 'xToFit'))
stab.add_column(atpy.Column(xErrToFitPlus, 'xErrToFitPlus'))
stab.add_column(atpy.Column(xErrToFitMinus, 'xErrToFitMinus'))
stab.add_column(atpy.Column(yToFit, 'yToFit'))
stab.add_column(atpy.Column(yErrToFitPlus, 'yErrToFitPlus'))
stab.add_column(atpy.Column(yErrToFitMinus, 'yErrToFitMinus'))
# If we ever get around to fiddling with detection probabilities again, change this...
if 'detPColumnName' in settingsDict.keys():
if settingsDict['detPColumnName'] != 'detP':
stab.add_column(atpy.Column(stab[settingsDict['detPColumnName']], 'detP'))
#stab['detP']=np.ones(len(stab))
#stab['detP']=stab['detP'].byteswap().newbyteorder()
#IPython.embed()
#sys.exit()
else:
stab.add_column(atpy.Column([1.0]*len(stab), 'detP'))
if 'ignoreSelectionFunction' in settingsDict.keys() and settingsDict['ignoreSelectionFunction'] == True:
stab['detP']=np.ones(len(stab))
if settingsDict['symmetriseErrors'] == True:
xAvErr=(stab['xErrToFitPlus']+stab['xErrToFitMinus'])/2.0
yAvErr=(stab['yErrToFitPlus']+stab['yErrToFitMinus'])/2.0
stab['xErrToFitPlus']=xAvErr
stab['xErrToFitMinus']=xAvErr
stab['yErrToFitPlus']=yAvErr
stab['yErrToFitMinus']=yAvErr
# Histograms of redshift and x property distribution, one above the other
# Fiddle with this later...
#print "plots"
#IPython.embed()
#sys.exit()
#fontDict={'size': 16}
#cols=1
#pylab.figure(figsize=(6, 8*cols))
#pylab.subplots_adjust(0.1, 0.06, 0.97, 0.97, 0.03, 0.12)
#pylab.subplot(2, 1, 1)
#pylab.hist(stab['redshift'], bins = numpy.linspace(0.0, 1.5, 16), histtype = 'stepfilled', color =
#'#A0A0A0', ec = '#A0A0A0')
#pylab.xlabel("$z$", fontdict = fontDict)
#pylab.ylabel("N", fontdict = fontDict)
#pylab.ylim(0, 60)
#pylab.subplot(2, 1, 2)
#pylab.hist(stab['temp'], bins = numpy.linspace(0, 12, 13), histtype = 'stepfilled', color =
#'#A0A0A0', ec = '#A0A0A0')
#pylab.xlabel("$T$ (keV)", fontdict = fontDict)
#pylab.ylabel("N", fontdict = fontDict)
##pylab.yticks(ylocs, [""]*len(ylabels))
#pylab.ylim(0, 60)
#pylab.savefig(outDir+os.path.sep+"zT_histograms.pdf")
#pylab.close()
return stab
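# Illustrative sketch, not part of the original pipeline: the sampleDef convention understood by
# sampleGetter() above -- '<column>_MIN' / '<column>_MAX' keys apply cuts, any other key (apart from
# 'label' and 'plotLabel') selects on equality. The column names here are placeholders and must match
# columns in the input .fits table.
def _exampleSampleDef():
    return {'label': 'lowz', 'plotLabel': 'low-$z$ subsample',
            'redshift_MIN': 0.0, 'redshift_MAX': 0.25}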
#-------------------------------------------------------------------------------------------------------------
def MCMCFit(settingsDict, tab):
"""My attempt at fitting using MCMC and maximum likelihood.
settingsDict = dictionary containing MCMC parameters and settings
You can choose whether to use the likelihood for 'bisector' or 'orthogonal' fitting using the 'method' key.
"""
# Can now swap axes for testing purposes
if 'swapAxes' in settingsDict.keys():
swapAxes=settingsDict['swapAxes']
else:
swapAxes=False
print "... swapAxes = ", swapAxes
# Choice of method
method=settingsDict['method']
if method == 'orthogonal':
likelihood=csr.fastOrthogonalLikelihood
variables=['A', 'B', 'C', 'S']
numFreePars=4
elif method == 'bisector':
likelihood=csr.fastBisectorLikelihood
variables=['A', 'B', 'C', 'Sx', 'Sy']
numFreePars=5
settingsDict['variables']=variables # A handy place to store this for cutting down code elsewhere
scales=[]
for v in variables:
scales.append(settingsDict['%sScale' % (v)])
# Start by writing this in python, but calling the likelihood function in cython
# MCMC parameters
numSamples=settingsDict['numSamples'] # Total number of random steps over likelihood surface
burnSamples=settingsDict['burnSamples'] # Throw away initial bunch of this many samples
thinning=settingsDict['thinning'] # Keep only every ith sample - good in some ways, bad in others
# Choice of evolution models
if settingsDict['evoModel'] == '1+z':
log10RedshiftEvo=np.log10(tab[settingsDict['redshiftColumnName']]+1)
elif settingsDict['evoModel'] == 'E(z)':
log10RedshiftEvo=np.log10(tab['E(z)'])
else:
        raise Exception, "didn't understand evoModel '%s'" % (settingsDict['evoModel'])
#log10RedshiftEvo=np.array(log10RedshiftEvo, dtype = float)
# To start with, we're going to use the same proposal distribution for everything
# But later on we could dig out the correlated random numbers code to generate random parameter values that
# satisfy the covariance we see between parameters, which would speed things up.
cPars=selectStartParsFromPriors(settingsDict)
#print "... starting values [A, B, C, S] = [%.2f, %.2f, %.2f, %.2f]" % (cA, cB, cC, cS)
# Byte swapping festival to keep cython happy
yToFit=byteSwapArr(tab['yToFit'])
yErrToFitPlus=byteSwapArr(tab['yErrToFitPlus'])
yErrToFitMinus=byteSwapArr(tab['yErrToFitMinus'])
xToFit=byteSwapArr(tab['xToFit'])
xErrToFitPlus=byteSwapArr(tab['xErrToFitPlus'])
xErrToFitMinus=byteSwapArr(tab['xErrToFitMinus'])
detP=byteSwapArr(tab['detP'])
# Another thing... fix this later properly... but if everything isn't same data type, cython falls over
yToFit=np.array(tab['yToFit'], dtype = np.float64)
yErrToFitPlus=np.array(tab['yErrToFitPlus'], dtype = np.float64)
yErrToFitMinus=np.array(tab['yErrToFitMinus'], dtype = np.float64)
xToFit=np.array(tab['xToFit'], dtype = np.float64)
xErrToFitPlus=np.array(tab['xErrToFitPlus'], dtype = np.float64)
xErrToFitMinus=np.array(tab['xErrToFitMinus'], dtype = np.float64)
log10RedshiftEvo=np.array(log10RedshiftEvo, dtype = np.float64)
detP=np.array(tab['detP'], dtype = np.float64)
if swapAxes == False:
try:
cProb, probArray=likelihood(cPars, yToFit, yErrToFitPlus, yErrToFitMinus, xToFit, xErrToFitPlus,
xErrToFitMinus, log10RedshiftEvo, detP)
except:
print "byte swapping problem?"
IPython.embed()
sys.exit()
else:
cProb, probArray=likelihood(cPars, xToFit, xErrToFitPlus, xErrToFitMinus, yToFit, yErrToFitPlus,
yErrToFitMinus, log10RedshiftEvo, detP)
if cProb == 0:
raise Exception, "initial position in MCMC chain has zero probability - change initial values/fiddle with priors in .par file?"
allPars=[] # == 'the Markov chain'
likelihoods=[]
# Metropolis-Hastings (actually just Metropolis since our candidate distribution is symmetric)
for k in range(numSamples):
# Progress update
tenPercent=numSamples/10
for j in range(0,11):
if k == j*tenPercent:
print "... "+str(j*10)+"% complete ..."
pPars=makeProposal(cPars, scales, settingsDict)
if swapAxes == False:
pProb, probArray=likelihood(pPars, yToFit, yErrToFitPlus, yErrToFitMinus, xToFit, xErrToFitPlus,
xErrToFitMinus, log10RedshiftEvo, detP)
else:
pProb, probArray=likelihood(pPars, xToFit, xErrToFitPlus, xErrToFitMinus, yToFit, yErrToFitPlus,
yErrToFitMinus, log10RedshiftEvo, detP)
if np.isinf(pProb) == True:
print "Hmm - infinite probability?"
IPython.embed()
sys.exit()
# Changed below because we're now dealing with log10 probabilities instead of the actual numbers
alpha=pProb-cProb
acceptProposal=False
if alpha > 0:
acceptProposal=True
else:
U=math.log10(np.random.uniform(0, 1))
if U <= alpha:
acceptProposal=True
# Our prior is uniform, so we're really just using it to force the answer into a range
# i.e. if it's not 1.0, then something has strayed out of the box.
pPrior=getPPrior(pPars, settingsDict)
if acceptProposal == True and pPrior == 1.0:
cPars=pPars
cProb=pProb
# Only keep samples after burning in and also thin as we go along
if k > burnSamples and k % thinning == 0:
# If we want to plot the trace (i.e. to check mixing) then we want to store these always in some fashion
# As it is, we're only keeping the ones that are drawn from the probability distributions
allPars.append(cPars)
likelihoods.append(pProb)
allPars=np.array(allPars)
likelihoods=np.array(likelihoods)
# If we swap axes, it's just easier to transform back into a form we know
if 'swapAxes' in settingsDict.keys() and settingsDict['swapAxes'] == True:
a=-allPars[:, 0]/allPars[:, 1]
b=1.0/allPars[:, 1]
allPars[:, 0]=a
allPars[:, 1]=b
    # Geweke test to check if the chain has converged
# If z < 2 then we're converged
index10Percent=int(len(allPars)*0.1)
index50Percent=int(len(allPars)*0.5)
mean10Percent=allPars[:index10Percent].mean(axis = 0)
mean50Percent=allPars[::-1][:index50Percent].mean(axis = 0)
var10Percent=allPars[:index10Percent].var(axis = 0)
var50Percent=allPars[::-1][:index50Percent].var(axis = 0)
zStatistic=(mean10Percent-mean50Percent)/np.sqrt(var10Percent+var50Percent)
zStatistic=np.nan_to_num(zStatistic)
# Zap entries in here that are fixed (avoids round off or div 0 making them look large when we don't care)
for i in range(len(variables)):
v=variables[i]
if settingsDict['%sFit' % (v)] == 'fixed':
zStatistic[i]=0.0
numFreePars=numFreePars-1
# Max likelihood values are simply the mean of the values in the probability distribution
# 1-sigma errors are similarly easy (could also use calc1SigmaError routine, but this is quicker)
resultsDict={}
for i in range(len(variables)):
v=variables[i]
resultsDict['%s' % (v)]=allPars[:, i].mean()
resultsDict['%sErr' % (v)]=calc68Percentile(allPars[:, i])
# Scott's translation of orthogonal scatter S into scatter in y-variable at fixed x-variable
if method == 'orthogonal':
s=allPars[:, 3]/np.cos(np.arctan(allPars[:, 1]))
resultsDict['s']=s.mean()
resultsDict['sErr']=calc68Percentile(s)
# We have numFreePars above
lnL=np.log(np.power(10, likelihoods))
resultsDict['AIC']=2*numFreePars-2*lnL.max()
resultsDict['AICc']=resultsDict['AIC']+(2*numFreePars*(numFreePars+1))/(float(len(tab))-numFreePars-1)
resultsDict['pars']=allPars
resultsDict['zStatistic']=zStatistic
# chi-sq
#yMod=(xToFit*resultsDict['B'])+resultsDict['A']+resultsDict['C']*log10RedshiftEvo
#chiSq=np.sum(np.power(yToFit-yMod, 2)/np.power(yErrToFitPlus, 2))
#resultsDict['chiSq']=chiSq
#print "check chiSq"
#IPython.embed()
#sys.exit()
return resultsDict
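# Illustrative driver sketch, not part of the original pipeline: how the pieces above are typically
# strung together. The .par file name and output directory are placeholders; the .par file is assumed
# to define all of the settings keys used by sampleGetter() and MCMCFit().
def _exampleRunFit(parFileName = 'example.par', outDir = 'exampleOutput'):
    settingsDict=ParametersDict()
    settingsDict.read_from_file(parFileName)
    if os.path.exists(outDir) == False:
        os.makedirs(outDir)
    sampleDef={'label': 'all', 'plotLabel': 'all clusters'}
    stab=sampleGetter(settingsDict, sampleDef, outDir)
    fitResults=MCMCFit(settingsDict, stab)
    return fitResults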
#-------------------------------------------------------------------------------------------------------------
def makeProposal(pars, scales, settingsDict):
"""Generates random set of parameters in format [A, B, C, S] for feeding into likelihood function.
Proposal distributions are assumed Gaussian with scales [AScale, BScale, CScale, SScale].
"""
# This makes sure that if we're testing by swapping axes, we can use the same prior scales
# To the same space as our scales
if 'swapAxes' in settingsDict.keys() and settingsDict['swapAxes'] == True:
b=1.0/pars[1]
a=-pars[0]/pars[1]
pars[0]=a
pars[1]=b
prop=np.random.normal(pars, scales)
# And back...
if 'swapAxes' in settingsDict.keys() and settingsDict['swapAxes'] == True:
b=1.0/prop[1]
a=-prop[0]/prop[1]
prop[0]=a
prop[1]=b
# Force scatters +ve
prop[3:]=abs(prop[3:])
if settingsDict['AFit'] == 'fixed':
prop[0]=settingsDict['A0']
if settingsDict['BFit'] == 'fixed':
prop[1]=settingsDict['B0']
if settingsDict['CFit'] == 'fixed':
prop[2]=settingsDict['C0']
if settingsDict['method'] == 'orthogonal':
if settingsDict['SFit'] == 'fixed':
prop[3]=settingsDict['S0']
elif settingsDict['method'] == 'bisector':
if settingsDict['SxFit'] == 'fixed':
prop[3]=settingsDict['Sx0']
if settingsDict['SyFit'] == 'fixed':
prop[4]=settingsDict['Sy0']
return prop
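# Illustrative sketch, not part of the original pipeline: the Metropolis acceptance rule used in
# MCMCFit() above, written out for a single proposed step. Probabilities are handled as log10 values,
# hence the subtraction.
def _exampleAcceptanceRule(log10ProbCurrent, log10ProbProposed):
    alpha=log10ProbProposed-log10ProbCurrent
    if alpha > 0:
        return True
    return math.log10(np.random.uniform(0, 1)) <= alpha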
#-------------------------------------------------------------------------------------------------------------
def make1DProbDensityPlots(fitResults, settingsDict, outDir):
"""Makes 1D plots of probability density distributions
"""
sigmaScale=5.0
bins=30
variables=settingsDict['variables']
axes=range(len(variables))
# Individual plots
#for v, a in zip(variables, axes):
#if settingsDict['%sFit' % (v)] == 'free':
#x=np.linspace(fitResults['%s' % (v)]-sigmaScale*fitResults['%sErr' % (v)],
#fitResults['%s' % (v)]+sigmaScale*fitResults['%sErr' % (v)], bins)
#P1D=LTCythonMCMC.fast1DProbProjection(x, a, fitResults['pars'])
#make1DPlot(x, P1D, '%s' % (v), '%s = %.3f $\pm$ %.3f' % (v, fitResults['%s' % (v)], fitResults['%sErr' % (v)]),
#outDir+os.path.sep+"1DProb_%s.pdf" % (v))
# Make an uber plot with multiple panels
cols=0
for v, a in zip(variables, axes):
if settingsDict['%sFit' % (v)] == 'free':
cols=cols+1
plt.figure(figsize=(4.5*cols, 3.94))
plt.subplots_adjust(0.02, 0.12, 0.98, 0.92, 0.1, 0.1)
count=0
for v, a in zip(variables, axes):
if settingsDict['%sFit' % (v)] == 'free':
count=count+1
x=np.linspace(fitResults['%s' % (v)]-sigmaScale*fitResults['%sErr' % (v)],
fitResults['%s' % (v)]+sigmaScale*fitResults['%sErr' % (v)], bins)
P1D=csr.fast1DProbProjection(x, a, fitResults['pars'])
P1D=P1D/P1D.max()
plt.subplot(1, cols, count)
ax=plt.gca()
y=P1D
fitLabel='%s = %.3f $\pm$ %.3f' % (v, fitResults['%s' % (v)], fitResults['%sErr' % (v)])
xLabel='%s' % (v)
plt.plot(x, y, 'k-', label = fitLabel)
plt.xlabel(xLabel, fontdict = {'size': 14})
plt.ylabel("")
plt.yticks([], [])
ax.xaxis.set_major_locator(matplotlib.ticker.MaxNLocator(6))
plt.ylim(0, 1.2)
leg=plt.legend(prop = {'size': 12})
leg.draw_frame(False)
plt.draw()
plt.savefig(outDir+os.path.sep+"1DProb_allPars.pdf")
plt.close()
#-------------------------------------------------------------------------------------------------------------
def make1DPlot(x, y, xLabel, fitLabel, outFileName):
"""Actually makes the 1D probability plots
"""
plt.plot(x, y, label = fitLabel)
plt.xlabel(xLabel)
plt.ylabel("")
plt.legend()
plt.savefig(outFileName)
plt.close()
#-------------------------------------------------------------------------------------------------------------
def makeContourPlots(fitResults, outDir, sampleLabel):
"""This takes fit results and turns it into contour plots.
"""
mlA, mlAErr=fitResults['A'], fitResults['AErr']
mlB, mlBErr=fitResults['B'], fitResults['BErr']
mlC, mlCErr=fitResults['C'], fitResults['CErr']
mlS, mlSErr=fitResults['S'], fitResults['SErr']
pars=fitResults['pars']
# Make 2d contour plots of valid combinations, determined by if they have a non null 1 sigma error
As=np.linspace(mlA-5.0*mlAErr-math.fmod(mlA-5.0*mlAErr, 0.1), mlA+7.0*mlAErr-math.fmod(mlA+7.0*mlAErr, 0.1), 81)
Bs=np.linspace(mlB-5.0*mlBErr-math.fmod(mlB-5.0*mlBErr, 0.1), mlB+7.0*mlBErr-math.fmod(mlB+7.0*mlBErr, 0.1), 81)
Cs=np.linspace(mlC-5.0*mlCErr-math.fmod(mlC-5.0*mlCErr, 0.1), mlC+7.0*mlCErr-math.fmod(mlC+7.0*mlCErr, 0.1), 81)
Ss=np.linspace(mlS-5.0*mlSErr-math.fmod(mlS-5.0*mlSErr, 0.05), mlS+7.0*mlSErr-math.fmod(mlS+7.0*mlSErr, 0.05), 81)
if mlAErr > 0 and mlBErr > 0:
outFileName=outDir+os.path.sep+"contours_AvB_"+sampleLabel+".pdf"
PDist2D=csr.fast2DProbProjection(As, Bs, 0, 1, pars)
astImages.saveFITS(outFileName.replace(".pdf", ".fits"), PDist2D, None)
probContourPlot(As, Bs, "A", "B", 0.1, 0.1, mlA, mlB, mlAErr, mlBErr, PDist2D, outFileName)
if mlAErr > 0 and mlCErr > 0:
outFileName=outDir+os.path.sep+"contours_AvC_"+sampleLabel+".pdf"
PDist2D=csr.fast2DProbProjection(As, Cs, 0, 2, pars)
probContourPlot(As, Cs, "A", "C", 0.1, 0.5, mlA, mlC, mlAErr, mlCErr, PDist2D, outFileName)
astImages.saveFITS(outFileName.replace(".pdf", ".fits"), PDist2D, None)
if mlAErr > 0 and mlSErr > 0:
outFileName=outDir+os.path.sep+"contours_AvS_"+sampleLabel+".pdf"
PDist2D=csr.fast2DProbProjection(As, Ss, 0, 3, pars)
probContourPlot(As, Ss, "A", "S", 0.1, 0.05, mlA, mlS, mlAErr, mlSErr, PDist2D, outFileName)
astImages.saveFITS(outFileName.replace(".pdf", ".fits"), PDist2D, None)
if mlBErr > 0 and mlCErr > 0:
outFileName=outDir+os.path.sep+"contours_BvC_"+sampleLabel+".pdf"
PDist2D=csr.fast2DProbProjection(Bs, Cs, 1, 2, pars)
probContourPlot(Bs, Cs, "B", "C", 0.1, 0.5, mlB, mlC, mlBErr, mlCErr, PDist2D, outFileName)
astImages.saveFITS(outFileName.replace(".pdf", ".fits"), PDist2D, None)
#-------------------------------------------------------------------------------------------------------------
def probContourPlot(par1Values, par2Values, par1Label, par2Label, par1TickStep, par2TickStep, mlPar1, mlPar2,
mlPar1Err, mlPar2Err, PDist2D, outFileName):
"""Make a 2d contour plot of probability surface of given parameters.
par1Values = values for parameter 1 (plotted on Y axis)
par2Values = values for parameter 2 (plotted on X axis)
par1Label = text label for Y axis
par2Label = text label for X axis
par1TickStep = tick step along Y axis
par2TickStep = tick step along X axis
mlPar1 = maximum likelihood value for parameter 1
mlPar2 = maximum likelihood value for parameter 2
mlPar1Err = 1d 1-sigma error in parameter 1
mlPar2Err = 1d 1-sigma error in parameter 2
PDist2D = 2d likelihood surface, made using fast2DProbProjection
"""
tck1=interpolate.splrep(par1Values, np.arange(par1Values.shape[0]))
par1TickLabels=np.arange(par1Values.min(), par1Values.max(), par1TickStep)
par1TickIndices=interpolate.splev(par1TickLabels, tck1)
plt.yticks(par1TickIndices, par1TickLabels)
tck2=interpolate.splrep(par2Values, np.arange(par2Values.shape[0]))
par2TickLabels=np.arange(par2Values.min(), par2Values.max(), par2TickStep)
par2TickIndices=interpolate.splev(par2TickLabels, tck2)
plt.xticks(par2TickIndices, par2TickLabels)
# We have to smooth to get decent looking contours
# Gaussian smoothing preserves the normalisation
# NOTE: smoothing only needed if very fine grid
PDist2D=ndimage.gaussian_filter(PDist2D, 1)
# Work out where to put contours
sigma1Level=calc2DProbThreshold(PDist2D, 0.683)
sigma2Level=calc2DProbThreshold(PDist2D, 0.95)
plt.contour(PDist2D, [sigma1Level, sigma2Level], colors = 'b')
# Save plot - trim down area first (?) and add axes labels
plt.plot(interpolate.splev(mlPar2, tck2), interpolate.splev(mlPar1, tck1), 'r*',
label = "%s = %.2f $\pm$ %.2f, %s = %.2f $\pm$ %.2f" % (par1Label, mlPar1, mlPar1Err, par2Label, mlPar2, mlPar2Err))
plt.legend(numpoints = 1)
plt.xlabel(par2Label)
plt.ylabel(par1Label)
if outFileName != None:
plt.savefig(outFileName)
plt.close()
#-------------------------------------------------------------------------------------------------------------
def calc1SigmaError(par1d, prob1d, mlParValue):
    """Calculates the marginalised 1d 1-sigma error on a parameter, relative to the maximum
    likelihood value.
    NOTE: Now we're using MCMC, the calc68Percentile routine below works just fine, and is quicker
    than this.
    """
norm=np.trapz(prob1d, par1d)
prob1d=prob1d/norm
tckPDist=interpolate.splrep(par1d, prob1d)
target=0.683 # 1 sigma
    dRange=np.linspace(0.0, par1d.max()-mlParValue, 1000) # we need to work out how to choose sensible values
bestDiff=1e6
dBest=1e6
for d in dRange:
integrationRange=np.linspace(mlParValue-d, mlParValue+d, 1000)
diff=abs(target-np.trapz(interpolate.splev(integrationRange, tckPDist), integrationRange))
if diff < bestDiff:
bestDiff=diff
dBest=d
return dBest
#-------------------------------------------------------------------------------------------------------------
def calc2DProbThreshold(PDist2D, probThresh):
"""Calculates threshold probability per pixel in PDist2D needed to draw confidence contours at e.g.
1-sigma, 2-sigma level
"""
p=PDist2D.flatten()
p.sort()
p=p[::-1]
pCumSum=p.cumsum()
diff=abs(pCumSum-probThresh)
pIndex=diff.tolist().index(diff.min())
pLevel=p[pIndex]
return pLevel
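# Illustrative sketch, not part of the original pipeline: the threshold returned by
# calc2DProbThreshold() is the per-pixel probability level such that pixels above it enclose
# probThresh of the total probability. The grid below is a toy, normalised Gaussian surface.
def _exampleProbThreshold():
    x=np.linspace(-5, 5, 101)
    X, Y=np.meshgrid(x, x)
    PDist2D=np.exp(-0.5*(X**2+Y**2))
    PDist2D=PDist2D/PDist2D.sum()       # normalised to sum to 1, as calc2DProbThreshold() assumes
    return calc2DProbThreshold(PDist2D, 0.683)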
#------------------------------------------------------------------------------------------------------------
def calc68Percentile(arr):
"""Calculates the 68-percentile (i.e. equivalent to 1-sigma error) from an array.
"""
res=np.abs(arr-np.median(arr))
res=np.sort(res)
index=int(round(0.683*arr.shape[0]))
try:
err=res[index]
except:
print "index error?"
IPython.embed()
sys.exit()
return err
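# Illustrative sketch, not part of the original pipeline: calc68Percentile() above returns the
# half-width about the median that contains 68.3% of the samples, which for a Gaussian chain is
# approximately the 1-sigma error. The chain below is made up.
def _exampleCalc68Percentile():
    samples=np.random.normal(0.0, 2.0, 100000)
    return calc68Percentile(samples)    # ~2.0 for this toy chain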
#-------------------------------------------------------------------------------------------------------------
def makeScalingRelationPlot(sampleTab, fitResults, outDir, sampleDict, settingsDict):
"""Make a scaling relation plot.
sampleDict = the dictionary defining the sample (e.g. min z, max z etc.)
"""
# Stuff we need from settings...
xColumnName=settingsDict['xColumnName']
xPlusErrColumnName=settingsDict['xPlusErrColumnName']
xMinusErrColumnName=settingsDict['xMinusErrColumnName']
yColumnName=settingsDict['yColumnName']
yPlusErrColumnName=settingsDict['yPlusErrColumnName']
yMinusErrColumnName=settingsDict['yMinusErrColumnName']
xPivot=settingsDict['xPivot']
xTakeLog10=settingsDict['xTakeLog10']
yTakeLog10=settingsDict['yTakeLog10']
redshiftColumnName=settingsDict['redshiftColumnName']
xScaleFactor=settingsDict['xScaleFactor']
yScaleFactor=settingsDict['yScaleFactor']
yScaleFactorPower=settingsDict['yScaleFactorPower']
# The plot
plt.figure(figsize=(10, 10))
plt.axes([0.1, 0.1, 0.85, 0.85])
if yScaleFactor != None:
yPlot=np.power(sampleTab['E(z)'], yScaleFactorPower)*sampleTab[yColumnName]
yPlotErrs=np.array([np.power(sampleTab['E(z)'], yScaleFactorPower)*sampleTab[yMinusErrColumnName],
np.power(sampleTab['E(z)'], yScaleFactorPower)*sampleTab[yPlusErrColumnName]])
else:
yPlot=sampleTab[yColumnName]
yPlotErrs=np.array([sampleTab[yMinusErrColumnName],
sampleTab[yPlusErrColumnName]])
plt.errorbar(sampleTab[xColumnName], yPlot,
yerr = yPlotErrs,
xerr = np.array([sampleTab[xMinusErrColumnName],
sampleTab[xPlusErrColumnName]]),
fmt = 'kD', mec = 'k', label = sampleDict['label']+" (N=%d)" % (len(sampleTab)))
if xTakeLog10 == True and yTakeLog10 == True:
plt.loglog()
elif xTakeLog10 == True and yTakeLog10 == False:
plt.semilogx()
elif xTakeLog10 == False and yTakeLog10 == True:
plt.semilogy()
#cmdata=np.outer(np.linspace(0, 1, 10), np.linspace(0, 1, 10)) # to easily make a colorbar 0-1
#cmim=plt.imshow(cmdata, cmap = "gray")
#ax=plt.axes([0.1, 0.17, 0.85, 0.78])
if np.sum(np.equal(sampleTab['detP'], 1.0)) == len(sampleTab):
shadeByDetP=False
else:
shadeByDetP=True
if shadeByDetP == True:
for row, pY in zip(sampleTab, yPlot):
plt.plot(row[xColumnName], [pY], 'D', color = (row['detP'], row['detP'], row['detP']))
plotRange=np.linspace(settingsDict['xPlotMin'], settingsDict['xPlotMax'], 100)
if xTakeLog10 == True and yTakeLog10 == True:
yFit=settingsDict['yPivot']*np.power(10, fitResults['A'])*np.power((plotRange/xPivot), fitResults['B'])
elif xTakeLog10 == False and yTakeLog10 == False:
yFit=settingsDict['yPivot']*(fitResults['A']+fitResults['B']*(plotRange/xPivot))
else:
raise Exception, "add semilogx, semilogy fit line code"
if xPivot != 1.0:
fitLabel='%s (%s) = 10$^{%.2f \pm %.2f}$ (%s/%.1f %s)$^{%.2f \pm %.2f}$' % (settingsDict['yPlotLabel'], settingsDict['yPlotLabelUnits'], fitResults['A'], fitResults['AErr'], settingsDict['xPlotLabel'], xPivot, settingsDict['xPlotLabelUnits'], fitResults['B'], fitResults['BErr'])
else:
fitLabel='%s (%s) = 10$^{%.2f \pm %.2f}$ (%s)$^{%.2f \pm %.2f}$' % (settingsDict['yPlotLabel'], settingsDict['yPlotLabelUnits'], fitResults['A'], fitResults['AErr'], settingsDict['xPlotLabel'], fitResults['B'], fitResults['BErr'])
yLabel="%s (%s)" % (settingsDict['yPlotLabel'], settingsDict['yPlotLabelUnits'])
if settingsDict['yScaleFactor'] == "E(z)":
fitLabel="$E^{%d}(z)$ " % (settingsDict['yScaleFactorPower'])+fitLabel
yLabel="$E^{%d}(z)$ " % (settingsDict['yScaleFactorPower'])+yLabel
plt.plot(plotRange, yFit, 'b--', label = fitLabel)
## Below is just diagnostic
#if sampleLabel == 'REXCESS':
#prattLabel='$L_{\sf X}$ (erg s$^{-1}$) = 10$^{44.85 \pm 0.06}$ ($T/5.0$ keV)$^{3.35 \pm 0.32}$'
#prattLabel="$E^{-1}(z)$ "+prattLabel
#prattLabel="P09: "+prattLabel
#prattLX=np.power(10, 44.85)*np.power((plotRange/5.0), 3.35)
#plt.plot(plotRange, prattLX, 'r:', label = prattLabel)
#sample['plotLabel']=""
plt.ylabel(yLabel, size = 16)
plt.xlabel("%s (%s)" % (settingsDict['xPlotLabel'], settingsDict['xPlotLabelUnits']), size = 16)
plt.xlim(settingsDict['xPlotMin'], settingsDict['xPlotMax'])
plt.ylim(settingsDict['yPlotMin'], settingsDict['yPlotMax'])
if settingsDict['showPlotLegend'] == True:
leg=plt.legend(loc = 'upper left', prop = {'size': 16}, scatterpoints = 1, numpoints = 1)
leg.draw_frame(False)
plt.draw()
ax=plt.gca()
plt.text(0.95, 0.05, sampleDict['plotLabel'], ha = 'right', va = 'center', transform = ax.transAxes,
fontdict = {"size": 16, "linespacing" : 1.2, 'family': 'serif'})
outFileName=outDir+os.path.sep+"scalingRelation_%s_%s.pdf" % (yColumnName, xColumnName)
plt.savefig(outFileName)
plt.close()
#-------------------------------------------------------------------------------------------------------------
def makeScalingRelationPlot_ABC(sampleTab, fitResults, outDir, sampleDict, settingsDict, mode = 'normal'):
"""Make a scaling relation plot with y values scaling by normalisation and z evolution.
sampleDict = the dictionary defining the sample (e.g. min z, max z etc.)
"""
# Stuff we need from settings...
xColumnName=settingsDict['xColumnName']
xPlusErrColumnName=settingsDict['xPlusErrColumnName']
xMinusErrColumnName=settingsDict['xMinusErrColumnName']
yColumnName=settingsDict['yColumnName']
yPlusErrColumnName=settingsDict['yPlusErrColumnName']
yMinusErrColumnName=settingsDict['yMinusErrColumnName']
xPivot=settingsDict['xPivot']
xTakeLog10=settingsDict['xTakeLog10']
yTakeLog10=settingsDict['yTakeLog10']
redshiftColumnName=settingsDict['redshiftColumnName']
xScaleFactor=settingsDict['xScaleFactor']
yScaleFactor=settingsDict['yScaleFactor']
yScaleFactorPower=settingsDict['yScaleFactorPower']
# The plot...
if yScaleFactor != None:
yPlot=np.power(sampleTab['E(z)'], yScaleFactorPower)*sampleTab[yColumnName]
yPlotErrs=np.array([np.power(sampleTab['E(z)'], yScaleFactorPower)*sampleTab[yMinusErrColumnName],
np.power(sampleTab['E(z)'], yScaleFactorPower)*sampleTab[yPlusErrColumnName]])
else:
yPlot=sampleTab[yColumnName]
yPlotErrs=np.array([sampleTab[yMinusErrColumnName],
sampleTab[yPlusErrColumnName]])
fitLabel='%s = 10$^{%.2f \pm %.2f}$ (%s/%d)$^{%.2f \pm %.2f}$' % (settingsDict['yPlotLabel'], fitResults['A'], fitResults['AErr'], settingsDict['xPlotLabel'], xPivot, fitResults['B'], fitResults['BErr'])
yLabel="%s (%s)" % (settingsDict['yPlotLabel'], settingsDict['yPlotLabelUnits'])
if settingsDict['evoModel'] == '1+z':
yPlot=np.power(sampleTab[redshiftColumnName]+1, -fitResults['C'])*yPlot
yPlotErrs=np.power(sampleTab[redshiftColumnName]+1, -fitResults['C'])*yPlotErrs
fitLabel=fitLabel+' (1+$z$)$^{%s}$' % (fitResults['plotLabel_C'])
yLabel=yLabel.replace("(%s)" % (settingsDict['yPlotLabelUnits']), "(1+$z$)$^{%.1f}$ (%s)" % (-1*fitResults['C'], settingsDict['yPlotLabelUnits']))
elif settingsDict['evoModel'] == 'E(z)':
yPlot=np.power(sampleTab['E(z)'], -fitResults['C'])*yPlot
yPlotErrs=np.power(sampleTab['E(z)'], -fitResults['C'])*yPlotErrs
fitLabel=fitLabel+' $E(z)^{%s}$' % (fitResults['plotLabel_C'])
yLabel=yLabel.replace("(%s)" % (settingsDict['yPlotLabelUnits']), "$E(z)^{%.1f}$ (%s)" % (-1*fitResults['C'], settingsDict['yPlotLabelUnits']))
if settingsDict['yScaleFactor'] == "E(z)":
fitLabel="$E^{%d}(z)$ " % (settingsDict['yScaleFactorPower'])+fitLabel
yLabel="$E^{%d}(z)$ " % (settingsDict['yScaleFactorPower'])+yLabel
if mode == 'normal':
plt.figure(figsize=(8, 8))
ax=plt.axes([0.11, 0.1, 0.86, 0.85])
plotRange=np.linspace(0.1*sampleTab[xColumnName].min(), 10*sampleTab[xColumnName].max(), 100)
yFit=np.power(10, fitResults['A'])*np.power((plotRange/xPivot), fitResults['B'])
plt.plot(plotRange, yFit, 'b--', label = fitLabel)
outFileName=outDir+os.path.sep+"scalingRelation_%s_%s_ABC.pdf" % (settingsDict['yColumnName'], settingsDict['xColumnName'])
# Old
#plt.errorbar(sampleTab['temp'], plotLXs,
#yerr = plotLXErrs,
#xerr = np.array([sampleTab['temp_min'],
#sampleTab['temp_max']]),
#fmt = 'kD', mec = 'k', label = sampleLabel+" (N=%d)" % (len(sampleTab)))
# New (coding by redshift)
zBins=[[0.0, 0.25], [0.25, 0.5], [0.5, 1.5]]
labels=["0.0 < $z$ < 0.25", "0.25 < $z$ < 0.5", "0.5 < $z$ < 1.5"]
#colours=['k', [0.5, 0, 1], [1, 0.5, 0]]
colours=['k', 'c', 'r']
symbols=['D', 'o', '^']
for zBin, col, s, l in zip(zBins, colours, symbols, labels):
mask=np.logical_and(np.greater(sampleTab[redshiftColumnName], zBin[0]), np.less_equal(sampleTab[redshiftColumnName], zBin[1]))
plt.errorbar(sampleTab[xColumnName][mask], yPlot[mask],
yerr = yPlotErrs[:, mask],
xerr = np.array([sampleTab[xMinusErrColumnName][mask],
sampleTab[xPlusErrColumnName][mask]]),
fmt = s, ecolor = col, mfc = col, mec = col, label = l)
elif mode == 'PDetCoded':
plotRange=np.linspace(0.1, 22.0, 100)
        fitLXs=np.power(10, fitResults['A'])*np.power((plotRange/xPivot), fitResults['B'])
#fitLabel='$L_{\sf X}$ (erg s$^{-1}$) = 10$^{%.2f \pm %.2f}$ ($T/%.1f$ keV)$^{%.2f \pm %.2f}$ (1+$z$)$^{%.2f \pm %.2f}$' % (fitResults['A'], fitResults['AErr'], pivotT, fitResults['B'], fitResults['BErr'], fitResults['C'], fitResults['CErr'])
plt.plot(plotRange, fitLXs, 'b--', label = fitLabel)
outFileName=outDir+os.path.sep+"L-T_ABC_PDetCoded.pdf"
plt.figure(figsize=(8, 8))
plt.axes([0.5, 0.5, 0.1, 0.1])
cmdata=np.outer(np.linspace(0, 1, 10), np.linspace(0, 1, 10)) # to easily make a colorbar 0-1
cmim=plt.imshow(cmdata, cmap = "gray")
ax=plt.axes([0.1, 0.17, 0.85, 0.78])
        for row, pY in zip(sampleTab, yPlot):
            plt.plot(row[xColumnName], [pY], 'D', color = (row['detP'], row['detP'], row['detP']))
cmax=plt.axes([0.1, 0.075, 0.85, 0.1], frameon=False)
plt.xticks([], [])
plt.yticks([], [])
plt.colorbar(cmim, orientation = 'v', aspect = 40.0)
plt.figtext(0.52, 0.03, "P$_{\sf det}$", va = 'center', ha = 'center')
plt.axes(ax)
else:
raise Exception, "didn't understand mode"
plt.loglog()
plt.ylabel(yLabel, size = 16)
plt.xlabel("%s (%s)" % (settingsDict['xPlotLabel'], settingsDict['xPlotLabelUnits']), size = 16)
plt.xlim(settingsDict['xPlotMin'], settingsDict['xPlotMax'])
plt.ylim(settingsDict['yPlotMin'], settingsDict['yPlotMax'])
#leg=plt.legend(loc = 'upper left', prop = {'size': 16}, scatterpoints = 1, numpoints = 1)
#leg.draw_frame(False)
plt.draw()
ax=plt.gca()
plt.text(0.95, 0.05, sampleDict['plotLabel'], ha = 'right', va = 'center', transform = ax.transAxes,
fontdict = {"size": 16, "linespacing" : 1.2, 'family': 'serif'})
plt.savefig(outFileName)
plt.close()
#-------------------------------------------------------------------------------------------------------------
def makeScalingRelationPlots_sideBySide(sampleDefs, outDir, settingsDict):
"""Makes side by side subpanel plots of all the scaling relations in sampleDefs
"""
# Stuff we need from settings...
xColumnName=settingsDict['xColumnName']
xPlusErrColumnName=settingsDict['xPlusErrColumnName']
xMinusErrColumnName=settingsDict['xMinusErrColumnName']
yColumnName=settingsDict['yColumnName']
yPlusErrColumnName=settingsDict['yPlusErrColumnName']
yMinusErrColumnName=settingsDict['yMinusErrColumnName']
xPivot=settingsDict['xPivot']
xTakeLog10=settingsDict['xTakeLog10']
yTakeLog10=settingsDict['yTakeLog10']
redshiftColumnName=settingsDict['redshiftColumnName']
xScaleFactor=settingsDict['xScaleFactor']
yScaleFactor=settingsDict['yScaleFactor']
yScaleFactorPower=settingsDict['yScaleFactorPower']
# Make an uber plot with multiple panels
# NOTE: add adjustable layout later...
cols=len(sampleDefs)
plt.figure(figsize=(6*cols, 6))
plt.subplots_adjust(0.05, 0.1, 0.99, 0.99, 0.02, 0.02)
count=0
for s in sampleDefs:
sampleTab=s['stab']
fitResults=s['fitResults']
count=count+1
plt.subplot(1, cols, count)
if yScaleFactor != None:
yPlot=np.power(sampleTab['E(z)'], yScaleFactorPower)*sampleTab[yColumnName]
yPlotErrs=np.array([np.power(sampleTab['E(z)'], yScaleFactorPower)*sampleTab[yMinusErrColumnName],
np.power(sampleTab['E(z)'], yScaleFactorPower)*sampleTab[yPlusErrColumnName]])
else:
yPlot=sampleTab[yColumnName]
yPlotErrs=np.array([sampleTab[yMinusErrColumnName],
sampleTab[yPlusErrColumnName]])
plt.errorbar(sampleTab[xColumnName], yPlot,
yerr = yPlotErrs,
xerr = np.array([sampleTab[xMinusErrColumnName],
sampleTab[xPlusErrColumnName]]),
fmt = 'kD', mec = 'k', label = s['label']+" (N=%d)" % (len(sampleTab)))
plt.loglog()
plotRange=np.linspace(0.1*sampleTab[xColumnName].min(), 10*sampleTab[xColumnName].max(), 100)
yFit=settingsDict['yPivot']*np.power(10, fitResults['A'])*np.power((plotRange/xPivot), fitResults['B'])
fitLabel='%s (%s) = 10$^{%.2f \pm %.2f}$ (%s/%.1f %s)$^{%.2f \pm %.2f}$' % (settingsDict['yPlotLabel'], settingsDict['yPlotLabelUnits'], fitResults['A'], fitResults['AErr'], settingsDict['xPlotLabel'], xPivot, settingsDict['xPlotLabelUnits'], fitResults['B'], fitResults['BErr'])
yLabel="%s (%s)" % (settingsDict['yPlotLabel'], settingsDict['yPlotLabelUnits'])
if settingsDict['yScaleFactor'] == "E(z)":
fitLabel="$E^{%d}(z)$ " % (settingsDict['yScaleFactorPower'])+fitLabel
yLabel="$E^{%d}(z)$ " % (settingsDict['yScaleFactorPower'])+yLabel
plt.plot(plotRange, yFit, 'b--', label = fitLabel)
plt.ylabel(yLabel, size = 16)
plt.xlabel("%s (%s)" % (settingsDict['xPlotLabel'], settingsDict['xPlotLabelUnits']), size = 16)
ax=plt.gca()
plt.text(0.95, 0.05, s['plotLabel'], ha = 'right', va = 'center', transform = ax.transAxes,
fontdict = {"size": 16, "linespacing" : 1.2, 'family': 'serif'})
if count > 1:
ylocs, ylabels=plt.yticks()
plt.ylabel("")
plt.yticks(ylocs, [""]*len(ylabels))
plt.xlim(settingsDict['xPlotMin'], settingsDict['xPlotMax'])
plt.ylim(settingsDict['yPlotMin'], settingsDict['yPlotMax'])
outFileName=outDir+os.path.sep+"scalingRelation_multiPlot_%s_%s.pdf" % (yColumnName, xColumnName)
plt.savefig(outFileName)
plt.close()
#-------------------------------------------------------------------------------------------------------------
def makeRoundedPlotLabelStrings(fitResults, variables, numSigFig = 1):
"""Add plot labels to fitResults, to given number of sig fig, taking care of rounding
NOTE: disabled the rounding for now
"""
# Not rounding, just dp not sf
dps=[2, 2, 1, 3, 3]
for p, dp in zip(variables, dps):
if fitResults['%sErr' % (p)] != 0:
fmt="%."+str(dp)+"f"
valStr=fmt % (fitResults['%s' % (p)])
errStr=fmt % (fitResults['%sErr' % (p)])
fitResults['plotLabel_%s' % (p)]="%s \pm %s" % (valStr, errStr)
#-------------------------------------------------------------------------------------------------------------
def makeNormEvoPlot(stab, fitResults, outDir, settingsDict):
"""Makes plot of evolution of the normalisation.
"""
zs=np.linspace(0, 2.0, 100)
Ez=[]
for z in zs:
Ez.append(astCalc.Ez(z))
Ez=np.array(Ez)
plt.figure(figsize=(8,6))
plt.axes([0.13, 0.1, 0.85, 0.86])
xColumnName=settingsDict['xColumnName']
yColumnName=settingsDict['yColumnName']
redshiftColumnName=settingsDict['redshiftColumnName']
yLabel="%s / %s$_{Fit (z=0)}$" % (settingsDict['yPlotLabel'], settingsDict['yPlotLabel'])
# If we have applied E(z)^{some power}, we want to plot that expected scaling,
# as well as a null line for no evolution
if settingsDict['yScaleFactor'] == 'E(z)':
dataNormalisation=((np.power(stab['E(z)'], settingsDict['yScaleFactorPower'])*stab[yColumnName])/np.power(stab[xColumnName]/settingsDict['xPivot'], fitResults['B']))/np.power(10, fitResults['A'])
nullLine=np.power(Ez, settingsDict['yScaleFactorPower']) # because E(z)^{some power} is flat in this form, null line is not
yScalingLine=np.ones(len(Ez)) # because we've scaled it out it's flat
yLabel="($E^{-1}(z)$ %s) / %s$_{Fit (z=0)}$" % (settingsDict['yPlotLabel'], settingsDict['yPlotLabel'])
else:
dataNormalisation=(stab[yColumnName]/np.power(stab[xColumnName]/settingsDict['xPivot'], fitResults['B']))/np.power(10, fitResults['A'])
        nullLine=np.ones(len(Ez)) # no evolution => data stay at the z=0 fit, i.e. a normalisation ratio of 1
yScalingLine=None
yLabel="%s / %s$_{Fit (z=0)}$" % (settingsDict['yPlotLabel'], settingsDict['yPlotLabel'])
dataLabel='%s$_{Fit (z=0)}$ = (%s/%d)$^{%.2f}$ / 10$^{%.2f}$' % (settingsDict['yPlotLabel'], settingsDict['xPlotLabel'], settingsDict['xPivot'], fitResults['B'], fitResults['A'])
if settingsDict['yScaleFactor'] == 'E(z)':
# Look for fractions
if settingsDict['yScaleFactorPower'] == -1:
yScalingLineLabel='$E(z)$'
elif abs(settingsDict['yScaleFactorPower']) == 2/3.0:
yScalingLineLabel='$E(z)$'
powerFactor=settingsDict['yScaleFactorPower']
# Need to swap power, remember we scaled these out...
if powerFactor > 0:
yScalingLineLabel=yScalingLineLabel+"$^{-2/3}$"
else:
yScalingLineLabel=yScalingLineLabel+"$^{2/3}$"
else:
print "yScalingLineLabel fraction handling?"
IPython.embed()
sys.exit()
plt.plot(stab[redshiftColumnName], dataNormalisation, 'kD', label = dataLabel)
    if yScalingLine is not None:
plt.plot(zs, yScalingLine, 'b--', label = yScalingLineLabel, lw = 2)
plt.plot(zs, nullLine, 'g-.', label = 'no evolution', lw = 2)
if settingsDict['evoModel'] == '1+z':
plt.plot(zs, np.power(1+zs, fitResults['C']), 'r', lw = 2, label = '(1+z)$^{%.2f \pm %.2f}$' % (fitResults['C'], fitResults['CErr']))
shadedX=np.linspace(0, 2.0, 100)
shadedYPlus=np.power(shadedX+1, fitResults['C']+fitResults['CErr'])
shadedYMinus=np.power(shadedX+1, fitResults['C']-fitResults['CErr'])
elif settingsDict['evoModel'] == 'E(z)':
plt.plot(zs, np.power(Ez, fitResults['C']), 'r', lw = 2, label = '$E(z)^{%.2f \pm %.2f}$' % (fitResults['C'], fitResults['CErr']))
shadedX=np.linspace(0, 2.0, len(Ez))
shadedYPlus=np.power(Ez, fitResults['C']+fitResults['CErr'])
shadedYMinus=np.power(Ez, fitResults['C']-fitResults['CErr'])
if fitResults['C'] < 0:
loc="upper right"
else:
loc="lower left"
leg=plt.legend(loc = loc, prop = {'size': 14}, numpoints = 1)
leg.draw_frame(False)
plt.draw()
plt.xlabel("$z$", fontdict = {'size': 20})
plt.ylabel(yLabel, fontdict = {'size': 20})
xs=shadedX.tolist()+shadedX[::-1].tolist()
ys=shadedYPlus.tolist()+shadedYMinus[::-1].tolist()
plt.fill(xs, ys, 'b', alpha=0.2, edgecolor='none', label = "None", lw = 0.1)
plt.semilogy()
#plt.loglog()
plt.xlim(0, 1.6)
plt.ylim(1e-2, 1e2)
plt.savefig(outDir+os.path.sep+"normEvo_%s_%s.pdf" % (yColumnName, xColumnName))
plt.close()
#-------------------------------------------------------------------------------------------------------------
def makePaperContourPlots(fitResults, parDict, outDir):
"""Special case of plots, for 4 parameter fits, for the paper.
"""
if 'S' not in fitResults.keys():
print "... using bisector method - 2D contour plots disabled ..."
return None
mlA, mlAErr=fitResults['A'], fitResults['AErr']
mlB, mlBErr=fitResults['B'], fitResults['BErr']
mlC, mlCErr=fitResults['C'], fitResults['CErr']
mlS, mlSErr=fitResults['S'], fitResults['SErr']
pars=fitResults['pars']
# We only want to go on if we have a full set...
if mlAErr == 0 or mlBErr == 0 or mlCErr == 0 or mlSErr == 0:
return None
plt.figure(figsize=(10, 10))
plt.subplots_adjust(0.08, 0.07, 0.97, 0.97, 0.0, 0.0)
# Make 2d contour plots of valid combinations, determined by if they have a non null 1 sigma error
# NOTE: here steps have to be smaller than AStep, BStep, CStep, SStep below
# NOTE: any strange numbers in here are fiddling to get non-overlapping plot labels
As=np.linspace(mlA-5.0*mlAErr-math.fmod(mlA-5.0*mlAErr, 0.1), mlA+5.0*mlAErr-math.fmod(mlA+5.0*mlAErr, 0.1), 81)
Bs=np.linspace(mlB-5.0*mlBErr-math.fmod(mlB-5.0*mlBErr, 0.1), mlB+5.0*mlBErr-math.fmod(mlB+5.0*mlBErr, 0.1), 81)
Cs=np.linspace(mlC-5.0*mlCErr-math.fmod(mlC-5.0*mlCErr, 0.1), mlC+5.0*mlCErr-math.fmod(mlC+5.0*mlCErr, 0.1), 81)
Ss=np.linspace(mlS-5.0*mlSErr-math.fmod(mlS-5.0*mlSErr, 0.01), mlS+5.0*mlSErr-math.fmod(mlS+5.0*mlSErr, 0.01), 81)
# Steps for tick label plotting adjustment
AStep=0.2
BStep=0.4
CStep=1.0
SStep=0.02
# Bottom row
# AB
plt.subplot(4, 4, 15)
PDist2D=csr.fast2DProbProjection(As, Bs, 0, 1, pars)
probContourPlot_subPlot(As, Bs, "A", "B", AStep, BStep, mlA, mlB, mlAErr, mlBErr, PDist2D, noYLabels = True)
# AC
plt.subplot(4, 4, 14)
PDist2D=csr.fast2DProbProjection(As, Cs, 0, 2, pars)
probContourPlot_subPlot(As, Cs, "A", "C", AStep, CStep, mlA, mlC, mlAErr, mlCErr, PDist2D, noYLabels = True)
# AS
plt.subplot(4, 4, 13)
PDist2D=csr.fast2DProbProjection(As, Ss, 0, 3, pars)
probContourPlot_subPlot(As, Ss, "A", "S", AStep, SStep, mlA, mlS, mlAErr, mlSErr, PDist2D)
# Middle row
# BC
plt.subplot(4, 4, 10)
PDist2D=csr.fast2DProbProjection(Bs, Cs, 1, 2, pars)
probContourPlot_subPlot(Bs, Cs, "B", "C", BStep, CStep, mlB, mlC, mlBErr, mlCErr, PDist2D, noXLabels = True, noYLabels = True)
# BS
plt.subplot(4, 4, 9)
PDist2D=csr.fast2DProbProjection(Bs, Ss, 1, 3, pars)
probContourPlot_subPlot(Bs, Ss, "B", "S", BStep, SStep, mlB, mlS, mlBErr, mlSErr, PDist2D, noXLabels = True)
# Top row
# CS
plt.subplot(4, 4, 5)
PDist2D=csr.fast2DProbProjection(Cs, Ss, 2, 3, pars)
probContourPlot_subPlot(Cs, Ss, "C", "S", CStep, SStep, mlC, mlS, mlCErr, mlSErr, PDist2D, noXLabels = True)
# 1D plots
# S
plt.subplot(4, 4, 1)
PDist1D=csr.fast1DProbProjection(Ss, 3, pars)
probPlot1D_subPlot(Ss, "S", SStep, mlS, mlSErr, PDist1D, fitResults['plotLabel_S'], noYLabels = True, noXLabels = True)
# C
plt.subplot(4, 4, 6)
PDist1D=csr.fast1DProbProjection(Cs, 2, pars)
probPlot1D_subPlot(Cs, "C", CStep, mlC, mlCErr, PDist1D, fitResults['plotLabel_C'], noYLabels = True, noXLabels = True)
# B
plt.subplot(4, 4, 11)
PDist1D=csr.fast1DProbProjection(Bs, 1, pars)
probPlot1D_subPlot(Bs, "B", BStep, mlB, mlBErr, PDist1D, fitResults['plotLabel_B'], noYLabels = True, noXLabels = True)
# A
plt.subplot(4, 4, 16)
PDist1D=csr.fast1DProbProjection(As, 0, pars)
probPlot1D_subPlot(As, "A", AStep, mlA, mlAErr, PDist1D, fitResults['plotLabel_A'], noYLabels = True, noXLabels = False)
plt.savefig(outDir+os.path.sep+"2DProb_allPars.pdf")
plt.close()
#-------------------------------------------------------------------------------------------------------------
def probPlot1D_subPlot(par1Values, par1Label, par1TickStep, mlPar1, mlPar1Err, PDist1D, resultLabel,
noXLabels = False, noYLabels = False):
"""Make a 1d contour plot of marginalised probability for a parameter.
par1Values = values for parameter 1 (plotted on Y axis)
par1Label = text label for Y axis
par1TickStep = tick step along Y axis
mlPar1 = maximum likelihood value for parameter 1
mlPar1Err = 1d 1-sigma error in parameter 1
PDist1D = 1d prob distribution for parameter 1
"""
par1TickLabels=np.arange(par1Values.min(), par1Values.max(), par1TickStep)
plt.xticks(par1TickLabels, par1TickLabels)
PDist1D=PDist1D/PDist1D.max()
ax=plt.gca()
fitLabel='%s = %s' % (par1Label, resultLabel.replace("\pm", "$\pm$"))
plt.plot(par1Values, PDist1D, 'k-', label = fitLabel)
plt.ylabel("")
plt.yticks([], [])
#ax.xaxis.set_major_locator(matplotlib.ticker.MaxNLocator(6))
plt.ylim(0, 1.2)
leg=plt.legend(loc = (0.0, 0.86), prop = {'size': 12})
leg.draw_frame(False)
plt.draw()
plt.xlabel(par1Label)
if noYLabels == True:
ylocs, ylabels=plt.yticks()
plt.ylabel("")
plt.yticks(ylocs, [""]*len(ylabels))
if noXLabels == True:
xlocs, xlabels=plt.xticks()
plt.xlabel("")
plt.xticks(xlocs, [""]*len(xlabels))
#-------------------------------------------------------------------------------------------------------------
def probContourPlot_subPlot(par1Values, par2Values, par1Label, par2Label, par1TickStep, par2TickStep, mlPar1, mlPar2,
mlPar1Err, mlPar2Err, PDist2D, noXLabels = False, noYLabels = False):
"""Make a 2d contour plot of probability surface of given parameters. Somewhat needless duplication of
code, for makePaperContourPlots
par1Values = values for parameter 1 (plotted on Y axis)
par2Values = values for parameter 2 (plotted on X axis)
par1Label = text label for Y axis
par2Label = text label for X axis
par1TickStep = tick step along Y axis
par2TickStep = tick step along X axis
mlPar1 = maximum likelihood value for parameter 1
mlPar2 = maximum likelihood value for parameter 2
mlPar1Err = 1d 1-sigma error in parameter 1
mlPar2Err = 1d 1-sigma error in parameter 2
PDist2D = 2d likelihood surface, made using fast2DProbProjection
"""
tck1=interpolate.splrep(par1Values, np.arange(par1Values.shape[0]))
par1TickLabels=np.arange(par1Values.min(), par1Values.max(), par1TickStep)
par1TickIndices=interpolate.splev(par1TickLabels, tck1)
plt.yticks(par1TickIndices, par1TickLabels)
tck2=interpolate.splrep(par2Values, np.arange(par2Values.shape[0]))
par2TickLabels=np.arange(par2Values.min(), par2Values.max(), par2TickStep)
par2TickIndices=interpolate.splev(par2TickLabels, tck2)
plt.xticks(par2TickIndices, par2TickLabels)
# We have to smooth to get decent looking contours
# Gaussian smoothing preserves the normalisation
# NOTE: smoothing only needed if very fine grid
PDist2D=ndimage.gaussian_filter(PDist2D, 1)
# Work out where to put contours
sigma1Level=calc2DProbThreshold(PDist2D, 0.683)
sigma2Level=calc2DProbThreshold(PDist2D, 0.95)
# Apparently, we need to switch the order in newer versions of matplotlib
try:
plt.contour(PDist2D, [sigma2Level, sigma1Level], colors = 'k')
except:
print "contour problem"
IPython.embed()
sys.exit()
# Save plot - trim down area first (?) and add axes labels
plt.plot(interpolate.splev(mlPar2, tck2), interpolate.splev(mlPar1, tck1), 'k*',
label = "%s = %.2f $\pm$ %.2f, %s = %.2f $\pm$ %.2f" % (par1Label, mlPar1, mlPar1Err, par2Label, mlPar2, mlPar2Err))
#plt.legend(numpoints = 1)
plt.xlabel(par2Label)
plt.ylabel(par1Label)
if noYLabels == True:
ylocs, ylabels=plt.yticks()
plt.ylabel("")
plt.yticks(ylocs, [""]*len(ylabels))
if noXLabels == True:
xlocs, xlabels=plt.xticks()
plt.xlabel("")
plt.xticks(xlocs, [""]*len(xlabels))
| gpl-3.0 |
nmartensen/pandas | pandas/tests/groupby/test_whitelist.py | 2 | 8350 | """
Test methods relating to generic function evaluation,
the so-called white/black lists.
"""
import pytest
from string import ascii_lowercase
import numpy as np
from pandas import DataFrame, Series, compat, date_range, Index, MultiIndex
from pandas.util import testing as tm
from pandas.compat import lrange, product
AGG_FUNCTIONS = ['sum', 'prod', 'min', 'max', 'median', 'mean', 'skew',
'mad', 'std', 'var', 'sem']
AGG_FUNCTIONS_WITH_SKIPNA = ['skew', 'mad']
df_whitelist = frozenset([
'last',
'first',
'mean',
'sum',
'min',
'max',
'head',
'tail',
'cumcount',
'ngroup',
'resample',
'rank',
'quantile',
'fillna',
'mad',
'any',
'all',
'take',
'idxmax',
'idxmin',
'shift',
'tshift',
'ffill',
'bfill',
'pct_change',
'skew',
'plot',
'hist',
'median',
'dtypes',
'corrwith',
'corr',
'cov',
'diff',
])
s_whitelist = frozenset([
'last',
'first',
'mean',
'sum',
'min',
'max',
'head',
'tail',
'cumcount',
'ngroup',
'resample',
'rank',
'quantile',
'fillna',
'mad',
'any',
'all',
'take',
'idxmax',
'idxmin',
'shift',
'tshift',
'ffill',
'bfill',
'pct_change',
'skew',
'plot',
'hist',
'median',
'dtype',
'corr',
'cov',
'diff',
'unique',
'nlargest',
'nsmallest',
])
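# Illustrative sketch, not part of the original test module: names in the whitelists above are
# DataFrame/Series methods that a GroupBy object exposes and evaluates per group. The frame below
# is a made-up example.
def _example_whitelist_dispatch():
    df = DataFrame({'key': ['a', 'a', 'b'], 'val': [1.0, 2.0, 3.0]})
    gb = df.groupby('key')
    return gb.rank(), gb.mad()  # both 'rank' and 'mad' appear in df_whitelist above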
@pytest.fixture
def mframe():
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], ['one', 'two',
'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
return DataFrame(np.random.randn(10, 3), index=index,
columns=['A', 'B', 'C'])
@pytest.fixture
def df():
return DataFrame(
{'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.random.randn(8)})
@pytest.fixture
def df_letters():
letters = np.array(list(ascii_lowercase))
N = 10
random_letters = letters.take(np.random.randint(0, 26, N))
df = DataFrame({'floats': N / 10 * Series(np.random.random(N)),
'letters': Series(random_letters)})
return df
@pytest.mark.parametrize(
"obj, whitelist", zip((df_letters(), df_letters().floats),
(df_whitelist, s_whitelist)))
def test_groupby_whitelist(df_letters, obj, whitelist):
df = df_letters
# these are aliases so ok to have the alias __name__
alias = {'bfill': 'backfill',
'ffill': 'pad',
'boxplot': None}
gb = obj.groupby(df.letters)
assert whitelist == gb._apply_whitelist
for m in whitelist:
m = alias.get(m, m)
if m is None:
continue
f = getattr(type(gb), m)
# name
try:
n = f.__name__
except AttributeError:
continue
assert n == m
# qualname
if compat.PY3:
try:
n = f.__qualname__
except AttributeError:
continue
assert n.endswith(m)
@pytest.fixture
def raw_frame():
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], ['one', 'two',
'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
raw_frame = DataFrame(np.random.randn(10, 3), index=index,
columns=Index(['A', 'B', 'C'], name='exp'))
raw_frame.iloc[1, [1, 2]] = np.nan
raw_frame.iloc[7, [0, 1]] = np.nan
return raw_frame
@pytest.mark.parametrize(
"op, level, axis, skipna",
product(AGG_FUNCTIONS,
lrange(2), lrange(2),
[True, False]))
def test_regression_whitelist_methods(raw_frame, op, level, axis, skipna):
# GH6944
    # explicitly test the whitelist methods
if axis == 0:
frame = raw_frame
else:
frame = raw_frame.T
if op in AGG_FUNCTIONS_WITH_SKIPNA:
grouped = frame.groupby(level=level, axis=axis)
result = getattr(grouped, op)(skipna=skipna)
expected = getattr(frame, op)(level=level, axis=axis,
skipna=skipna)
tm.assert_frame_equal(result, expected)
else:
grouped = frame.groupby(level=level, axis=axis)
result = getattr(grouped, op)()
expected = getattr(frame, op)(level=level, axis=axis)
tm.assert_frame_equal(result, expected)
def test_groupby_blacklist(df_letters):
df = df_letters
s = df_letters.floats
blacklist = [
'eval', 'query', 'abs', 'where',
'mask', 'align', 'groupby', 'clip', 'astype',
'at', 'combine', 'consolidate', 'convert_objects',
]
to_methods = [method for method in dir(df) if method.startswith('to_')]
blacklist.extend(to_methods)
# e.g., to_csv
defined_but_not_allowed = ("(?:^Cannot.+{0!r}.+{1!r}.+try using the "
"'apply' method$)")
# e.g., query, eval
not_defined = "(?:^{1!r} object has no attribute {0!r}$)"
fmt = defined_but_not_allowed + '|' + not_defined
for bl in blacklist:
for obj in (df, s):
gb = obj.groupby(df.letters)
msg = fmt.format(bl, type(gb).__name__)
with tm.assert_raises_regex(AttributeError, msg):
getattr(gb, bl)
def test_tab_completion(mframe):
grp = mframe.groupby(level='second')
results = set([v for v in dir(grp) if not v.startswith('_')])
expected = set(
['A', 'B', 'C', 'agg', 'aggregate', 'apply', 'boxplot', 'filter',
'first', 'get_group', 'groups', 'hist', 'indices', 'last', 'max',
'mean', 'median', 'min', 'ngroups', 'nth', 'ohlc', 'plot',
'prod', 'size', 'std', 'sum', 'transform', 'var', 'sem', 'count',
'nunique', 'head', 'describe', 'cummax', 'quantile',
'rank', 'cumprod', 'tail', 'resample', 'cummin', 'fillna',
'cumsum', 'cumcount', 'ngroup', 'all', 'shift', 'skew',
'take', 'tshift', 'pct_change', 'any', 'mad', 'corr', 'corrwith',
'cov', 'dtypes', 'ndim', 'diff', 'idxmax', 'idxmin',
'ffill', 'bfill', 'pad', 'backfill', 'rolling', 'expanding'])
assert results == expected
def test_groupby_function_rename(mframe):
grp = mframe.groupby(level='second')
for name in ['sum', 'prod', 'min', 'max', 'first', 'last']:
f = getattr(grp, name)
assert f.__name__ == name
def test_groupby_selection_with_methods(df):
# some methods which require DatetimeIndex
rng = date_range('2014', periods=len(df))
df.index = rng
g = df.groupby(['A'])[['C']]
g_exp = df[['C']].groupby(df['A'])
# TODO check groupby with > 1 col ?
# methods which are called as .foo()
methods = ['count',
'corr',
'cummax',
'cummin',
'cumprod',
'describe',
'rank',
'quantile',
'diff',
'shift',
'all',
'any',
'idxmin',
'idxmax',
'ffill',
'bfill',
'pct_change',
'tshift']
for m in methods:
res = getattr(g, m)()
exp = getattr(g_exp, m)()
# should always be frames!
tm.assert_frame_equal(res, exp)
# methods which aren't just .foo()
tm.assert_frame_equal(g.fillna(0), g_exp.fillna(0))
tm.assert_frame_equal(g.dtypes, g_exp.dtypes)
tm.assert_frame_equal(g.apply(lambda x: x.sum()),
g_exp.apply(lambda x: x.sum()))
tm.assert_frame_equal(g.resample('D').mean(), g_exp.resample('D').mean())
tm.assert_frame_equal(g.resample('D').ohlc(),
g_exp.resample('D').ohlc())
tm.assert_frame_equal(g.filter(lambda x: len(x) == 3),
g_exp.filter(lambda x: len(x) == 3))
| bsd-3-clause |
h2educ/scikit-learn | examples/neighbors/plot_species_kde.py | 282 | 4059 | """
================================================
Kernel Density Estimate of Species Distributions
================================================
This shows an example of a neighbors-based query (in particular a kernel
density estimate) on geospatial data, using a Ball Tree built upon the
Haversine distance metric -- i.e. distances over points in latitude/longitude.
The dataset is provided by Phillips et. al. (2006).
If available, the example uses
`basemap <http://matplotlib.sourceforge.net/basemap/doc/html/>`_
to plot the coast lines and national boundaries of South America.
This example does not perform any learning over the data
(see :ref:`example_applications_plot_species_distribution_modeling.py` for
an example of classification based on the attributes in this dataset). It
simply shows the kernel density estimate of observed data points in
geospatial coordinates.
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
   also known as the Forest Small Rice Rat, a rodent that lives in Peru,
   Colombia, Ecuador, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
# Author: Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_species_distributions
from sklearn.datasets.species_distributions import construct_grids
from sklearn.neighbors import KernelDensity
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
from mpl_toolkits.basemap import Basemap
basemap = True
except ImportError:
basemap = False
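# Minimal sketch, not part of the original example: the core of what follows is just a KernelDensity
# with metric='haversine' fitted on (latitude, longitude) in radians; the rest of the script is data
# handling and plotting. The points below are made-up placeholders.
def _haversine_kde_sketch():
    latlon_deg = np.array([[-10.0, -60.0], [-12.0, -62.0], [-11.0, -61.0]])
    kde = KernelDensity(bandwidth=0.04, metric='haversine',
                        kernel='gaussian', algorithm='ball_tree')
    kde.fit(latlon_deg * np.pi / 180.)
    # score_samples returns the log-density; exponentiate to get the density itself
    return np.exp(kde.score_samples(latlon_deg * np.pi / 180.))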
# Get matrices/arrays of species IDs and locations
data = fetch_species_distributions()
species_names = ['Bradypus Variegatus', 'Microryzomys Minutus']
Xtrain = np.vstack([data['train']['dd lat'],
data['train']['dd long']]).T
ytrain = np.array([d.decode('ascii').startswith('micro')
for d in data['train']['species']], dtype='int')
Xtrain *= np.pi / 180. # Convert lat/long to radians
# Set up the data grid for the contour plot
xgrid, ygrid = construct_grids(data)
X, Y = np.meshgrid(xgrid[::5], ygrid[::5][::-1])
land_reference = data.coverages[6][::5, ::5]
land_mask = (land_reference > -9999).ravel()
xy = np.vstack([Y.ravel(), X.ravel()]).T
xy = xy[land_mask]
xy *= np.pi / 180.
# Plot map of South America with distributions of each species
fig = plt.figure()
fig.subplots_adjust(left=0.05, right=0.95, wspace=0.05)
for i in range(2):
plt.subplot(1, 2, i + 1)
# construct a kernel density estimate of the distribution
print(" - computing KDE in spherical coordinates")
kde = KernelDensity(bandwidth=0.04, metric='haversine',
kernel='gaussian', algorithm='ball_tree')
kde.fit(Xtrain[ytrain == i])
# evaluate only on the land: -9999 indicates ocean
Z = -9999 + np.zeros(land_mask.shape[0])
Z[land_mask] = np.exp(kde.score_samples(xy))
Z = Z.reshape(X.shape)
# plot contours of the density
levels = np.linspace(0, Z.max(), 25)
plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)
if basemap:
print(" - plot coastlines using basemap")
m = Basemap(projection='cyl', llcrnrlat=Y.min(),
urcrnrlat=Y.max(), llcrnrlon=X.min(),
urcrnrlon=X.max(), resolution='c')
m.drawcoastlines()
m.drawcountries()
else:
print(" - plot coastlines from coverage")
plt.contour(X, Y, land_reference,
levels=[-9999], colors="k",
linestyles="solid")
plt.xticks([])
plt.yticks([])
plt.title(species_names[i])
plt.show()
| bsd-3-clause |
DinoCow/airflow | setup.py | 2 | 27515 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Setup.py for the Airflow project."""
import logging
import os
import subprocess
import sys
import unittest
from os.path import dirname
from textwrap import wrap
from typing import Dict, Iterable, List
from setuptools import Command, Distribution, find_namespace_packages, setup
logger = logging.getLogger(__name__)
version = '2.0.0'
PY3 = sys.version_info[0] == 3
my_dir = dirname(__file__)
def airflow_test_suite():
"""Test suite for Airflow tests"""
test_loader = unittest.TestLoader()
test_suite = test_loader.discover(os.path.join(my_dir, 'tests'), pattern='test_*.py')
return test_suite
class CleanCommand(Command):
"""
Command to tidy up the project root.
Registered as cmdclass in setup() so it can be called with ``python setup.py extra_clean``.
"""
description = "Tidy up the project root"
user_options = [] # type: List[str]
def initialize_options(self):
"""Set default values for options."""
def finalize_options(self):
"""Set final values for options."""
def run(self): # noqa
"""Run command to remove temporary files and directories."""
os.chdir(my_dir)
os.system('rm -vrf ./build ./dist ./*.pyc ./*.tgz ./*.egg-info')
class CompileAssets(Command):
"""
Compile and build the frontend assets using yarn and webpack.
Registered as cmdclass in setup() so it can be called with ``python setup.py compile_assets``.
"""
description = "Compile and build the frontend assets"
user_options = [] # type: List[str]
def initialize_options(self):
"""Set default values for options."""
def finalize_options(self):
"""Set final values for options."""
def run(self): # noqa
"""Run a command to compile and build assets."""
subprocess.check_call('./airflow/www/compile_assets.sh')
class ListExtras(Command):
"""
List all available extras
Registered as cmdclass in setup() so it can be called with ``python setup.py list_extras``.
"""
description = "List available extras"
user_options = [] # type: List[str]
def initialize_options(self):
"""Set default values for options."""
def finalize_options(self):
"""Set final values for options."""
def run(self): # noqa
"""List extras."""
print("\n".join(wrap(", ".join(EXTRAS_REQUIREMENTS.keys()), 100)))
def git_version(version_: str) -> str:
"""
Return a version to identify the state of the underlying git repo. The version will
indicate whether the head of the current git-backed working directory is tied to a
release tag or not : it will indicate the former with a 'release:{version}' prefix
and the latter with a 'dev0' prefix. Following the prefix will be a sha of the current
branch head. Finally, a "dirty" suffix is appended to indicate that uncommitted
changes are present.
:param str version_: Semver version
:return: Found Airflow version in Git repo
:rtype: str
"""
try:
import git
try:
repo = git.Repo(os.path.join(*[my_dir, '.git']))
except git.NoSuchPathError:
logger.warning('.git directory not found: Cannot compute the git version')
return ''
except git.InvalidGitRepositoryError:
logger.warning('Invalid .git directory not found: Cannot compute the git version')
return ''
except ImportError:
logger.warning('gitpython not found: Cannot compute the git version.')
return ''
if repo:
sha = repo.head.commit.hexsha
if repo.is_dirty():
return f'.dev0+{sha}.dirty'
# commit is clean
return f'.release:{version_}+{sha}'
else:
return 'no_git_version'
def write_version(filename: str = os.path.join(*[my_dir, "airflow", "git_version"])):
"""
Write the Semver version + git hash to file, e.g. ".dev0+2f635dc265e78db6708f59f68e8009abb92c1e65".
:param str filename: Destination file to write
"""
text = "{}".format(git_version(version))
with open(filename, 'w') as file:
file.write(text)
if os.environ.get('USE_THEME_FROM_GIT'):
_SPHINX_AIRFLOW_THEME_URL = (
"@ https://github.com/apache/airflow-site/releases/download/0.0.4/"
"sphinx_airflow_theme-0.0.4-py3-none-any.whl"
)
else:
_SPHINX_AIRFLOW_THEME_URL = ''
# 'Start dependencies group' and 'End dependencies group' are marks for ./scripts/ci/check_order_setup.py
# If you change these marks you should also change ./scripts/ci/check_order_setup.py
# Start dependencies group
amazon = [
'boto3>=1.15.0,<1.16.0',
'botocore>=1.18.0,<1.19.0',
'watchtower~=0.7.3',
]
apache_beam = [
'apache-beam[gcp]',
]
async_packages = [
'eventlet>= 0.9.7',
'gevent>=0.13',
'greenlet>=0.4.9',
]
atlas = [
'atlasclient>=0.1.2',
]
azure = [
'azure-batch>=8.0.0',
'azure-cosmos>=3.0.1,<4',
'azure-datalake-store>=0.0.45',
'azure-identity>=1.3.1',
'azure-keyvault>=4.1.0',
'azure-kusto-data>=0.0.43,<0.1',
'azure-mgmt-containerinstance>=1.5.0,<2.0',
'azure-mgmt-datalake-store>=0.5.0',
'azure-mgmt-resource>=2.2.0',
'azure-storage>=0.34.0, <0.37.0',
]
cassandra = [
'cassandra-driver>=3.13.0,<3.21.0',
]
celery = [
'celery~=4.4.2',
'flower>=0.7.3, <1.0',
'vine~=1.3', # https://stackoverflow.com/questions/32757259/celery-no-module-named-five
]
cgroups = [
'cgroupspy>=0.1.4',
]
cloudant = [
'cloudant>=2.0',
]
dask = ['cloudpickle>=1.4.1, <1.5.0', 'distributed>=2.11.1, <2.20']
databricks = [
'requests>=2.20.0, <3',
]
datadog = [
'datadog>=0.14.0',
]
doc = [
'sphinx>=2.1.2',
'sphinx-argparse>=0.1.13',
'sphinx-autoapi==1.0.0',
'sphinx-copybutton',
'sphinx-jinja~=1.1',
'sphinx-rtd-theme>=0.1.6',
'sphinxcontrib-httpdomain>=1.7.0',
"sphinxcontrib-redoc>=1.6.0",
"sphinxcontrib-spelling==5.2.1",
f"sphinx-airflow-theme{_SPHINX_AIRFLOW_THEME_URL}",
]
docker = [
'docker~=3.0',
]
druid = [
'pydruid>=0.4.1',
]
elasticsearch = [
'elasticsearch>7, <7.6.0',
'elasticsearch-dbapi==0.1.0',
'elasticsearch-dsl>=5.0.0',
]
exasol = [
'pyexasol>=0.5.1,<1.0.0',
]
facebook = [
'facebook-business>=6.0.2',
]
flask_oauth = [
'Flask-OAuthlib>=0.9.1,<0.9.6', # Flask OAuthLib 0.9.6 requires Flask-Login 0.5.0 - breaks FAB
'oauthlib!=2.0.3,!=2.0.4,!=2.0.5,<3.0.0,>=1.1.2',
'requests-oauthlib<1.2.0',
]
google = [
'PyOpenSSL',
'google-ads>=4.0.0,<8.0.0',
'google-api-python-client>=1.6.0,<2.0.0',
'google-auth>=1.0.0,<2.0.0',
'google-auth-httplib2>=0.0.1',
'google-cloud-automl>=0.4.0,<2.0.0',
'google-cloud-bigquery-datatransfer>=0.4.0,<2.0.0',
'google-cloud-bigtable>=1.0.0,<2.0.0',
'google-cloud-container>=0.1.1,<2.0.0',
'google-cloud-datacatalog>=1.0.0,<2.0.0',
'google-cloud-dataproc>=1.0.1,<2.0.0',
'google-cloud-dlp>=0.11.0,<2.0.0',
'google-cloud-kms>=2.0.0,<3.0.0',
'google-cloud-language>=1.1.1,<2.0.0',
'google-cloud-logging>=1.14.0,<2.0.0',
'google-cloud-memcache>=0.2.0',
'google-cloud-monitoring>=0.34.0,<2.0.0',
'google-cloud-os-login>=2.0.0,<3.0.0',
'google-cloud-pubsub>=2.0.0,<3.0.0',
'google-cloud-redis>=2.0.0,<3.0.0',
'google-cloud-secret-manager>=0.2.0,<2.0.0',
'google-cloud-spanner>=1.10.0,<2.0.0',
'google-cloud-speech>=0.36.3,<2.0.0',
'google-cloud-storage>=1.16,<2.0.0',
'google-cloud-tasks>=1.2.1,<2.0.0',
'google-cloud-texttospeech>=0.4.0,<2.0.0',
'google-cloud-translate>=1.5.0,<2.0.0',
'google-cloud-videointelligence>=1.7.0,<2.0.0',
'google-cloud-vision>=0.35.2,<2.0.0',
'grpcio-gcp>=0.2.2',
'pandas-gbq',
]
grpc = [
'google-auth>=1.0.0, <2.0.0dev',
'google-auth-httplib2>=0.0.1',
'grpcio>=1.15.0',
]
hashicorp = [
'hvac~=0.10',
]
hdfs = [
'snakebite-py3',
]
hive = [
'hmsclient>=0.1.0',
'pyhive[hive]>=0.6.0',
]
jdbc = [
'jaydebeapi>=1.1.1',
]
jenkins = [
'python-jenkins>=1.0.0',
]
jira = [
'JIRA>1.0.7',
]
kerberos = [
'pykerberos>=1.1.13',
'requests_kerberos>=0.10.0',
'thrift_sasl>=0.2.0',
]
kubernetes = [
'cryptography>=2.0.0',
'kubernetes>=3.0.0, <12.0.0',
]
kylin = ['kylinpy>=2.6']
ldap = [
'ldap3>=2.5.1',
]
mongo = [
'dnspython>=1.13.0,<2.0.0',
'pymongo>=3.6.0',
]
mssql = [
'pymssql~=2.1,>=2.1.5',
]
mysql = [
'mysql-connector-python>=8.0.11, <=8.0.18',
'mysqlclient>=1.3.6,<1.4',
]
odbc = [
'pyodbc',
]
oracle = [
'cx_Oracle>=5.1.2',
]
pagerduty = [
'pdpyras>=4.1.2,<5',
]
papermill = [
'papermill[all]>=1.2.1',
'nteract-scrapbook[all]>=0.3.1',
]
password = [
'bcrypt>=2.0.0',
'flask-bcrypt>=0.7.1',
]
pinot = [
'pinotdb==0.1.1',
]
plexus = [
'arrow>=0.16.0',
]
postgres = [
'psycopg2-binary>=2.7.4',
]
presto = ['presto-python-client>=0.7.0,<0.8']
qubole = [
'qds-sdk>=1.10.4',
]
rabbitmq = [
'amqp<5.0.0',
]
redis = [
'redis~=3.2',
]
salesforce = [
'simple-salesforce>=1.0.0',
]
samba = [
'pysmbclient>=0.1.3',
]
segment = [
'analytics-python>=1.2.9',
]
sendgrid = [
'sendgrid>=6.0.0,<7',
]
sentry = [
'blinker>=1.1',
'sentry-sdk>=0.8.0',
]
singularity = ['spython>=0.0.56']
slack = [
'slackclient>=2.0.0,<3.0.0',
]
snowflake = [
# The `azure` provider uses legacy `azure-storage` library, where `snowflake` uses the
# newer and more stable versions of those libraries. Most of `azure` operators and hooks work
# fine together with `snowflake` because the deprecated library does not overlap with the
# new libraries except the `blob` classes. So while `azure` works fine for most cases
# blob is the only exception
# Solution to that is being worked on in https://github.com/apache/airflow/pull/12188
# once it is merged, we can move those two back to `azure` extra.
'azure-storage-blob',
'azure-storage-common',
    # snowflake is not compatible with the latest version of requests.
# This library monkey patches the requests library, so SSL is broken globally.
# See: https://github.com/snowflakedb/snowflake-connector-python/issues/324
'requests<2.24.0',
'snowflake-connector-python>=1.5.2',
'snowflake-sqlalchemy>=1.1.0',
]
spark = [
'pyspark',
]
ssh = [
'paramiko>=2.6.0',
'pysftp>=0.2.9',
'sshtunnel>=0.1.4,<0.2',
]
statsd = [
'statsd>=3.3.0, <4.0',
]
tableau = [
'tableauserverclient~=0.12',
]
telegram = [
'python-telegram-bot==13.0',
]
vertica = [
'vertica-python>=0.5.1',
]
virtualenv = [
'virtualenv',
]
webhdfs = [
'hdfs[avro,dataframe,kerberos]>=2.0.4',
]
winrm = [
'pywinrm~=0.4',
]
yandex = [
'yandexcloud>=0.22.0',
]
zendesk = [
'zdesk',
]
# End dependencies group
all_dbs = (
cassandra
+ cloudant
+ druid
+ exasol
+ hdfs
+ hive
+ mongo
+ mssql
+ mysql
+ pinot
+ postgres
+ presto
+ vertica
)
############################################################################################################
# IMPORTANT NOTE!!!!!!!!!!!!!!!
# IF you are removing dependencies from this list, please make sure that you also increase
# DEPENDENCIES_EPOCH_NUMBER in the Dockerfile.ci
############################################################################################################
devel = [
'beautifulsoup4~=4.7.1',
'black',
'blinker',
'bowler',
'click~=7.1',
'contextdecorator;python_version<"3.4"',
'coverage',
'docutils',
'flake8>=3.6.0',
'flake8-colors',
'flaky',
'freezegun',
'github3.py',
'gitpython',
'importlib-resources~=1.4',
'ipdb',
'jira',
'mongomock',
'moto',
'parameterized',
'paramiko',
'pipdeptree',
'pre-commit',
'pylint==2.6.0',
'pysftp',
'pytest',
'pytest-cov',
'pytest-instafail',
'pytest-rerunfailures',
'pytest-timeouts',
'pytest-xdist',
'pywinrm',
'qds-sdk>=1.9.6',
'requests_mock',
'testfixtures',
'wheel',
'yamllint',
]
############################################################################################################
# IMPORTANT NOTE!!!!!!!!!!!!!!!
# If you are removing dependencies from the above list, please make sure that you also increase
# DEPENDENCIES_EPOCH_NUMBER in the Dockerfile.ci
############################################################################################################
if PY3:
devel += ['mypy==0.770']
else:
devel += ['unittest2']
devel_minreq = cgroups + devel + doc + kubernetes + mysql + password
devel_hadoop = devel_minreq + hdfs + hive + kerberos + presto + webhdfs
############################################################################################################
# IMPORTANT NOTE!!!!!!!!!!!!!!!
# If you have a 'pip check' problem with dependencies, it might be because some dependency has been
# installed via 'install_requires' in setup.cfg in higher version than required in one of the options below.
# For example pip check was failing with requests=2.25.1 installed even if in some dependencies below
# < 2.24.0 was specified for it. Solution in such case is to add such limiting requirement to
# install_requires in setup.cfg (we've added requests<2.24.0 there to limit requests library).
# This should be done with appropriate comment explaining why the requirement was added.
############################################################################################################
# Those are requirements that each provider package has
PROVIDERS_REQUIREMENTS: Dict[str, Iterable[str]] = {
"amazon": amazon,
"apache.cassandra": cassandra,
"apache.druid": druid,
"apache.hdfs": hdfs,
"apache.hive": hive,
"apache.kylin": kylin,
"apache.livy": [],
"apache.pig": [],
"apache.pinot": pinot,
"apache.spark": spark,
"apache.sqoop": [],
"celery": celery,
"cloudant": cloudant,
"cncf.kubernetes": kubernetes,
"databricks": databricks,
"datadog": datadog,
"dingding": [],
"discord": [],
"docker": docker,
"elasticsearch": elasticsearch,
"exasol": exasol,
"facebook": facebook,
"ftp": [],
"google": google,
"grpc": grpc,
"hashicorp": hashicorp,
"http": [],
"imap": [],
"jdbc": jdbc,
"jenkins": jenkins,
"jira": jira,
"microsoft.azure": azure,
"microsoft.mssql": mssql,
"microsoft.winrm": winrm,
"mongo": mongo,
"mysql": mysql,
"odbc": odbc,
"openfaas": [],
"opsgenie": [],
"oracle": oracle,
"pagerduty": pagerduty,
"papermill": papermill,
"plexus": plexus,
"postgres": postgres,
"presto": presto,
"qubole": qubole,
"redis": redis,
"salesforce": salesforce,
"samba": samba,
"segment": segment,
"sendgrid": sendgrid,
"sftp": ssh,
"singularity": singularity,
"slack": slack,
"snowflake": snowflake,
"sqlite": [],
"ssh": ssh,
"telegram": telegram,
"vertica": vertica,
"yandex": yandex,
"zendesk": zendesk,
}
# Those are requirements that each extra has. For extras that match the providers
# the requirements are identical as in the list above, but we have still a few aliases
# that have different set of requirements.
EXTRAS_REQUIREMENTS: Dict[str, List[str]] = {
'all_dbs': all_dbs,
'amazon': amazon,
'apache.atlas': atlas,
'apache.beam': apache_beam,
"apache.cassandra": cassandra,
"apache.druid": druid,
"apache.hdfs": hdfs,
"apache.hive": hive,
"apache.kylin": kylin,
"apache.livy": [],
"apache.pig": [],
"apache.pinot": pinot,
"apache.spark": spark,
"apache.sqoop": [],
"apache.webhdfs": webhdfs,
'async': async_packages,
'atlas': atlas, # TODO: remove this in Airflow 3.0
'aws': amazon, # TODO: remove this in Airflow 3.0
'azure': azure, # TODO: remove this in Airflow 3.0
'cassandra': cassandra, # TODO: remove this in Airflow 3.0
'celery': celery,
'cgroups': cgroups,
'cloudant': cloudant,
'cncf.kubernetes': kubernetes,
'crypto': [], # TODO: remove this in Airflow 3.0
'dask': dask,
'databricks': databricks,
'datadog': datadog,
'dingding': [],
'discord': [],
'docker': docker,
'druid': druid, # TODO: remove this in Airflow 3.0
'elasticsearch': elasticsearch,
'exasol': exasol,
'facebook': facebook,
'ftp': [],
'gcp': google, # TODO: remove this in Airflow 3.0
'gcp_api': google, # TODO: remove this in Airflow 3.0
'github_enterprise': flask_oauth,
'google': google,
'google_auth': flask_oauth,
'grpc': grpc,
'hashicorp': hashicorp,
'hdfs': hdfs, # TODO: remove this in Airflow 3.0
'hive': hive, # TODO: remove this in Airflow 3.0
'http': [],
'imap': [],
'jdbc': jdbc,
'jenkins': [],
'jira': jira,
'kerberos': kerberos,
'kubernetes': kubernetes, # TODO: remove this in Airflow 3.0
'ldap': ldap,
"microsoft.azure": azure,
"microsoft.mssql": mssql,
"microsoft.winrm": winrm,
'mongo': mongo,
'mssql': mssql, # TODO: remove this in Airflow 3.0
'mysql': mysql,
'odbc': odbc,
'openfaas': [],
'opsgenie': [],
'oracle': oracle,
'pagerduty': pagerduty,
'papermill': papermill,
'password': password,
'pinot': pinot, # TODO: remove this in Airflow 3.0
'plexus': plexus,
'postgres': postgres,
'presto': presto,
'qds': qubole, # TODO: remove this in Airflow 3.0
'qubole': qubole,
'rabbitmq': rabbitmq,
'redis': redis,
's3': amazon, # TODO: remove this in Airflow 3.0
'salesforce': salesforce,
'samba': samba,
'segment': segment,
'sendgrid': sendgrid,
'sentry': sentry,
'sftp': [],
'singularity': singularity,
'slack': slack,
'snowflake': snowflake,
'spark': spark,
'sqlite': [],
'ssh': ssh,
'statsd': statsd,
'tableau': tableau,
'telegram': telegram,
'vertica': vertica,
'virtualenv': virtualenv,
'webhdfs': webhdfs, # TODO: remove this in Airflow 3.0
'winrm': winrm, # TODO: remove this in Airflow 3.0
'yandex': yandex,
'zendesk': [],
}
# Those are airflow providers added for the extras in many cases extra = provider
# But for aliases and some special aliases (like all_dbs) the list might be longer.
EXTRAS_PROVIDERS_PACKAGES: Dict[str, Iterable[str]] = {
'all': list(PROVIDERS_REQUIREMENTS.keys()),
# this is not 100% accurate with devel_ci and devel_all definition, but we really want
# to have all providers when devel_ci extra is installed!
'devel_ci': list(PROVIDERS_REQUIREMENTS.keys()),
'devel_all': list(PROVIDERS_REQUIREMENTS.keys()),
'all_dbs': [
"apache.cassandra",
"apache.druid",
"apache.hdfs",
"apache.hive",
"apache.pinot",
"cloudant",
"exasol",
"mongo",
"microsoft.mssql",
"mysql",
"postgres",
"presto",
"vertica",
],
'amazon': ["amazon"],
'apache.atlas': [],
'apache.beam': [],
"apache.cassandra": ["apache.cassandra"],
"apache.druid": ["apache.druid"],
"apache.hdfs": ["apache.hdfs"],
"apache.hive": ["apache.hive"],
"apache.kylin": ["apache.kylin"],
"apache.livy": ["apache.livy"],
"apache.pig": ["apache.pig"],
"apache.pinot": ["apache.pinot"],
"apache.spark": ["apache.spark"],
"apache.sqoop": ["apache.sqoop"],
"apache.webhdfs": ["apache.hdfs"],
'async': [],
'atlas': [], # TODO: remove this in Airflow 3.0
'aws': ["amazon"], # TODO: remove this in Airflow 3.0
'azure': ["microsoft.azure"], # TODO: remove this in Airflow 3.0
'cassandra': ["apache.cassandra"], # TODO: remove this in Airflow 3.0
'celery': ["celery"],
'cgroups': [],
'cloudant': ["cloudant"],
'cncf.kubernetes': ["cncf.kubernetes"],
'crypto': [], # TODO: remove this in Airflow 3.0
'dask': [],
'databricks': ["databricks"],
'datadog': ["datadog"],
'devel': ["cncf.kubernetes", "mysql"],
'devel_hadoop': ["apache.hdfs", "apache.hive", "presto"],
'dingding': ["dingding"],
'discord': ["discord"],
'doc': [],
'docker': ["docker"],
'druid': ["apache.druid"], # TODO: remove this in Airflow 3.0
'elasticsearch': ["elasticsearch"],
'exasol': ["exasol"],
'facebook': ["facebook"],
'ftp': ["ftp"],
'gcp': ["google"], # TODO: remove this in Airflow 3.0
'gcp_api': ["google"], # TODO: remove this in Airflow 3.0
'github_enterprise': [],
'google': ["google"],
'google_auth': [],
'grpc': ["grpc"],
'hashicorp': ["hashicorp"],
'hdfs': ["apache.hdfs"], # TODO: remove this in Airflow 3.0
'hive': ["apache.hive"], # TODO: remove this in Airflow 3.0
'http': ["http"],
'imap': ["imap"],
'jdbc': ["jdbc"],
'jenkins': ["jenkins"],
'jira': ["jira"],
'kerberos': [],
'kubernetes': ["cncf.kubernetes"], # TODO: remove this in Airflow 3.0
'ldap': [],
"microsoft.azure": ["microsoft.azure"],
"microsoft.mssql": ["microsoft.mssql"],
"microsoft.winrm": ["microsoft.winrm"],
'mongo': ["mongo"],
'mssql': ["microsoft.mssql"], # TODO: remove this in Airflow 3.0
'mysql': ["mysql"],
'odbc': ["odbc"],
'openfaas': ["openfaas"],
'opsgenie': ["opsgenie"],
'oracle': ["oracle"],
'pagerduty': ["pagerduty"],
'papermill': ["papermill"],
'password': [],
'pinot': ["apache.pinot"], # TODO: remove this in Airflow 3.0
'plexus': ["plexus"],
'postgres': ["postgres"],
'presto': ["presto"],
'qds': ["qubole"], # TODO: remove this in Airflow 3.0
'qubole': ["qubole"],
'rabbitmq': [],
'redis': ["redis"],
's3': ["amazon"], # TODO: remove this in Airflow 3.0
'salesforce': ["salesforce"],
'samba': ["samba"],
'segment': ["segment"],
'sendgrid': ["sendgrid"],
'sentry': [],
'sftp': ["sftp"],
'singularity': ["singularity"],
'slack': ["slack"],
'snowflake': ["snowflake"],
'spark': ["apache.spark"],
'sqlite': ["sqlite"],
'ssh': ["ssh"],
'statsd': [],
'tableau': [],
'telegram': ["telegram"],
'vertica': ["vertica"],
'virtualenv': [],
'webhdfs': ["apache.hdfs"], # TODO: remove this in Airflow 3.0
'winrm': ["microsoft.winrm"], # TODO: remove this in Airflow 3.0
'yandex': ["yandex"],
'zendesk': ["zendesk"],
}
# Those are all "users" extras (no devel extras)
all_ = list(
set(
[req for req_list in EXTRAS_REQUIREMENTS.values() for req in req_list]
+ [req for req_list in PROVIDERS_REQUIREMENTS.values() for req in req_list]
)
)
# Those are special extras
EXTRAS_REQUIREMENTS.update(
{
'all': all_,
'devel': devel_minreq, # includes doc
'devel_hadoop': devel_hadoop, # includes devel_minreq
'doc': doc,
}
)
# This could be simplified to devel_hadoop + all_ due to inclusions,
# but we keep it explicit for clarity
devel_all = list(set(all_ + doc + devel_minreq + devel_hadoop))
# Those are packages excluded for "all" dependencies
PACKAGES_EXCLUDED_FOR_ALL = []
if PY3:
PACKAGES_EXCLUDED_FOR_ALL.extend(
[
'snakebite',
]
)
# Those packages are excluded because they break tests (downgrading mock) and they are
# not needed to run our test suite.
PACKAGES_EXCLUDED_FOR_CI = [
'apache-beam',
]
def is_package_excluded(package: str, exclusion_list: List[str]):
"""
Checks if package should be excluded.
:param package: package name (beginning of it)
:param exclusion_list: list of excluded packages
:return: true if package should be excluded
"""
return any(package.startswith(excluded_package) for excluded_package in exclusion_list)
devel_all = [
package
for package in devel_all
if not is_package_excluded(package=package, exclusion_list=PACKAGES_EXCLUDED_FOR_ALL)
]
devel_ci = [
package
for package in devel_all
if not is_package_excluded(
package=package, exclusion_list=PACKAGES_EXCLUDED_FOR_CI + PACKAGES_EXCLUDED_FOR_ALL
)
]
# Those are development requirements that install all useful devel tools
EXTRAS_REQUIREMENTS.update(
{
'devel_all': devel_all,
'devel_ci': devel_ci,
}
)
class AirflowDistribution(Distribution):
"""setuptools.Distribution subclass with Airflow specific behaviour"""
# https://github.com/PyCQA/pylint/issues/3737
def parse_config_files(self, *args, **kwargs): # pylint: disable=signature-differs
"""
Ensure that when we have been asked to install providers from sources
that we don't *also* try to install those providers from PyPI
"""
super().parse_config_files(*args, **kwargs)
if os.getenv('INSTALL_PROVIDERS_FROM_SOURCES') == 'true':
self.install_requires = [ # pylint: disable=attribute-defined-outside-init
req for req in self.install_requires if not req.startswith('apache-airflow-providers-')
]
def get_provider_package_from_package_id(package_id: str):
"""
    Builds the name of the provider package out of the package id provided.
:param package_id: id of the package (like amazon or microsoft.azure)
:return: full name of package in PyPI
"""
package_suffix = package_id.replace(".", "-")
return f"apache-airflow-providers-{package_suffix}"
def do_setup():
"""Perform the Airflow package setup."""
setup_kwargs = {}
if os.getenv('INSTALL_PROVIDERS_FROM_SOURCES') == 'true':
# Only specify this if we need this option, otherwise let default from
# setup.cfg control this (kwargs in setup() call take priority)
setup_kwargs['packages'] = find_namespace_packages(include=['airflow*'])
else:
for key, value in EXTRAS_PROVIDERS_PACKAGES.items():
EXTRAS_REQUIREMENTS[key].extend(
[get_provider_package_from_package_id(package_name) for package_name in value]
)
write_version()
setup(
        distclass=AirflowDistribution,
# Most values come from setup.cfg -- see
# https://setuptools.readthedocs.io/en/latest/userguide/declarative_config.html
version=version,
extras_require=EXTRAS_REQUIREMENTS,
download_url=('https://archive.apache.org/dist/airflow/' + version),
cmdclass={
'extra_clean': CleanCommand,
'compile_assets': CompileAssets,
'list_extras': ListExtras,
},
test_suite='setup.airflow_test_suite',
**setup_kwargs,
)
if __name__ == "__main__":
do_setup()
| apache-2.0 |
jaeilepp/mne-python | examples/time_frequency/plot_compute_raw_data_spectrum.py | 1 | 4781 | """
==================================================
Compute the power spectral density of raw data
==================================================
This script shows how to compute the power spectral density (PSD)
of measurements on a raw dataset. It also shows the effect of applying SSP
to the data to reduce ECG and EOG artifacts.
"""
# Authors: Alexandre Gramfort <[email protected]>
# Martin Luessi <[email protected]>
# Eric Larson <[email protected]>
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io, read_proj, read_selection
from mne.datasets import sample
from mne.time_frequency import psd_multitaper
print(__doc__)
###############################################################################
# Load data
# ---------
#
# We'll load a sample MEG dataset, along with SSP projections that will
# allow us to reduce EOG and ECG artifacts. For more information about
# reducing artifacts, see the preprocessing section in :ref:`documentation`.
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
proj_fname = data_path + '/MEG/sample/sample_audvis_eog-proj.fif'
tmin, tmax = 0, 60 # use the first 60s of data
# Setup for reading the raw data (to save memory, crop before loading)
raw = io.read_raw_fif(raw_fname).crop(tmin, tmax).load_data()
raw.info['bads'] += ['MEG 2443', 'EEG 053'] # bads + 2 more
# Add SSP projection vectors to reduce EOG and ECG artifacts
projs = read_proj(proj_fname)
raw.add_proj(projs, remove_existing=True)
fmin, fmax = 2, 300 # look at frequencies between 2 and 300Hz
n_fft = 2048 # the FFT size (n_fft). Ideally a power of 2
###############################################################################
# Plot the raw PSD
# ----------------
#
# First we'll visualize the raw PSD of our data. We'll do this on all of the
# channels first. Note that there are several parameters to the
# :meth:`mne.io.Raw.plot_psd` method, some of which will be explained below.
raw.plot_psd(area_mode='range', tmax=10.0, show=False)
###############################################################################
# Plot a cleaned PSD
# ------------------
#
# Next we'll focus the visualization on a subset of channels.
# This can be useful for identifying particularly noisy channels or
# investigating how the power spectrum changes across channels.
#
# We'll visualize how this PSD changes after applying some standard
# filtering techniques. We'll first apply the SSP projections, which is
# accomplished with the ``proj=True`` kwarg. We'll then perform a notch filter
# to remove particular frequency bands.
# Pick MEG magnetometers in the Left-temporal region
selection = read_selection('Left-temporal')
picks = mne.pick_types(raw.info, meg='mag', eeg=False, eog=False,
stim=False, exclude='bads', selection=selection)
# Let's just look at the first few channels for demonstration purposes
picks = picks[:4]
plt.figure()
ax = plt.axes()
raw.plot_psd(tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax, n_fft=n_fft,
n_jobs=1, proj=False, ax=ax, color=(0, 0, 1), picks=picks,
show=False)
raw.plot_psd(tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax, n_fft=n_fft,
n_jobs=1, proj=True, ax=ax, color=(0, 1, 0), picks=picks,
show=False)
# And now do the same with SSP + notch filtering
# Pick all channels for notch since the SSP projection mixes channels together
raw.notch_filter(np.arange(60, 241, 60), n_jobs=1)
raw.plot_psd(tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax, n_fft=n_fft,
n_jobs=1, proj=True, ax=ax, color=(1, 0, 0), picks=picks,
show=False)
ax.set_title('Four left-temporal magnetometers')
plt.legend(ax.lines[::3], ['Without SSP', 'With SSP', 'SSP + Notch'])
###############################################################################
# Alternative functions for PSDs
# ------------------------------
#
# There are also several functions in MNE that create a PSD using a Raw
# object. These are in the :mod:`mne.time_frequency` module and begin with
# ``psd_*``. For example, we'll use a multitaper method to compute the PSD
# below.
f, ax = plt.subplots()
psds, freqs = psd_multitaper(raw, low_bias=True, tmin=tmin, tmax=tmax,
fmin=fmin, fmax=fmax, proj=True, picks=picks,
n_jobs=1)
psds = 10 * np.log10(psds)
psds_mean = psds.mean(0)
psds_std = psds.std(0)
ax.plot(freqs, psds_mean, color='k')
ax.fill_between(freqs, psds_mean - psds_std, psds_mean + psds_std,
color='k', alpha=.5)
ax.set(title='Multitaper PSD', xlabel='Frequency',
ylabel='Power Spectral Density (dB)')
plt.show()
| bsd-3-clause |
ashapochka/saapy | samples/povray.py | 1 | 8298 | # coding=utf-8
import contextlib
import shelve
from collections import OrderedDict
from pathlib import Path
from pprint import pprint
import sys
from typing import Iterable
from invoke import Program, task, Collection
from profilehooks import timecall, profile
import pandas as pd
from saapy.analysis import (ActorParser, csv_to_list, ActorSimilarityGraph)
from saapy.vcs import GitClient
from saapy.codetools import ScitoolsClient, ScitoolsProject
class Povray:
root_path: Path
git_repo_path: Path
analysis_dir_path: Path
shelve_db_path: Path
scitools_udb_path: Path
git_graph = None
similarity_graph = None
scitools_project = None
scitools_client = None
def __init__(self, root_dir):
self.root_path = Path(root_dir).resolve()
self.git_repo_path = self.root_path / 'povray'
self.analysis_dir_path = self.root_path / 'povray-analysis'
self.shelve_db_path = self.analysis_dir_path / 'povray.shelve'
self.scitools_udb_path = self.analysis_dir_path / 'povray-master.udb'
self.scitools_client = ScitoolsClient(self.scitools_udb_path)
@timecall
def build_git_graph(self):
with shelve.open(str(self.shelve_db_path)) as db:
git_client = GitClient(self.git_repo_path)
git_graph = git_client.build_commit_graph()
git_client.add_commit_tree(git_graph, ref_name='origin/master')
db['git_graph'] = git_graph
self.git_graph = git_graph
@timecall
def load_git_graph(self):
with shelve.open(str(self.shelve_db_path)) as db:
if 'git_graph' in db:
self.git_graph = db['git_graph']
else:
self.git_graph = None
return self.git_graph
def collect_source_files(self):
master_commit = self.git_graph.commit_node(
ref_name='origin/master')['hexsha']
return self.git_graph.collect_files(
master_commit,
tree_node='source',
predicate=lambda f: f.endswith(('.cpp', '.h')))
def source_commit_frame(self) -> pd.DataFrame:
source_files = self.collect_source_files()
frame = self.git_graph.file_commit_frame(source_files)
return frame
@timecall
def build_similarity_graph(self):
actor_parser = ActorParser()
role_names = csv_to_list(Path('../data/names.csv'))
actor_parser.add_role_names(role_names)
actors = [actor_parser.parse_actor(
self.git_graph.commit_graph.node[actor_id]['name'],
self.git_graph.commit_graph.node[actor_id]['email'])
for actor_id in self.git_graph.actors]
similarity_graph = ActorSimilarityGraph()
for actor in actors:
similarity_graph.add_actor(actor)
self.similarity_graph = similarity_graph
@timecall
def build_understand_project(self):
if not self.scitools_client.project_exists():
self.build_git_graph()
source_files = self.collect_source_files()
self.scitools_client.create_project()
self.scitools_client.add_files_to_project(
self.git_repo_path / f for f in source_files)
self.scitools_client.analyze_project()
@timecall
def build_code_graph(self):
if not self.scitools_client.project_exists():
print('understand project does not exist, '
'first run "$ povray understand --build"')
else:
with shelve.open(str(self.shelve_db_path)) as db:
self.scitools_client.open_project()
self.scitools_project = self.scitools_client.build_project(
self.git_repo_path)
self.scitools_client.close_project()
db['code_graph'] = self.scitools_project
print('loaded scitools project of size',
len(self.scitools_project.code_graph))
print('entity kinds:', self.scitools_project.entity_kinds)
print('ref kinds:', self.scitools_project.entity_kinds)
@timecall
def load_code_graph(self) -> ScitoolsProject:
with shelve.open(str(self.shelve_db_path)) as db:
if 'code_graph' in db:
self.scitools_project = db['code_graph']
else:
self.scitools_project = None
return self.scitools_project
@timecall
def entity_class_metrics(self):
project = self.load_code_graph()
entities = []
entity_comments = []
refs = []
for node, data in project.code_graph.nodes_iter(data=True):
if ('node_type' not in data
or 'entity' != data['node_type']
or 'kindname' not in data
or 'class' not in data['kindname']
or 'unknown' in data['kindname']
or 'CountLineCode' not in data['metrics']
or not data['metrics']['CountLineCode']):
continue
entity = {'name': data['name'],
'longname': data['longname'],
'kindname': data['kindname']}
entity.update(data['metrics'])
entities.append(entity)
entity_comments.append({'name': data['longname'],
'comments': data['comments']})
entity_defs = []
for origin, dest, ref_data in project.code_graph.out_edges_iter(
nbunch=node, data=True):
dest_data = project.code_graph.node[dest]
ref = {'origin': data['longname'],
'destination': dest_data['longname']
if 'longname' in dest_data else '',
'name': dest_data['name']
if 'name' in dest_data else '',
'dest_kind': dest_data['kindname']
if 'kindname' in dest_data else '',
'ref': ref_data['name']
if 'name' in ref_data else '',
'ref_kind': ref_data['kind_longname']
if 'kind_longname' in ref_data else ''}
refs.append(ref)
if 'c define' == ref['ref_kind']:
entity_defs.append(ref['name'])
entity['defs'] = ' '.join(entity_defs)
entity_frame = pd.DataFrame(data=entities)
entity_comment_frame = pd.DataFrame(data=entity_comments)
ref_frame = pd.DataFrame(data=refs)
entity_frame.to_csv(
self.analysis_dir_path / 'entities.csv', index=False)
entity_comment_frame.to_csv(
self.analysis_dir_path / 'entity-comments.csv', index=False)
ref_frame.to_csv(
self.analysis_dir_path / 'entity-refs.csv', index=False)
@task
def cleanup(ctx):
pv = Povray(ctx.config.povray.parent_dir)
with contextlib.suppress(FileNotFoundError):
pv.shelve_db_path.unlink()
pv.scitools_udb_path.unlink()
@task
def git_graph(ctx):
pv = Povray(ctx.config.povray.parent_dir)
pv.build_git_graph()
print('git graph built and saved with',
len(pv.git_graph.commit_graph), 'nodes')
@task
def sim_graph(ctx):
pv = Povray(ctx.config.povray.parent_dir)
if not pv.load_git_graph():
print('git graph does not exist,'
'first run "$ povray git_graph" to build it')
return 1
pv.build_similarity_graph()
pv.similarity_graph.print_similarity_groups()
@task
def understand(ctx):
pv = Povray(ctx.config.povray.parent_dir)
pv.build_understand_project()
@task
def code_graph(ctx, build=False, load=False):
pv = Povray(ctx.config.povray.parent_dir)
if build:
pv.build_code_graph()
elif load:
pv.load_code_graph()
@task
def metrics(ctx):
pv = Povray(ctx.config.povray.parent_dir)
pv.entity_class_metrics()
@task
def history(ctx):
pv = Povray(ctx.config.povray.parent_dir)
pv.load_git_graph()
df = pv.source_commit_frame()
print(df.corr(method='spearman'))
def main():
program = Program(namespace=Collection.from_module(sys.modules[__name__]),
version='0.1.0')
program.run()
if __name__ == '__main__':
main()
| apache-2.0 |
crichardson17/starburst_atlas | Low_resolution_sims/Dusty_LowRes/Geneva_cont_NoRot/Geneva_cont_NoRot_8/fullgrid/peaks_reader.py | 1 | 5310 | import csv
import matplotlib.pyplot as plt
from numpy import *
import scipy.interpolate
import math
from pylab import *
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.patches as patches
from matplotlib.path import Path
import os
# ---------------------------------------------------
print "Starting"
numFiles = 3
gridfile = [None]*numFiles
Elines = [None]*numFiles
for i in range(3):
for file in os.listdir('.'):
if file.endswith("Geneva_cont_8_{:d}.grd".format(i+1)):
gridfile[i] = file
print file
if file.endswith("Geneva_cont_8_{:d}.txt".format(i+1)):
Elines[i] = file
print file
# ---------------------------------------------------
#this is where the grid information (phi and hdens) is read in and saved to grid.
grid1 = [];
grid2 = [];
grid3 = [];
with open(gridfile[0], 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid1.append(row);
grid1 = asarray(grid1)
with open(gridfile[1], 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid2.append(row);
grid2 = asarray(grid2)
with open(gridfile[2], 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid3.append(row);
grid3 = asarray(grid3)
#here is where the data for each line is read in and saved to dataEmissionlines
dataEmissionlines1 = [];
dataEmissionlines2 = [];
dataEmissionlines3 = [];
with open(Elines[0], 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
dataEmissionlines1.append(row);
dataEmissionlines1 = asarray(dataEmissionlines1)
with open(Elines[1], 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers2 = csvReader.next()
for row in csvReader:
dataEmissionlines2.append(row);
dataEmissionlines2 = asarray(dataEmissionlines2)
with open(Elines[2], 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers3 = csvReader.next()
for row in csvReader:
dataEmissionlines3.append(row);
dataEmissionlines3 = asarray(dataEmissionlines3)
print "import files complete"
# ---------------------------------------------------
#for concatenating grid
#pull the phi and hdens values from each of the runs. exclude header lines
grid1new = zeros((len(grid1[:,0])-1,2))
grid1new[:,0] = grid1[1:,6]
grid1new[:,1] = grid1[1:,7]
grid2new = zeros((len(grid2[:,0])-1,2))
x = array(17.00000)
grid2new[:,0] = repeat(x,len(grid2[:,0])-1)
grid2new[:,1] = grid2[1:,6]
grid3new = zeros((len(grid3[:,0])-1,2))
grid3new[:,0] = grid3[1:,6]
grid3new[:,1] = grid3[1:,7]
grid = concatenate((grid1new,grid2new,grid3new))
hdens_values = grid[:,1]
phi_values = grid[:,0]
Emissionlines = concatenate((dataEmissionlines1[:,1:],dataEmissionlines2[:,1:],dataEmissionlines3[:,1:]))
headers = headers[1:]
# ---------------------------------------------------
# ---------------------------------------------------
#To fix when hdens > 10
hdens_values_2 = empty(shape=[0, 1])
phi_values_2 = empty(shape=[0, 1])
Emissionlines_2 = empty(shape=[0, len(Emissionlines[0,:])])
for i in range(len(hdens_values)):
if float(hdens_values[i]) < 10.100 :
hdens_values_2 = append(hdens_values_2, hdens_values[i])
phi_values_2 = append(phi_values_2, phi_values[i])
Emissionlines_2 = vstack([Emissionlines_2, Emissionlines[i,:]])
#overwrite old arrays
hdens_values = hdens_values_2
phi_values = phi_values_2
Emissionlines = Emissionlines_2
concatenated_data = zeros((len(Emissionlines),len(Emissionlines[0])))
max_values = zeros((len(concatenated_data[0]),4))
# ---------------------------------------------------
#constructing grid by scaling
#select the scaling factor
#for 1215
#incident = Emissionlines[1:,4]
#for 4860
incident = concatenated_data[:,57]
#take the ratio of incident and all the lines and put it all in an array concatenated_data
for i in range(len(Emissionlines)):
for j in range(len(Emissionlines[0])):
if math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10) > 0:
concatenated_data[i,j] = math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10)
else:
			concatenated_data[i,j] = 0
# for 1215
#for i in range(len(Emissionlines)):
# for j in range(len(Emissionlines[0])):
# if math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10) > 0:
# concatenated_data[i,j] = math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10)
# else:
# concatenated_data[i,j] == 0
# ---------------------------------------------------
#find the maxima to plot onto the contour plots
for j in range(len(concatenated_data[0])):
max_values[j,0] = max(concatenated_data[:,j])
max_values[j,1] = argmax(concatenated_data[:,j], axis = 0)
max_values[j,2] = hdens_values[max_values[j,1]]
max_values[j,3] = phi_values[max_values[j,1]]
#to round off the maxima
max_values[:,0] = [ '%.1f' % elem for elem in max_values[:,0] ]
print "peaks pulled"
# ---------------------------------------------------
#Creating the grid to interpolate with for contours.
gridarray = zeros((len(concatenated_data),2))
gridarray[:,0] = hdens_values
gridarray[:,1] = phi_values
x = gridarray[:,0]
y = gridarray[:,1]
# ---------------------------------------------------
savetxt('peaks', max_values, delimiter='\t')
print "peaks saved"
| gpl-2.0 |
aetilley/scikit-learn | examples/neighbors/plot_approximate_nearest_neighbors_hyperparameters.py | 227 | 5170 | """
=================================================
Hyper-parameters of Approximate Nearest Neighbors
=================================================
This example demonstrates the behaviour of the
accuracy of the nearest neighbor queries of Locality Sensitive Hashing
Forest as the number of candidates and the number of estimators (trees)
vary.
In the first plot, accuracy is measured with the number of candidates. Here,
the term "number of candidates" refers to maximum bound for the number of
distinct points retrieved from each tree to calculate the distances. Nearest
neighbors are selected from this pool of candidates. Number of estimators is
maintained at three fixed levels (1, 5, 10).
In the second plot, the number of candidates is fixed at 50. Number of trees
is varied and the accuracy is plotted against those values. To measure the
accuracy, the true nearest neighbors are required, therefore
:class:`sklearn.neighbors.NearestNeighbors` is used to compute the exact
neighbors.
"""
from __future__ import division
print(__doc__)
# Author: Maheshakya Wijewardena <[email protected]>
#
# License: BSD 3 clause
###############################################################################
import numpy as np
from sklearn.datasets.samples_generator import make_blobs
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
import matplotlib.pyplot as plt
# Initialize size of the database, iterations and required neighbors.
n_samples = 10000
n_features = 100
n_queries = 30
rng = np.random.RandomState(42)
# Generate sample data
X, _ = make_blobs(n_samples=n_samples + n_queries,
n_features=n_features, centers=10,
random_state=0)
X_index = X[:n_samples]
X_query = X[n_samples:]
# Get exact neighbors
nbrs = NearestNeighbors(n_neighbors=1, algorithm='brute',
metric='cosine').fit(X_index)
neighbors_exact = nbrs.kneighbors(X_query, return_distance=False)
# Set `n_candidate` values
n_candidates_values = np.linspace(10, 500, 5).astype(np.int)
n_estimators_for_candidate_value = [1, 5, 10]
n_iter = 10
stds_accuracies = np.zeros((len(n_estimators_for_candidate_value),
n_candidates_values.shape[0]),
dtype=float)
accuracies_c = np.zeros((len(n_estimators_for_candidate_value),
n_candidates_values.shape[0]), dtype=float)
# LSH Forest is a stochastic index: perform several iteration to estimate
# expected accuracy and standard deviation displayed as error bars in
# the plots
for j, value in enumerate(n_estimators_for_candidate_value):
for i, n_candidates in enumerate(n_candidates_values):
accuracy_c = []
for seed in range(n_iter):
lshf = LSHForest(n_estimators=value,
n_candidates=n_candidates, n_neighbors=1,
random_state=seed)
# Build the LSH Forest index
lshf.fit(X_index)
# Get neighbors
neighbors_approx = lshf.kneighbors(X_query,
return_distance=False)
accuracy_c.append(np.sum(np.equal(neighbors_approx,
neighbors_exact)) /
n_queries)
stds_accuracies[j, i] = np.std(accuracy_c)
accuracies_c[j, i] = np.mean(accuracy_c)
# Set `n_estimators` values
n_estimators_values = [1, 5, 10, 20, 30, 40, 50]
accuracies_trees = np.zeros(len(n_estimators_values), dtype=float)
# Calculate average accuracy for each value of `n_estimators`
for i, n_estimators in enumerate(n_estimators_values):
lshf = LSHForest(n_estimators=n_estimators, n_neighbors=1)
# Build the LSH Forest index
lshf.fit(X_index)
# Get neighbors
neighbors_approx = lshf.kneighbors(X_query, return_distance=False)
accuracies_trees[i] = np.sum(np.equal(neighbors_approx,
neighbors_exact))/n_queries
###############################################################################
# Plot the accuracy variation with `n_candidates`
plt.figure()
colors = ['c', 'm', 'y']
for i, n_estimators in enumerate(n_estimators_for_candidate_value):
label = 'n_estimators = %d ' % n_estimators
plt.plot(n_candidates_values, accuracies_c[i, :],
'o-', c=colors[i], label=label)
plt.errorbar(n_candidates_values, accuracies_c[i, :],
stds_accuracies[i, :], c=colors[i])
plt.legend(loc='upper left', fontsize='small')
plt.ylim([0, 1.2])
plt.xlim(min(n_candidates_values), max(n_candidates_values))
plt.ylabel("Accuracy")
plt.xlabel("n_candidates")
plt.grid(which='both')
plt.title("Accuracy variation with n_candidates")
# Plot the accuracy variation with `n_estimators`
plt.figure()
plt.scatter(n_estimators_values, accuracies_trees, c='k')
plt.plot(n_estimators_values, accuracies_trees, c='g')
plt.ylim([0, 1.2])
plt.xlim(min(n_estimators_values), max(n_estimators_values))
plt.ylabel("Accuracy")
plt.xlabel("n_estimators")
plt.grid(which='both')
plt.title("Accuracy variation with n_estimators")
plt.show()
| bsd-3-clause |
waynenilsen/statsmodels | statsmodels/examples/ex_kernel_regression_dgp.py | 34 | 1202 | # -*- coding: utf-8 -*-
"""
Created on Sun Jan 06 09:50:54 2013
Author: Josef Perktold
"""
from __future__ import print_function
if __name__ == '__main__':
import numpy as np
import matplotlib.pyplot as plt
from statsmodels.nonparametric.api import KernelReg
import statsmodels.sandbox.nonparametric.dgp_examples as dgp
seed = np.random.randint(999999)
seed = 430973
print(seed)
np.random.seed(seed)
funcs = [dgp.UnivariateFanGijbels1(),
dgp.UnivariateFanGijbels2(),
dgp.UnivariateFanGijbels1EU(),
#dgp.UnivariateFanGijbels2(distr_x=stats.uniform(-2, 4))
dgp.UnivariateFunc1()
]
res = []
fig = plt.figure()
for i,func in enumerate(funcs):
#f = func()
f = func
model = KernelReg(endog=[f.y], exog=[f.x], reg_type='ll',
var_type='c', bw='cv_ls')
mean, mfx = model.fit()
ax = fig.add_subplot(2, 2, i+1)
f.plot(ax=ax)
ax.plot(f.x, mean, color='r', lw=2, label='est. mean')
ax.legend(loc='upper left')
res.append((model, mean, mfx))
fig.suptitle('Kernel Regression')
fig.show()
| bsd-3-clause |
wakeful-sun/imageprocessor | code/lines_sorting.py | 1 | 4508 | import numpy as np
import pandas
class CoordinateSorter:
def __init__(self, max_distance_delta, max_angle_delta, threshold):
"""
        Scenario: the 'threshold' parameter allows noise lines to be filtered out
        Given: threshold = 4 and a set of lines
        When: the sorter finds 3 groups of lines
        And: the first group contains 10 lines, the second 5 lines
        But: the third group contains only 3 lines
        Then: the third group is considered noise and will not be present in the sorting result
        --
        Scenario: 'max_distance_delta' and 'max_angle_delta' control line group detection
        Given: 5 lines have been given to sort
        And: it is possible to create a chain 'chain_1' of lines line1, line2, line3
        Where: the distance between links is less than or equal to (max_distance_delta, max_angle_delta)
        And: it is possible to create a chain 'chain_2' of lines line4, line5
        Where: the distance between links is less than or equal to (max_distance_delta, max_angle_delta)
        And: the distance between the edges of chain_1 and chain_2 is more than (max_distance_delta, max_angle_delta)
        Then: chain_1 and chain_2 are considered two separate lines
        --
        The resulting line of each group is calculated as the median of all lines in the group.
        A usage sketch is provided at the end of this module.
        :param max_distance_delta: maximum allowed |rho1 - rho2| within one group
        :param max_angle_delta: maximum allowed |theta1 - theta2| within one group, in radians
        :param threshold: minimum number of lines in one group
        """
        if max_distance_delta < 0:
            raise ValueError("[max_distance_delta] must be a positive number")
        if max_angle_delta < 0:
            raise ValueError("[max_angle_delta] must be a positive number")
        if threshold < 1 or type(threshold) != int:
            raise ValueError("[threshold] expected to be an integer greater than or equal to 1")
self._max_point_distance = (max_distance_delta, max_angle_delta)
self._min_points_amount = threshold
def _sortPointsByDistance(self, points_dict):
set_list = list()
for key, value in points_dict.items():
indexes_set = set()
set_list.append(indexes_set)
indexes_set.add(key)
for inner_key, inner_value in points_dict.items():
point_distance = abs(np.subtract(value, inner_value))
if point_distance[0] <= self._max_point_distance[0] \
and point_distance[1] <= self._max_point_distance[1]:
indexes_set.add(inner_key)
return set_list
def _splitOnGroups(self, set_list_source):
sorted_source = list(set_list_source)
sorted_source.sort(key=len, reverse=True)
extremums = list()
def find_extremums(ordered_list_of_set_items):
if len(ordered_list_of_set_items) == 0:
return
first_extremum = ordered_list_of_set_items[0]
items_for_further_sorting = list()
for dot_set in ordered_list_of_set_items:
if dot_set.issubset(first_extremum):
continue
else:
if len(first_extremum.intersection(dot_set)):
first_extremum = first_extremum.union(dot_set)
else:
items_for_further_sorting.append(dot_set)
extremums.append(first_extremum)
find_extremums(items_for_further_sorting)
find_extremums(sorted_source)
filtered_extremums = filter(lambda x: len(x) >= self._min_points_amount, extremums)
return filtered_extremums
@staticmethod
def _getMedian(source_dict, key_set):
point_array = [source_dict[item] for item in key_set]
data_frame = pandas.DataFrame(data=point_array, columns=["distance", "angle"])
return data_frame["distance"].median(), data_frame["angle"].median()
def sort(self, points_array):
if len(points_array) < self._min_points_amount:
return []
points_dictionary = dict()
for index, coordinates in enumerate(points_array):
points_dictionary[index] = (int(coordinates[0]), coordinates[1])
point_set_list = self._sortPointsByDistance(points_dictionary)
point_groups = self._splitOnGroups(point_set_list)
resulting_points = [self._getMedian(points_dictionary, point_group) for point_group in point_groups]
return resulting_points
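# Usage sketch (not part of the original module). The (rho, theta) values below
# are invented purely to show how near-duplicate Hough lines are grouped and
# reduced to their medians.
def _coordinate_sorter_example():
    sorter = CoordinateSorter(max_distance_delta=10, max_angle_delta=0.1, threshold=2)
    lines = [(100, 1.57), (103, 1.55), (300, 0.00), (302, 0.02)]
    # expected: two lines, roughly (101.5, 1.56) and (301.0, 0.01)
    return sorter.sort(lines)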
| mit |
gagneurlab/concise | concise/preprocessing/sequence.py | 1 | 13277 | import sklearn.preprocessing
import numpy as np
# vocabularies:
DNA = ["A", "C", "G", "T"]
RNA = ["A", "C", "G", "U"]
AMINO_ACIDS = ["A", "R", "N", "D", "B", "C", "E", "Q", "Z", "G", "H",
"I", "L", "K", "M", "F", "P", "S", "T", "W", "Y", "V"]
CODONS = ["AAA", "AAC", "AAG", "AAT", "ACA", "ACC", "ACG", "ACT", "AGA",
"AGC", "AGG", "AGT", "ATA", "ATC", "ATG", "ATT", "CAA", "CAC",
"CAG", "CAT", "CCA", "CCC", "CCG", "CCT", "CGA", "CGC", "CGG",
"CGT", "CTA", "CTC", "CTG", "CTT", "GAA", "GAC", "GAG", "GAT",
"GCA", "GCC", "GCG", "GCT", "GGA", "GGC", "GGG", "GGT", "GTA",
"GTC", "GTG", "GTT", "TAC", "TAT", "TCA", "TCC", "TCG", "TCT",
"TGC", "TGG", "TGT", "TTA", "TTC", "TTG", "TTT"]
STOP_CODONS = ["TAG", "TAA", "TGA"]
def _get_vocab_dict(vocab):
return {l: i for i, l in enumerate(vocab)}
def _get_index_dict(vocab):
return {i: l for i, l in enumerate(vocab)}
def one_hot2token(arr):
return arr.argmax(axis=2)
# TODO - take into account the neutral vocab
def one_hot2string(arr, vocab):
"""Convert a one-hot encoded array back to string
"""
tokens = one_hot2token(arr)
indexToLetter = _get_index_dict(vocab)
return [''.join([indexToLetter[x] for x in row]) for row in tokens]
def tokenize(seq, vocab, neutral_vocab=[]):
"""Convert sequence to integers
# Arguments
seq: Sequence to encode
vocab: Vocabulary to use
neutral_vocab: Neutral vocabulary -> assign those values to -1
# Returns
List of length `len(seq)` with integers from `-1` to `len(vocab) - 1`
"""
# Req: all vocabs have the same length
if isinstance(neutral_vocab, str):
neutral_vocab = [neutral_vocab]
nchar = len(vocab[0])
for l in vocab + neutral_vocab:
assert len(l) == nchar
assert len(seq) % nchar == 0 # since we are using striding
vocab_dict = _get_vocab_dict(vocab)
for l in neutral_vocab:
vocab_dict[l] = -1
# current performance bottleneck
return [vocab_dict[seq[(i * nchar):((i + 1) * nchar)]] for i in range(len(seq) // nchar)]
# 512 ms vs 121 -> 4x slower than custom token2one_hot
# def token2one_hot(tvec, vocab_size):
# """
#     Note: everything outside the vocabulary is transformed into `np.zeros(vocab_size)`
# """
# # This costs the most - memory allocation?
# lb = sklearn.preprocessing.LabelBinarizer()
# lb.fit(range(vocab_size))
# return lb.transform(tvec)
# # alternatively:
# # return sklearn.preprocessing.label_binarize(tvec, list(range(vocab_size)))
def token2one_hot(tvec, vocab_size):
"""
    Note: everything outside the vocabulary is transformed into `np.zeros(vocab_size)`
"""
arr = np.zeros((len(tvec), vocab_size))
tvec_range = np.arange(len(tvec))
tvec = np.asarray(tvec)
arr[tvec_range[tvec >= 0], tvec[tvec >= 0]] = 1
return arr
def encodeSequence(seq_vec, vocab, neutral_vocab, maxlen=None,
seq_align="start", pad_value="N", encode_type="one_hot"):
"""Convert a list of genetic sequences into one-hot-encoded array.
# Arguments
seq_vec: list of strings (genetic sequences)
vocab: list of chars: List of "words" to use as the vocabulary. Can be strings of length>0,
but all need to have the same length. For DNA, this is: ["A", "C", "G", "T"].
neutral_vocab: list of chars: Values used to pad the sequence or represent unknown-values. For DNA, this is: ["N"].
maxlen: int or None,
Should we trim (subset) the resulting sequence. If None don't trim.
Note that trims wrt the align parameter.
It should be smaller than the longest sequence.
seq_align: character; 'end' or 'start'
To which end should we align sequences?
        encode_type: "one_hot" or "token". "token" represents each vocab element as a positive integer from 1 to len(vocab).
neutral_vocab is represented with 0.
# Returns
Array with shape for encode_type:
- "one_hot": `(len(seq_vec), maxlen, len(vocab))`
- "token": `(len(seq_vec), maxlen)`
If `maxlen=None`, it gets the value of the longest sequence length from `seq_vec`.
"""
if isinstance(neutral_vocab, str):
neutral_vocab = [neutral_vocab]
if isinstance(seq_vec, str):
raise ValueError("seq_vec should be an iterable returning " +
"strings not a string itself")
assert len(vocab[0]) == len(pad_value)
assert pad_value in neutral_vocab
assert encode_type in ["one_hot", "token"]
seq_vec = pad_sequences(seq_vec, maxlen=maxlen,
align=seq_align, value=pad_value)
if encode_type == "one_hot":
arr_list = [token2one_hot(tokenize(seq, vocab, neutral_vocab), len(vocab))
for i, seq in enumerate(seq_vec)]
elif encode_type == "token":
arr_list = [1 + np.array(tokenize(seq, vocab, neutral_vocab)) for seq in seq_vec]
# we add 1 to be compatible with keras: https://keras.io/layers/embeddings/
# indexes > 0, 0 = padding element
return np.stack(arr_list)
def encodeDNA(seq_vec, maxlen=None, seq_align="start"):
"""Convert the DNA sequence into 1-hot-encoding numpy array
# Arguments
seq_vec: list of chars
List of sequences that can have different lengths
maxlen: int or None,
Should we trim (subset) the resulting sequence. If None don't trim.
Note that trims wrt the align parameter.
It should be smaller than the longest sequence.
seq_align: character; 'end' or 'start'
To which end should we align sequences?
# Returns
3D numpy array of shape (len(seq_vec), trim_seq_len(or maximal sequence length if None), 4)
# Example
```python
>>> sequence_vec = ['CTTACTCAGA', 'TCTTTA']
>>> X_seq = encodeDNA(sequence_vec, seq_align="end", maxlen=8)
>>> X_seq.shape
(2, 8, 4)
>>> print(X_seq)
[[[0 0 0 1]
[1 0 0 0]
[0 1 0 0]
[0 0 0 1]
[0 1 0 0]
[1 0 0 0]
[0 0 1 0]
[1 0 0 0]]
[[0 0 0 0]
[0 0 0 0]
[0 0 0 1]
[0 1 0 0]
[0 0 0 1]
[0 0 0 1]
[0 0 0 1]
[1 0 0 0]]]
```
"""
return encodeSequence(seq_vec,
vocab=DNA,
neutral_vocab="N",
maxlen=maxlen,
seq_align=seq_align,
pad_value="N",
encode_type="one_hot")
def encodeRNA(seq_vec, maxlen=None, seq_align="start"):
"""Convert the RNA sequence into 1-hot-encoding numpy array as for encodeDNA
"""
return encodeSequence(seq_vec,
vocab=RNA,
neutral_vocab="N",
maxlen=maxlen,
seq_align=seq_align,
pad_value="N",
encode_type="one_hot")
def encodeCodon(seq_vec, ignore_stop_codons=True, maxlen=None, seq_align="start", encode_type="one_hot"):
"""Convert the Codon sequence into 1-hot-encoding numpy array
# Arguments
seq_vec: List of strings/DNA sequences
ignore_stop_codons: boolean; if True, STOP_CODONS are omitted from one-hot encoding.
maxlen: Maximum sequence length. See `pad_sequences` for more detail
seq_align: How to align the sequences of variable lengths. See `pad_sequences` for more detail
encode_type: can be `"one_hot"` or `"token"` for token encoding of codons (incremental integers).
# Returns
numpy.ndarray of shape `(len(seq_vec), maxlen / 3, 61 if ignore_stop_codons else 64)`
"""
if ignore_stop_codons:
vocab = CODONS
neutral_vocab = STOP_CODONS + ["NNN"]
else:
vocab = CODONS + STOP_CODONS
neutral_vocab = ["NNN"]
# replace all U's with T's so RNA input is handled with the DNA codon vocabulary
seq_vec = [str(seq).replace("U", "T") for seq in seq_vec]
return encodeSequence(seq_vec,
vocab=vocab,
neutral_vocab=neutral_vocab,
maxlen=maxlen,
seq_align=seq_align,
pad_value="NNN",
encode_type=encode_type)
def encodeAA(seq_vec, maxlen=None, seq_align="start", encode_type="one_hot"):
"""Convert the Amino-acid sequence into 1-hot-encoding numpy array
# Arguments
seq_vec: List of strings/amino-acid sequences
maxlen: Maximum sequence length. See `pad_sequences` for more detail
seq_align: How to align the sequences of variable lengths. See `pad_sequences` for more detail
encode_type: can be `"one_hot"` or `"token"` for token encoding of amino acids (incremental integers).
# Returns
numpy.ndarray of shape `(len(seq_vec), maxlen, 22)`
"""
return encodeSequence(seq_vec,
vocab=AMINO_ACIDS,
neutral_vocab="_",
maxlen=maxlen,
seq_align=seq_align,
pad_value="_",
encode_type=encode_type)
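# Hedged usage sketch (assumes the module-level AMINO_ACIDS vocabulary of 22
# symbols mentioned in the docstring above):
#   X = encodeAA(["MKV", "MK"], maxlen=3, seq_align="start")
#   # X.shape == (2, 3, 22)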
def pad_sequences(sequence_vec, maxlen=None, align="end", value="N"):
"""Pad and/or trim a list of sequences to have common length. Procedure:
1. Pad the sequence with N's or any other string or list element (`value`)
2. Subset the sequence
# Note
See also: https://keras.io/preprocessing/sequence/
Also applicable to lists of characters
# Arguments
sequence_vec: list of chars or lists
List of sequences that can have various lengths
value: Neutral element to pad the sequence with. Can be `str` or `list`.
maxlen: int or None; Final length of sequences.
If None, maxlen is set to the longest sequence length.
align: character; 'start', 'end' or 'center'
To which end to align the sequences when trimming/padding. See examples below.
# Returns
List of sequences of the same class as sequence_vec
# Example
```python
>>> sequence_vec = ['CTTACTCAGA', 'TCTTTA']
>>> pad_sequences(sequence_vec, 10, align="start", value="N")
['CTTACTCAGA', 'TCTTTANNNN']
>>> pad_sequences(sequence_vec, 10, align="end", value="N")
['CTTACTCAGA', 'NNNNTCTTTA']
>>> pad_sequences(sequence_vec, 4, align="center", value="N")
['ACTC', 'CTTT']
```
"""
# neutral element type checking
assert isinstance(value, list) or isinstance(value, str)
assert isinstance(value, type(sequence_vec[0]))
assert not isinstance(sequence_vec, str)
assert isinstance(sequence_vec[0], list) or isinstance(sequence_vec[0], str)
max_seq_len = max([len(seq) for seq in sequence_vec])
if maxlen is None:
maxlen = max_seq_len
else:
maxlen = int(maxlen)
if max_seq_len < maxlen:
import warnings
warnings.warn("Maximum sequence length (%s) is less than maxlen (%s)" % (max_seq_len, maxlen))
max_seq_len = maxlen
    # check length compatibility when len(value) > 1
    for seq in sequence_vec:
        if not len(seq) % len(value) == 0:
            raise ValueError("All sequence lengths need to be divisible by len(value)")
    if not maxlen % len(value) == 0:
        raise ValueError("maxlen needs to be divisible by len(value)")
# pad and subset
def pad(seq, max_seq_len, value="N", align="end"):
seq_len = len(seq)
assert max_seq_len >= seq_len
if align == "end":
n_left = max_seq_len - seq_len
n_right = 0
elif align == "start":
n_right = max_seq_len - seq_len
n_left = 0
elif align == "center":
n_left = (max_seq_len - seq_len) // 2 + (max_seq_len - seq_len) % 2
n_right = (max_seq_len - seq_len) // 2
else:
raise ValueError("align can be of: end, start or center")
# normalize for the length
n_left = n_left // len(value)
n_right = n_right // len(value)
return value * n_left + seq + value * n_right
def trim(seq, maxlen, align="end"):
seq_len = len(seq)
assert maxlen <= seq_len
if align == "end":
return seq[-maxlen:]
elif align == "start":
return seq[0:maxlen]
elif align == "center":
dl = seq_len - maxlen
n_left = dl // 2 + dl % 2
n_right = seq_len - dl // 2
return seq[n_left:n_right]
else:
raise ValueError("align can be of: end, start or center")
padded_sequence_vec = [pad(seq, max(max_seq_len, maxlen),
value=value, align=align) for seq in sequence_vec]
padded_sequence_vec = [trim(seq, maxlen, align=align) for seq in padded_sequence_vec]
return padded_sequence_vec
| mit |
jaivardhankapoor/jaivardhankapoor.github.io | markdown_generator/publications.py | 197 | 3887 |
# coding: utf-8
# # Publications markdown generator for academicpages
#
# Takes a TSV of publications with metadata and converts them for use with [academicpages.github.io](academicpages.github.io). This is an interactive Jupyter notebook, with the core python code in publications.py. Run either one from the `markdown_generator` folder after replacing `publications.tsv` with one that fits your format.
#
# TODO: Make this work with BibTex and other databases of citations, rather than Stuart's non-standard TSV format and citation style.
#
# ## Data format
#
# The TSV needs to have the following columns: pub_date, title, venue, excerpt, citation, url_slug, and paper_url, with a header at the top.
#
# - `excerpt` and `paper_url` can be blank, but the others must have values.
# - `pub_date` must be formatted as YYYY-MM-DD.
# - `url_slug` will be the descriptive part of the .md file and the permalink URL for the page about the paper. The .md file will be `YYYY-MM-DD-[url_slug].md` and the permalink will be `https://[yourdomain]/publications/YYYY-MM-DD-[url_slug]`
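# An illustrative, purely hypothetical `publications.tsv` row (columns shown space-separated here for readability; the real file must be tab-separated):
#
# pub_date    title                  venue      excerpt           citation                                               url_slug              paper_url
# 2015-10-01  Paper Title Number 1   Journal 1  A short excerpt.  Your Name. (2015). "Paper Title Number 1." Journal 1.  paper-title-number-1  http://example.com/paper1.pdf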
# ## Import pandas
#
# We are using the very handy pandas library for dataframes.
# In[2]:
import pandas as pd
# ## Import TSV
#
# Pandas makes this easy with the read_csv function. We are using a TSV, so we specify the separator as a tab, or `\t`.
#
# I found it important to put this data in a tab-separated values format, because there are a lot of commas in this kind of data and comma-separated values can get messed up. However, you can modify the import statement, as pandas also has read_excel(), read_json(), and others.
# In[3]:
publications = pd.read_csv("publications.tsv", sep="\t", header=0)
publications
# ## Escape special characters
#
# YAML is very picky about how it takes a valid string, so we are replacing single and double quotes (and ampersands) with their HTML-encoded equivalents. This makes them less readable in raw format, but they are parsed and rendered nicely.
# In[4]:
html_escape_table = {
"&": "&",
'"': """,
"'": "'"
}
def html_escape(text):
"""Produce entities within text."""
return "".join(html_escape_table.get(c,c) for c in text)
# ## Creating the markdown files
#
# This is where the heavy lifting is done. This loops through all the rows in the TSV dataframe, then starts to concatenate a big string (```md```) that contains the markdown for each individual page. It does the YAML metadata first, then the description for the individual page. If you don't want something to appear (like the "Recommended citation"), remove or comment out the corresponding lines below.
# In[5]:
import os
for row, item in publications.iterrows():
md_filename = str(item.pub_date) + "-" + item.url_slug + ".md"
html_filename = str(item.pub_date) + "-" + item.url_slug
year = item.pub_date[:4]
## YAML variables
md = "---\ntitle: \"" + item.title + '"\n'
md += """collection: publications"""
md += """\npermalink: /publication/""" + html_filename
if len(str(item.excerpt)) > 5:
md += "\nexcerpt: '" + html_escape(item.excerpt) + "'"
md += "\ndate: " + str(item.pub_date)
md += "\nvenue: '" + html_escape(item.venue) + "'"
if len(str(item.paper_url)) > 5:
md += "\npaperurl: '" + item.paper_url + "'"
md += "\ncitation: '" + html_escape(item.citation) + "'"
md += "\n---"
## Markdown description for individual page
if len(str(item.paper_url)) > 5:
md += "\n\n<a href='" + item.paper_url + "'>Download paper here</a>\n"
if len(str(item.excerpt)) > 5:
md += "\n" + html_escape(item.excerpt) + "\n"
md += "\nRecommended citation: " + item.citation
md_filename = os.path.basename(md_filename)
with open("../_publications/" + md_filename, 'w') as f:
f.write(md)
| mit |
OxfordSKA/bda | pybda/util/cal_table_plot.py | 1 | 2240 | #!/usr/bin/python
import os
import matplotlib.pyplot as plt
import numpy as np
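# Note (assumption): this script is intended to be run inside CASA (casapy),
# where the table tool `tb` used below is available as a built-in global.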
def load_gains(cal_table):
tb.open(cal_table, nomodify=True)
gains = tb.getcol('CPARAM')
tb.close()
tb.open(cal_table+'/ANTENNA')
num_antennas = tb.nrows()
tb.close()
num_times = gains.shape[2]/num_antennas
tb.open(cal_table+'/OBSERVATION')
time_range = tb.getcol('TIME_RANGE')
tb.close()
dt = (time_range[1][0]-time_range[0][0]) / num_times
return gains, num_antennas, num_times, dt
def main():
# ------------------------------------------------------------
# cal_table = os.path.join('vis', 'corrupted.gains')
cal_table = os.path.join('bench_02', 'calibrated.gains')
plot_num_stations = 10 # Number of stations to plot gains for
# ------------------------------------------------------------
gains, num_antennas, num_times, dt = load_gains(cal_table)
if plot_num_stations == -1:
plot_num_stations = num_antennas
gains = gains[0, 0, :]
if 'corrupted' in cal_table :
gains = 1.0/gains
x = np.arange(0, num_times)*dt
fig, axes = plt.subplots(4, 1, sharex=True, sharey=False, figsize=(12,10))
# for i in np.random.randint(0, num_antennas, plot_num_stations):
for i in range(0, plot_num_stations):
axes[0].plot(x, np.abs(gains[i::num_antennas]))
axes[1].plot(x, np.angle(gains[i::num_antennas])*(180.0/np.pi))
axes[2].plot(x, np.real(gains[i::num_antennas]))
axes[3].plot(x, np.imag(gains[i::num_antennas]))
for axis in axes:
axis.grid()
# axes[0].set_title('%s : Gains for %i randomly selected stations' %
# (cal_table, plot_num_stations))
axes[0].set_title('%s : Gains for the first %i stations' %
(cal_table, plot_num_stations))
axes[0].set_ylabel('gain amplitude')
axes[1].set_ylabel('gain phase')
axes[2].set_ylabel('real(gain)')
axes[3].set_ylabel('imag(gain)')
axes[3].set_xlabel('time [seconds]')
axes[3].set_xlim(0, num_times*dt)
plt.tight_layout()
#plt.savefig(cal_table+'.png', transparent=True, frameon=False)
plt.savefig(cal_table+'.png')
plt.show()
if __name__ == "__main__":
main()
| bsd-3-clause |
trungnt13/scikit-learn | sklearn/decomposition/tests/test_dict_learning.py | 47 | 8095 | import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.decomposition import DictionaryLearning
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.decomposition import SparseCoder
from sklearn.decomposition import dict_learning_online
from sklearn.decomposition import sparse_encode
rng_global = np.random.RandomState(0)
n_samples, n_features = 10, 8
X = rng_global.randn(n_samples, n_features)
def test_dict_learning_shapes():
n_components = 5
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_overcomplete():
n_components = 12
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_reconstruction():
n_components = 12
dico = DictionaryLearning(n_components, transform_algorithm='omp',
transform_alpha=0.001, random_state=0)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm='lasso_lars')
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
# used to test lars here too, but there's no guarantee the number of
# nonzero atoms is right.
def test_dict_learning_reconstruction_parallel():
# regression test that parallel reconstruction works with n_jobs=-1
n_components = 12
dico = DictionaryLearning(n_components, transform_algorithm='omp',
transform_alpha=0.001, random_state=0, n_jobs=-1)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm='lasso_lars')
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
def test_dict_learning_nonzero_coefs():
n_components = 4
dico = DictionaryLearning(n_components, transform_algorithm='lars',
transform_n_nonzero_coefs=3, random_state=0)
code = dico.fit(X).transform(X[1])
assert_true(len(np.flatnonzero(code)) == 3)
dico.set_params(transform_algorithm='omp')
code = dico.transform(X[1])
assert_equal(len(np.flatnonzero(code)), 3)
def test_dict_learning_unknown_fit_algorithm():
n_components = 5
dico = DictionaryLearning(n_components, fit_algorithm='<unknown>')
assert_raises(ValueError, dico.fit, X)
def test_dict_learning_split():
n_components = 5
dico = DictionaryLearning(n_components, transform_algorithm='threshold',
random_state=0)
code = dico.fit(X).transform(X)
dico.split_sign = True
split_code = dico.transform(X)
assert_array_equal(split_code[:, :n_components] -
split_code[:, n_components:], code)
def test_dict_learning_online_shapes():
rng = np.random.RandomState(0)
n_components = 8
code, dictionary = dict_learning_online(X, n_components=n_components,
alpha=1, random_state=rng)
assert_equal(code.shape, (n_samples, n_components))
assert_equal(dictionary.shape, (n_components, n_features))
assert_equal(np.dot(code, dictionary).shape, X.shape)
def test_dict_learning_online_verbosity():
n_components = 5
# test verbosity
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=1,
random_state=0)
dico.fit(X)
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=2,
random_state=0)
dico.fit(X)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=1,
random_state=0)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=2,
random_state=0)
finally:
sys.stdout = old_stdout
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_estimator_shapes():
n_components = 5
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, random_state=0)
dico.fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_overcomplete():
n_components = 12
dico = MiniBatchDictionaryLearning(n_components, n_iter=20,
random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_initialization():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features)
dico = MiniBatchDictionaryLearning(n_components, n_iter=0,
dict_init=V, random_state=0).fit(X)
assert_array_equal(dico.components_, V)
def test_dict_learning_online_partial_fit():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
dict1 = MiniBatchDictionaryLearning(n_components, n_iter=10 * len(X),
batch_size=1,
alpha=1, shuffle=False, dict_init=V,
random_state=0).fit(X)
dict2 = MiniBatchDictionaryLearning(n_components, alpha=1,
n_iter=1, dict_init=V,
random_state=0)
for i in range(10):
for sample in X:
dict2.partial_fit(sample)
assert_true(not np.all(sparse_encode(X, dict1.components_, alpha=1) ==
0))
assert_array_almost_equal(dict1.components_, dict2.components_,
decimal=2)
def test_sparse_encode_shapes():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
for algo in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'):
code = sparse_encode(X, V, algorithm=algo)
assert_equal(code.shape, (n_samples, n_components))
def test_sparse_encode_error():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = sparse_encode(X, V, alpha=0.001)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
def test_sparse_encode_error_default_sparsity():
rng = np.random.RandomState(0)
X = rng.randn(100, 64)
D = rng.randn(2, 64)
code = ignore_warnings(sparse_encode)(X, D, algorithm='omp',
n_nonzero_coefs=None)
assert_equal(code.shape, (100, 2))
def test_unknown_method():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
assert_raises(ValueError, sparse_encode, X, V, algorithm="<unknown>")
def test_sparse_coder_estimator():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = SparseCoder(dictionary=V, transform_algorithm='lasso_lars',
transform_alpha=0.001).transform(X)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
| bsd-3-clause |
cmohl2013/140327 | tmp.py | 1 | 1236 | import sys
import numpy as np
sys.path.append("/mnt/moehlc/home/idaf_library")
#import mahotas
import libidaf.idafIO as io
import matplotlib.pyplot as plt
import re
path = '/home/moehlc/raman_bloodvessel_dat/filteredVoldDat1/angio_wt/'
path = '/home/moehlc/raman_bloodvessel_dat/filteredVoldDatGauss1/angio_wt/'
path2 = '/home/moehlc/raman_bloodvessel_dat/rawVoldat2/angio_wt/'
pattern = 'filtered_Size_20'
pattern2 = '_trafo'
shape = (320,320,272)
fnames = io.getFilelistFromDir(path,pattern) #list of tiff stacks to be filtered
fnames2 = io.getFilelistFromDir(path2,pattern2)
num = 8
fname = fnames[num]
def matchlist(string,list):
for f in list:
if string.find(f) != -1:
return f
return -1
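# (matchlist returns the first entry of `list` that occurs as a substring of `string`, or -1 if none matches)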
fname2 = matchlist(fname,fnames2)
print(fname)
print(fname2)
vol_f = np.array(np.memmap(path + fname,dtype = 'float64', mode = 'r', shape = shape))
#vol_gauss = np.array(np.memmap(path3 + fname,dtype = 'float64', mode = 'r', shape = shape))
vol = np.array(np.memmap(path2 + fname2,dtype = 'float64', mode = 'r', shape = shape))
im_f = np.nanmean(vol_f,axis = 2)
im = np.nanmean(vol,axis = 2)
plt.close()
fig = plt.figure()
fig.add_subplot(1,2,1)
plt.imshow(im)
fig.add_subplot(1,2,2)
plt.imshow(im_f)
plt.show() | mit |
frzdian/jaksafe-engine | jaksafe/jaksafe/jakservice/auto_preprocessing/preprocess_fl_report.py | 1 | 8611 | __AUTHOR__= 'FARIZA DIAN PRASETYO'
from jaksafe import *
import simplejson as json
import pandas as pd
import numpy as np
import pandas.io.sql as psql
import Time as t
import os
from header_config_variable import *
from Time import formatted_date_to_timestamp
from Time import timestamp_to_formatted_date
from Time import timestamp_to_date_time
'''
Convert from json format to data frame
'''
def convert_json_to_data_frame(dims_json,request_time):
dims_data_dict = json.loads(dims_json)
df_event = pd.DataFrame.from_dict(dims_data_dict)
print "df_event from dims"
print df_event
dims_column_order = [header_id_distrik,header_kelurahan,header_kecamatan,header_RW,header_RT,header_ketinggian,header_waktu_kejadian]
fl_event_column_order = [header_id_unit,header_village,header_district,header_rw,header_rt,header_depth,header_report_time]
## Adapting the df to database
for idx,col_name in enumerate(dims_column_order):
df_event.rename(columns={col_name:fl_event_column_order[idx]},inplace = True)
## Adapting the df to database
df_event[header_request_time] = request_time
## Changing data type as float
df_event_max = df_event
if not df_event.empty:
df_event[[header_depth]] = df_event[[header_depth]].astype(float)
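## collapse duplicate reports per unit by taking the column-wise maximum,
## i.e. the largest reported depth (and latest report time) for each unit id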
grb = df_event.groupby(header_id_unit)
df_event_max = grb.aggregate(np.max)
df_event_max[header_id_unit] = df_event_max.index
df_event_max['id'] = range(len(df_event_max))
df_event_max = df_event_max.set_index('id')
df_event_max.index.name = None
return df_event,df_event_max
def convert_json_to_data_frame_update(dims_json,request_time):
### initialize status df_event and df_event_raw
df_event = pd.DataFrame()
df_event_max = df_event
dims_data_dict = json.loads(dims_json)
df_event = pd.DataFrame.from_dict(dims_data_dict[header_dims_reports])
### For the case if empty reports is fetched
if df_event.empty:
print 'empty reports (dict) from dims...'
return df_event,df_event_max
print "Get df_event from dims"
df_event = df_event[df_event[header_id_distrik] != 'null']
df_event = df_event[df_event[header_ketinggian] != 0]
### An empty data frame means no flood data was reported
if df_event.empty:
df_event_max = df_event
return df_event,df_event_max
dims_time_format = '%Y-%m-%dT%H:%M'
## Convert to time_series dataframe
df_event[header_waktu_kejadian] = df_event.apply(lambda row: convert_dims_to_jaksafe_datetime(row[header_tanggal_kejadian],dims_time_format),axis = 1)
## Then filtering the time series with t0 and t1
datetime_t1 = convert_dims_to_jaksafe_datetime(request_time,std_time_format)
df_event = df_event[df_event[header_waktu_kejadian] <= datetime_t1]
## drop useless column
df_event = df_event.drop('TANGGAL_SELESAI_KEJADIAN',1)
df_event = df_event.drop(header_tanggal_kejadian,1)
df_event = df_event.drop(header_kodya,1)
dims_column_order = [header_id_distrik,header_kecamatan,header_kelurahan,header_ketinggian,header_rw,header_rt,header_waktu_kejadian]
fl_event_column_order = [header_id_unit,header_district,header_village,header_depth,header_rw,header_rt,header_report_time]
## Adapting the df to database
for idx,col_name in enumerate(dims_column_order):
df_event.rename(columns={col_name:fl_event_column_order[idx]},inplace = True)
df_event[header_request_time] = datetime_t1
## Changing data type as float
df_event_max = df_event
if not df_event.empty:
df_event[[header_depth]] = df_event[[header_depth]].astype(float)
grb = df_event.groupby(header_id_unit)
df_event_max = grb.aggregate(np.max)
df_event_max[header_id_unit] = df_event_max.index
df_event_max['id'] = range(len(df_event_max))
df_event_max = df_event_max.set_index('id')
df_event_max.index.name = None
return df_event,df_event_max
def insert_dims_dataframe_to_database(df_event_dims_raw,df_event_dims,table_raw_name_event,table_name_event,db_con):
print "Inserting data from dims to fl_database ...."
df_event_dims.to_sql(con=db_con, name = table_name_event, if_exists='append', flavor='mysql', index = False)
df_event_dims_raw.to_sql(con=db_con, name = table_raw_name_event, if_exists='append', flavor='mysql', index = False)
def get_latest_fl_event(db_con,table_name,t_now,event_duration):
## Default event_duration = 2 days
print 'get latest fl event... (max 2 days)'
if event_duration < 1 or event_duration > 3:
event_duration = 2
event_duration = (event_duration * 24 * 3600)
ta = t.Time(t_now.timeStamp()-event_duration)
sql_dump = "SELECT * FROM %s WHERE request_time <= '%s' and request_time >= '%s'"%(table_name,t_now.formattedTime(),ta.formattedTime())
print sql_dump
df = psql.read_frame(sql_dump, db_con, index_col = 'id')
return df
def get_fl_event(db_con,table_name,t0,t1):
## Creating sql dump
sql_dump = "SELECT * FROM %s WHERE request_time <= '%s' and request_time >= '%s'"%(table_name,t1,t0)
df = psql.read_frame(sql_dump, db_con, index_col = 'id')
return df
def preprocessing_the_hazard_data(df_units):
requested_times = pd.Series(df_units[header_request_time].values).unique()
df_unit_list = []
for f in requested_times:
df_each = df_units[df_units[header_request_time] == f]
df_each_rw,df_each_rt = split_to_rt_rw(df_each)
if df_each_rt.empty:
pass
else:
df_each_rt['is_overlapped'] = df_each_rt.apply(lambda row: check_overlapped_rt_on_rw(row[header_id_unit],df_each_rw), axis = 1)
## Remove the overlapped
df_each_rt = df_each_rt[df_each_rt['is_overlapped'] == False]
df_each_rt = df_each_rt.drop('is_overlapped',1)
df_each = pd.concat([df_each_rw,df_each_rt])
df_unit_list.append(df_each)
df_units = pd.concat(df_unit_list)
return df_units
def split_to_rt_rw(df_units):
## Get unit rw
df_units_rw = df_units[df_units[header_rt]==""]
## Get unit rt
df_units_rt = df_units[df_units[header_rt]!=""]
return df_units_rw,df_units_rt
def check_overlapped_rt_on_rw(id_rt,df_event_rw):
id_rt = str(id_rt)
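# assumed id layout: the parent RW id shares the first 13 characters of the RT id
# and ends in '000', e.g. a hypothetical RT id '3172031002001001' would map to
# the RW id '3172031002001000'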
id_rw_base = id_rt[:13] + '000'
id_rw_values = map(str,df_event_rw[header_id_unit].values)
if id_rw_base in id_rw_values:
return True
return False
def create_fl_report(df_units,t1,report_RT,report_RW,output_folder):
folder_format = glob_folder_format
t_1 = t1
t1.set_time_format(folder_format)
report_dir_name = 'fl_report'
report_dir = output_folder+'/'+report_dir_name+'/'+t_1.formattedTime()
if not os.path.exists(report_dir):
os.makedirs(report_dir)
df_rw,df_rt = split_to_rt_rw(df_units)
df_rw = df_rw.drop(header_rt,1)
## Creating RW report
rw_report_file = report_dir + "/" + t_1.formattedTime() + '_' + report_RW
df_rw.to_csv(rw_report_file,sep=',',index=False)
## Creating RT report
rw_report_file = report_dir + "/" + t_1.formattedTime() + '_' + report_RT
df_rt.to_csv(rw_report_file,sep=',',index=False)
def create_summary_fl_report(df_units,t0,t1,report_summary,report_RT,report_RW,output_folder):
folder_format = glob_folder_format
t_0 = t0
t_1 = t1
t0.set_time_format(folder_format)
t1.set_time_format(folder_format)
report_dir_name = 'report'
report_dir = output_folder+'/'+ report_dir_name + '/'+ t_0.formattedTime() + '_' + t_1.formattedTime()
if not os.path.exists(report_dir):
os.makedirs(report_dir)
df_rw,df_rt = split_to_rt_rw(df_units)
df_rw = df_rw.drop(header_rt,1)
## Creating RW report
rw_report_file = report_dir + "/" + t_0.formattedTime() + '_' + t_1.formattedTime() + '_' + report_RW
df_rw.to_csv(rw_report_file,sep=',',index=False)
## Creating RT report
rt_report_file = report_dir + "/" + t_0.formattedTime() + '_' + t_1.formattedTime() + '_' + report_RT
df_rt.to_csv(rt_report_file,sep=',',index=False)
## Creating all summary report
summary_report_file = report_dir + "/" + t_0.formattedTime() + '_' + t_1.formattedTime() + '_' + report_summary
df_units.to_csv(summary_report_file,sep=',',index=False)
def convert_dims_to_jaksafe_datetime(inputTime,time_format):
timestamp = formatted_date_to_timestamp(inputTime,time_format)
datetime = timestamp_to_date_time(timestamp)
return datetime
| gpl-2.0 |
rachelalbert/image-analogies-python | algorithms.py | 1 | 5035 | from itertools import product
import numpy as np
from numpy.linalg import norm
import pyflann as pf
from sklearn.feature_extraction.image import extract_patches_2d
from img_preprocess import px2ix, pad_img_pair, Ap_ix2px, Ap_px2ix
def compute_feature_array(im_pyr, c, full_feat):
# features will be organized like this:
# sm_imA, lg_imA (channels shuffled C-style)
# create a list of features for each pyramid level
# level 0 is empty for indexing alignment
im_features = [[]]
# pad each pyramid level to avoid edge problems
for level in range(1, len(im_pyr)):
padded_sm, padded_lg = pad_img_pair(im_pyr[level - 1], im_pyr[level], c)
patches_sm = extract_patches_2d(padded_sm, (c.n_sm, c.n_sm))
patches_lg = extract_patches_2d(padded_lg, (c.n_lg, c.n_lg))
assert(patches_sm.shape[0] == im_pyr[level - 1].shape[0] * im_pyr[level - 1].shape[1])
assert(patches_lg.shape[0] == im_pyr[level ].shape[0] * im_pyr[level ].shape[1])
# discard second half of larger feature vector
if not full_feat:
patches_lg = patches_lg.reshape(patches_lg.shape[0], -1)[:, :c.num_ch * c.n_half]
# concatenate small and large patches
level_features = []
imh, imw = im_pyr[level].shape[:2]
for row in range(imh):
for col in range(imw):
level_features.append(np.hstack([
patches_sm[np.floor(row/2.) * np.ceil(imw/2.) + np.floor(col/2.)].flatten(),
patches_lg[row * imw + col].flatten()
]))
assert(len(level_features) == imh * imw)
# final feature array is n_pixels by f_length
im_features.append(np.vstack(level_features))
return im_features
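# Illustrative note (in terms of the c.n_sm / c.n_lg patch widths and c.num_ch
# channels assumed above): each per-pixel feature concatenates the flattened
# coarse-level patch with the fine-level patch, so its length is
# c.num_ch * (c.n_sm**2 + c.n_lg**2) with full_feat=True and
# c.num_ch * (c.n_sm**2 + c.n_half) otherwise.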
def create_index(A_pyr, Ap_pyr_list, c):
A_feat = compute_feature_array(A_pyr, c, full_feat=True)
Ap_feat_list = []
for Ap_pyr in Ap_pyr_list:
Ap_feat_list.append(compute_feature_array(Ap_pyr, c, full_feat=False))
flann = [pf.FLANN() for _ in xrange(c.max_levels)]
flann_params = [list([]) for _ in xrange(c.max_levels)]
As = [list([]) for _ in xrange(c.max_levels)]
As_size = [list([]) for _ in xrange(c.max_levels)]
for level in range(1, c.max_levels):
print('Building index for level %d out of %d' % (level, c.max_levels - 1))
As_list = []
for Ap_feat in Ap_feat_list:
As_list.append(np.hstack([A_feat[level], Ap_feat[level]]))
As[level] = np.vstack(As_list)
As_size[level] = As[level].shape
flann_params[level] = flann[level].build_index(As[level], algorithm='kdtree')
return flann, flann_params, As, As_size
def best_approximate_match(flann, params, BBp_feat):
result, dists = flann.nn_index(BBp_feat, 1, checks=params['checks'])
return result[0]
def extract_pixel_feature((im_sm_padded, im_lg_padded), (row, col), c, full_feat):
# first extract full feature vector
# since the images are padded, we need to add the padding to our indexing
px_feat = np.hstack([im_sm_padded[np.floor(row/2.) : np.floor(row/2.) + 2 * c.pad_sm + 1, \
np.floor(col/2.) : np.floor(col/2.) + 2 * c.pad_sm + 1].flatten(),
im_lg_padded[row : row + 2 * c.pad_lg + 1,
col : col + 2 * c.pad_lg + 1].flatten()])
if full_feat:
return px_feat
else:
# only keep c.n_half pixels from second level
return px_feat[:c.num_ch * ((c.n_sm * c.n_sm) + c.n_half)]
def best_coherence_match(As, (A_h, A_w), BBp_feat, s, im, px, Bp_w, c):
assert(len(s) >= 1)
row, col = px
# construct iterables
rs = []
ims = []
prs = []
rows = np.arange(np.max([0, row - c.pad_lg]), row + 1, dtype=int)
cols = np.arange(np.max([0, col - c.pad_lg]), np.min([Bp_w, col + c.pad_lg + 1]), dtype=int)
for r_coord in product(rows, cols):
# discard anything after current pixel
if px2ix(r_coord, Bp_w) < px2ix(px, Bp_w):
# p_r = s(r) + (q - r)
# pr is an index in a given image Ap_list[img_num]
pr = s[px2ix(r_coord, Bp_w)] + px - r_coord
# i is a list of image nums for each pixel in Bp
img_nums = im[px2ix(r_coord, Bp_w)]
# discard anything outside the bounds of A/Ap lg
if 0 <= pr[0] < A_h and 0 <= pr[1] < A_w:
rs.append(np.array(r_coord))
ims.append(img_nums)
prs.append(Ap_px2ix(pr, img_nums, A_h, A_w))
if not rs:
# no good coherence match
return (-1, -1), 0, (0, 0)
rix = np.argmin(norm(As[np.array(prs)] - BBp_feat, ord=2, axis=1))
r_star = rs[rix]
i_star = ims[rix]
# s[r_star] + (q - r-star)
return s[px2ix(r_star, Bp_w)] + px - r_star, i_star, r_star
def compute_distance(AAp_p, BBp_q, weights):
assert(AAp_p.shape == BBp_q.shape == weights.shape)
return norm((AAp_p - BBp_q) * weights, ord=2)**2
| mit |
stijnvanhoey/pystran | docs/source/conf.py | 1 | 9473 | # -*- coding: utf-8 -*-
#
# pystran documentation build configuration file, created by
# sphinx-quickstart on Mon Mar 14 09:48:33 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'numpydoc',
'sphinx.ext.autosummary',
'matplotlib.sphinxext.mathmpl'
]
# not automatically make a summary list of all class members
# which causes a bunch of warnings
numpydoc_show_class_members = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pystran'
copyright = u'2012, S. Van Hoey'
author = u'S. Van Hoey'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1.3'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'pystrandoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pystran.tex', u'pystran Documentation',
u'S. Van Hoey', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pystran', u'pystran Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'pystran', u'pystran Documentation',
author, 'pystran', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| bsd-3-clause |
BiaDarkia/scikit-learn | examples/ensemble/plot_gradient_boosting_regression.py | 58 | 2510 | """
============================
Gradient Boosting regression
============================
Demonstrate Gradient Boosting on the Boston housing dataset.
This example fits a Gradient Boosting model with least squares loss and
500 regression trees of depth 4.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
# #############################################################################
# Load data
boston = datasets.load_boston()
X, y = shuffle(boston.data, boston.target, random_state=13)
X = X.astype(np.float32)
offset = int(X.shape[0] * 0.9)
X_train, y_train = X[:offset], y[:offset]
X_test, y_test = X[offset:], y[offset:]
# #############################################################################
# Fit regression model
params = {'n_estimators': 500, 'max_depth': 4, 'min_samples_split': 2,
'learning_rate': 0.01, 'loss': 'ls'}
clf = ensemble.GradientBoostingRegressor(**params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
print("MSE: %.4f" % mse)
# #############################################################################
# Plot training deviance
# compute test set deviance
test_score = np.zeros((params['n_estimators'],), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_predict(X_test)):
test_score[i] = clf.loss_(y_test, y_pred)
plt.figure(figsize=(12, 6))
plt.subplot(1, 2, 1)
plt.title('Deviance')
plt.plot(np.arange(params['n_estimators']) + 1, clf.train_score_, 'b-',
label='Training Set Deviance')
plt.plot(np.arange(params['n_estimators']) + 1, test_score, 'r-',
label='Test Set Deviance')
plt.legend(loc='upper right')
plt.xlabel('Boosting Iterations')
plt.ylabel('Deviance')
# #############################################################################
# Plot feature importance
feature_importance = clf.feature_importances_
# make importances relative to max importance
feature_importance = 100.0 * (feature_importance / feature_importance.max())
sorted_idx = np.argsort(feature_importance)
pos = np.arange(sorted_idx.shape[0]) + .5
plt.subplot(1, 2, 2)
plt.barh(pos, feature_importance[sorted_idx], align='center')
plt.yticks(pos, boston.feature_names[sorted_idx])
plt.xlabel('Relative Importance')
plt.title('Variable Importance')
plt.show()
| bsd-3-clause |
ibamacsr/cloud_stats | cloudstats/cloudstats.py | 2 | 2345 | import simplejson
from homura import download
from datetime import datetime
from pandas import read_csv
def get_scene_names(geojson_file):
"""Open a geojson file and get the scene names list."""
json = open(geojson_file, 'r').read()
data = simplejson.loads(json)
return [scene['properties']['name'] for scene in data['features']]
def get_metadata(download_dir='.'):
"""Download Landsat 8 metadata file."""
download(
'http://landsat.usgs.gov/metadata_service/bulk_metadata_files/LANDSAT_8.csv',
download_dir
)
class Stats(object):
def __init__(self, start_date, end_date, scene_list, landsat='LANDSAT_8.csv'):
self.start_date = datetime.strptime(start_date, '%Y%m%d').date()
self.end_date = datetime.strptime(end_date, '%Y%m%d').date()
self.scene_list = scene_list
self.data = read_csv(
landsat,
parse_dates=['acquisitionDate'],
usecols=['path', 'row', 'acquisitionDate', 'cloudCoverFull']
)
def filter_by_date(self):
"""Filter the Landsat scenes by date of acquisition."""
filtered_data = self.data[(
(self.data['acquisitionDate'] >= self.start_date) &
(self.data['acquisitionDate'] <= self.end_date)
)]
return filtered_data
def filter_by_scenes(self, data=None):
"""Filter the Landsat scenes by path and row, using the scene_list
parameter."""
if data is None:
data = self.data
data_list = data.values.tolist()
selected = []
for item in data_list:
path_row = '%s-%s' % (item[1], item[2])
if path_row in self.scene_list:
selected.append(item)
return selected
def calc_rate(self, data=None):
"""Calculate the cloud cover rate of Landsat scenes."""
if data is None:
data = self.data
if type(data) != list:
data = data.values.tolist()
clouds = [i[-1] for i in data]
if len(clouds) == 0:
return "No scenes selected"
else:
return sum(clouds) / len(clouds)
def full_calc(self):
"""Calculate the cloud cover rate from filtered scenes."""
return self.calc_rate(self.filter_by_scenes(self.filter_by_date()))
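# Illustrative usage sketch (hypothetical file names and scene identifiers):
#
#   get_metadata('.')                                     # fetches LANDSAT_8.csv
#   scenes = get_scene_names('area_of_interest.geojson')  # e.g. ['225-64', '226-68']
#   stats = Stats('20150101', '20150331', scenes)
#   print(stats.full_calc())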
| agpl-3.0 |
alekz112/statsmodels | statsmodels/sandbox/examples/ex_mixed_lls_0.py | 34 | 5233 | # -*- coding: utf-8 -*-
"""Example using OneWayMixed
Created on Sat Dec 03 10:15:55 2011
Author: Josef Perktold
This example constructs a linear model with individual specific random
effects and random coefficients, and uses OneWayMixed to estimate it.
"""
from __future__ import print_function
import numpy as np
from statsmodels.sandbox.panel.mixed import OneWayMixed, Unit
examples = ['ex1']
if 'ex1' in examples:
#np.random.seed(54321)
np.random.seed(978326)
nsubj = 2000
units = []
nobs_i = 4 #number of observations per unit, changed below
nx = 4 #number fixed effects
nz = 2 ##number random effects
beta = np.ones(nx)
gamma = 0.5 * np.ones(nz) #mean of random effect
gamma[0] = 0
gamma_re_true = []
for i in range(nsubj):
#create data for one unit
#random effect/coefficient
gamma_re = gamma + 0.2 * np.random.standard_normal(nz)
#store true parameter for checking
gamma_re_true.append(gamma_re)
#for testing unbalanced case, let's change nobs per unit
if i > nsubj//4:
nobs_i = 6
#generate exogenous variables
X = np.random.standard_normal((nobs_i, nx))
Z = np.random.standard_normal((nobs_i, nz-1))
Z = np.column_stack((np.ones(nobs_i), Z))
noise = 0.1 * np.random.randn(nobs_i) #sig_e = 0.1
#generate endogenous variable
Y = np.dot(X, beta) + np.dot(Z, gamma_re) + noise
#add random effect design matrix also to fixed effects to
#capture the mean
#this seems to be necessary to force mean of RE to zero !?
#(It's not required for estimation but interpretation of random
#effects covariance matrix changes - still need to check details.)
X = np.hstack((X,Z))
#create units and append to list
unit = Unit(Y, X, Z)
units.append(unit)
m = OneWayMixed(units)
import time
t0 = time.time()
m.initialize()
res = m.fit(maxiter=100, rtol=1.0e-5, params_rtol=1e-6, params_atol=1e-6)
t1 = time.time()
print('time for initialize and fit', t1-t0)
print('number of iterations', m.iterations)
#print(dir(m)
#print(vars(m)
print('\nestimates for fixed effects')
print(m.a)
print(m.params)
bfixed_cov = m.cov_fixed()
print('beta fixed standard errors')
print(np.sqrt(np.diag(bfixed_cov)))
print(m.bse)
b_re = m.params_random_units
print('RE mean:', b_re.mean(0))
print('RE columns std', b_re.std(0))
print('np.cov(b_re, rowvar=0), sample statistic')
print(np.cov(b_re, rowvar=0))
print('std of above')
print(np.sqrt(np.diag(np.cov(b_re, rowvar=0))))
print('m.cov_random()')
print(m.cov_random())
print('std of above')
print(res.std_random())
print(np.sqrt(np.diag(m.cov_random())))
print('\n(non)convergence of llf')
print(m.history['llf'][-4:])
print('convergence of parameters')
#print(np.diff(np.vstack(m.history[-4:])[:,1:],axis=0)
print(np.diff(np.vstack(m.history['params'][-4:]),axis=0))
print('convergence of D')
print(np.diff(np.array(m.history['D'][-4:]), axis=0))
#zdotb = np.array([np.dot(unit.Z, unit.b) for unit in m.units])
zb = np.array([(unit.Z * unit.b[None,:]).sum(0) for unit in m.units])
'''if Z is not included in X:
>>> np.dot(b_re.T, b_re)/100
array([[ 0.03270611, -0.00916051],
[-0.00916051, 0.26432783]])
>>> m.cov_random()
array([[ 0.0348722 , -0.00909159],
[-0.00909159, 0.26846254]])
>>> #note cov_random doesn't subtract mean!
'''
print('\nchecking the random effects distribution and prediction')
gamma_re_true = np.array(gamma_re_true)
print('mean of random effect true', gamma_re_true.mean(0))
print('mean from fixed effects ', m.params[-2:])
print('mean of estimated RE ', b_re.mean(0))
print('')
absmean_true = np.abs(gamma_re_true).mean(0)
mape = ((m.params[-2:] + b_re) / gamma_re_true - 1).mean(0)*100
mean_abs_perc = np.abs((m.params[-2:] + b_re) - gamma_re_true).mean(0) \
/ absmean_true*100
median_abs_perc = np.median(np.abs((m.params[-2:] + b_re) - gamma_re_true), 0) \
/ absmean_true*100
rmse_perc = ((m.params[-2:] + b_re) - gamma_re_true).std(0) \
/ absmean_true*100
print('mape ', mape)
print('mean_abs_perc ', mean_abs_perc)
print('median_abs_perc', median_abs_perc)
print('rmse_perc (std)', rmse_perc)
from numpy.testing import assert_almost_equal
#assert is for n_units=100 in original example
#I changed random number generation, so this won't work anymore
#assert_almost_equal(rmse_perc, [ 34.14783884, 11.6031684 ], decimal=8)
#now returns res
print(res.llf) #based on MLE, does not include constant
print(res.tvalues)
print(res.pvalues)
print(res.t_test([1,-1,0,0,0,0]))
print('test mean of both random effects variables is zero')
print(res.f_test([[0,0,0,0,1,0], [0,0,0,0,0,1]]))
plots = res.plot_random_univariate(bins=50)
fig = res.plot_scatter_pairs(0, 1)
import matplotlib.pyplot as plt
plt.show()
| bsd-3-clause |
aabadie/scikit-learn | sklearn/ensemble/forest.py | 5 | 66535 | """Forest of trees-based ensemble methods
Those methods include random forests and extremely randomized trees.
The module structure is the following:
- The ``BaseForest`` base class implements a common ``fit`` method for all
the estimators in the module. The ``fit`` method of the base ``Forest``
class calls the ``fit`` method of each sub-estimator on random samples
(with replacement, a.k.a. bootstrap) of the training set.
The init of the sub-estimator is further delegated to the
``BaseEnsemble`` constructor.
- The ``ForestClassifier`` and ``ForestRegressor`` base classes further
implement the prediction logic by computing an average of the predicted
outcomes of the sub-estimators.
- The ``RandomForestClassifier`` and ``RandomForestRegressor`` derived
classes provide the user with concrete implementations of
the forest ensemble method using classical, deterministic
``DecisionTreeClassifier`` and ``DecisionTreeRegressor`` as
sub-estimator implementations.
- The ``ExtraTreesClassifier`` and ``ExtraTreesRegressor`` derived
classes provide the user with concrete implementations of the
forest ensemble method using the extremely randomized trees
``ExtraTreeClassifier`` and ``ExtraTreeRegressor`` as
sub-estimator implementations.
Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <[email protected]>
# Brian Holt <[email protected]>
# Joly Arnaud <[email protected]>
# Fares Hedayati <[email protected]>
#
# License: BSD 3 clause
from __future__ import division
import warnings
from warnings import warn
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from scipy.sparse import hstack as sparse_hstack
from ..base import ClassifierMixin, RegressorMixin
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..metrics import r2_score
from ..preprocessing import OneHotEncoder
from ..tree import (DecisionTreeClassifier, DecisionTreeRegressor,
ExtraTreeClassifier, ExtraTreeRegressor)
from ..tree._tree import DTYPE, DOUBLE
from ..utils import check_random_state, check_array, compute_sample_weight
from ..exceptions import DataConversionWarning, NotFittedError
from .base import BaseEnsemble, _partition_estimators
from ..utils.fixes import bincount, parallel_helper
from ..utils.multiclass import check_classification_targets
__all__ = ["RandomForestClassifier",
"RandomForestRegressor",
"ExtraTreesClassifier",
"ExtraTreesRegressor",
"RandomTreesEmbedding"]
MAX_INT = np.iinfo(np.int32).max
def _generate_sample_indices(random_state, n_samples):
"""Private function used to _parallel_build_trees function."""
random_instance = check_random_state(random_state)
sample_indices = random_instance.randint(0, n_samples, n_samples)
return sample_indices
def _generate_unsampled_indices(random_state, n_samples):
"""Private function used to forest._set_oob_score function."""
sample_indices = _generate_sample_indices(random_state, n_samples)
sample_counts = bincount(sample_indices, minlength=n_samples)
unsampled_mask = sample_counts == 0
indices_range = np.arange(n_samples)
unsampled_indices = indices_range[unsampled_mask]
return unsampled_indices
def _parallel_build_trees(tree, forest, X, y, sample_weight, tree_idx, n_trees,
verbose=0, class_weight=None):
"""Private function used to fit a single tree in parallel."""
if verbose > 1:
print("building tree %d of %d" % (tree_idx + 1, n_trees))
if forest.bootstrap:
n_samples = X.shape[0]
if sample_weight is None:
curr_sample_weight = np.ones((n_samples,), dtype=np.float64)
else:
curr_sample_weight = sample_weight.copy()
indices = _generate_sample_indices(tree.random_state, n_samples)
sample_counts = bincount(indices, minlength=n_samples)
curr_sample_weight *= sample_counts
if class_weight == 'subsample':
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
curr_sample_weight *= compute_sample_weight('auto', y, indices)
elif class_weight == 'balanced_subsample':
curr_sample_weight *= compute_sample_weight('balanced', y, indices)
tree.fit(X, y, sample_weight=curr_sample_weight, check_input=False)
else:
tree.fit(X, y, sample_weight=sample_weight, check_input=False)
return tree
class BaseForest(six.with_metaclass(ABCMeta, BaseEnsemble,
_LearntSelectorMixin)):
"""Base class for forests of trees.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(BaseForest, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params)
self.bootstrap = bootstrap
self.oob_score = oob_score
self.n_jobs = n_jobs
self.random_state = random_state
self.verbose = verbose
self.warm_start = warm_start
self.class_weight = class_weight
def apply(self, X):
"""Apply trees in the forest to X, return leaf indices.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
X_leaves : array_like, shape = [n_samples, n_estimators]
For each datapoint x in X and for each tree in the forest,
return the index of the leaf x ends up in.
"""
X = self._validate_X_predict(X)
results = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(parallel_helper)(tree, 'apply', X, check_input=False)
for tree in self.estimators_)
return np.array(results).T
def decision_path(self, X):
"""Return the decision path in the forest
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
indicator : sparse csr array, shape = [n_samples, n_nodes]
Return a node indicator matrix where non-zero elements
indicate that the samples go through the nodes.
n_nodes_ptr : array of size (n_estimators + 1, )
The columns from indicator[n_nodes_ptr[i]:n_nodes_ptr[i+1]]
gives the indicator value for the i-th estimator.
"""
X = self._validate_X_predict(X)
indicators = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(parallel_helper)(tree, 'decision_path', X,
check_input=False)
for tree in self.estimators_)
n_nodes = [0]
n_nodes.extend([i.shape[1] for i in indicators])
n_nodes_ptr = np.array(n_nodes).cumsum()
return sparse_hstack(indicators).tocsr(), n_nodes_ptr
def fit(self, X, y, sample_weight=None):
"""Build a forest of trees from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The training input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
Returns
-------
self : object
Returns self.
"""
# Validate or convert input data
X = check_array(X, accept_sparse="csc", dtype=DTYPE)
y = check_array(y, accept_sparse='csc', ensure_2d=False, dtype=None)
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
# Remap output
n_samples, self.n_features_ = X.shape
y = np.atleast_1d(y)
if y.ndim == 2 and y.shape[1] == 1:
warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples,), for example using ravel().",
DataConversionWarning, stacklevel=2)
if y.ndim == 1:
            # reshape is necessary to preserve data contiguity, which
            # [:, np.newaxis] does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
y, expanded_class_weight = self._validate_y_class_weight(y)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Check parameters
self._validate_estimator()
if not self.bootstrap and self.oob_score:
raise ValueError("Out of bag estimation only available"
" if bootstrap=True")
random_state = check_random_state(self.random_state)
if not self.warm_start:
# Free allocated memory, if any
self.estimators_ = []
n_more_estimators = self.n_estimators - len(self.estimators_)
if n_more_estimators < 0:
            raise ValueError('n_estimators=%d must be larger than or equal to '
'len(estimators_)=%d when warm_start==True'
% (self.n_estimators, len(self.estimators_)))
elif n_more_estimators == 0:
warn("Warm-start fitting without increasing n_estimators does not "
"fit new trees.")
else:
if self.warm_start and len(self.estimators_) > 0:
# We draw from the random state to get the random state we
# would have got if we hadn't used a warm_start.
random_state.randint(MAX_INT, size=len(self.estimators_))
trees = []
for i in range(n_more_estimators):
tree = self._make_estimator(append=False)
tree.set_params(random_state=random_state.randint(MAX_INT))
trees.append(tree)
# Parallel loop: we use the threading backend as the Cython code
# for fitting the trees is internally releasing the Python GIL
# making threading always more efficient than multiprocessing in
# that case.
trees = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_build_trees)(
t, self, X, y, sample_weight, i, len(trees),
verbose=self.verbose, class_weight=self.class_weight)
for i, t in enumerate(trees))
# Collect newly grown trees
self.estimators_.extend(trees)
if self.oob_score:
self._set_oob_score(X, y)
# Decapsulate classes_ attributes
if hasattr(self, "classes_") and self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
@abstractmethod
def _set_oob_score(self, X, y):
"""Calculate out of bag predictions and score."""
def _validate_y_class_weight(self, y):
# Default implementation
return y, None
def _validate_X_predict(self, X):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
return self.estimators_[0]._validate_X_predict(X, check_input=True)
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, "
"call `fit` before `feature_importances_`.")
all_importances = Parallel(n_jobs=self.n_jobs,
backend="threading")(
delayed(getattr)(tree, 'feature_importances_')
for tree in self.estimators_)
return sum(all_importances) / len(self.estimators_)
class ForestClassifier(six.with_metaclass(ABCMeta, BaseForest,
ClassifierMixin)):
"""Base class for forest of trees-based classifiers.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(ForestClassifier, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
def _set_oob_score(self, X, y):
"""Compute out-of-bag score"""
X = check_array(X, dtype=DTYPE, accept_sparse='csr')
n_classes_ = self.n_classes_
n_samples = y.shape[0]
oob_decision_function = []
oob_score = 0.0
predictions = []
for k in range(self.n_outputs_):
predictions.append(np.zeros((n_samples, n_classes_[k])))
for estimator in self.estimators_:
unsampled_indices = _generate_unsampled_indices(
estimator.random_state, n_samples)
p_estimator = estimator.predict_proba(X[unsampled_indices, :],
check_input=False)
if self.n_outputs_ == 1:
p_estimator = [p_estimator]
for k in range(self.n_outputs_):
predictions[k][unsampled_indices, :] += p_estimator[k]
for k in range(self.n_outputs_):
if (predictions[k].sum(axis=1) == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
decision = (predictions[k] /
predictions[k].sum(axis=1)[:, np.newaxis])
oob_decision_function.append(decision)
oob_score += np.mean(y[:, k] ==
np.argmax(predictions[k], axis=1), axis=0)
if self.n_outputs_ == 1:
self.oob_decision_function_ = oob_decision_function[0]
else:
self.oob_decision_function_ = oob_decision_function
self.oob_score_ = oob_score / self.n_outputs_
def _validate_y_class_weight(self, y):
check_classification_targets(y)
y = np.copy(y)
expanded_class_weight = None
if self.class_weight is not None:
y_original = np.copy(y)
self.classes_ = []
self.n_classes_ = []
y_store_unique_indices = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_store_unique_indices
if self.class_weight is not None:
valid_presets = ('auto', 'balanced', 'subsample', 'balanced_subsample')
if isinstance(self.class_weight, six.string_types):
if self.class_weight not in valid_presets:
raise ValueError('Valid presets for class_weight include '
'"balanced" and "balanced_subsample". Given "%s".'
% self.class_weight)
if self.class_weight == "subsample":
warn("class_weight='subsample' is deprecated in 0.17 and"
"will be removed in 0.19. It was replaced by "
"class_weight='balanced_subsample' using the balanced"
"strategy.", DeprecationWarning)
if self.warm_start:
warn('class_weight presets "balanced" or "balanced_subsample" are '
'not recommended for warm_start if the fitted data '
'differs from the full dataset. In order to use '
'"balanced" weights, use compute_class_weight("balanced", '
'classes, y). In place of y you can use a large '
'enough sample of the full training set target to '
'properly estimate the class frequency '
'distributions. Pass the resulting weights as the '
'class_weight parameter.')
if (self.class_weight not in ['subsample', 'balanced_subsample'] or
not self.bootstrap):
if self.class_weight == 'subsample':
class_weight = 'auto'
elif self.class_weight == "balanced_subsample":
class_weight = "balanced"
else:
class_weight = self.class_weight
with warnings.catch_warnings():
if class_weight == "auto":
warnings.simplefilter('ignore', DeprecationWarning)
expanded_class_weight = compute_sample_weight(class_weight,
y_original)
return y, expanded_class_weight
def predict(self, X):
"""Predict class for X.
The predicted class of an input sample is a vote by the trees in
the forest, weighted by their probability estimates. That is,
the predicted class is the one with highest mean probability
estimate across the trees.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
n_samples = proba[0].shape[0]
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(np.argmax(proba[k],
axis=1),
axis=0)
return predictions
def predict_proba(self, X):
"""Predict class probabilities for X.
The predicted class probabilities of an input sample are computed as
the mean predicted class probabilities of the trees in the forest. The
class probability of a single tree is the fraction of samples of the same
class in a leaf.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# Parallel loop
all_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose,
backend="threading")(
delayed(parallel_helper)(e, 'predict_proba', X,
check_input=False)
for e in self.estimators_)
# Reduce
proba = all_proba[0]
if self.n_outputs_ == 1:
for j in range(1, len(all_proba)):
proba += all_proba[j]
proba /= len(self.estimators_)
else:
for j in range(1, len(all_proba)):
for k in range(self.n_outputs_):
proba[k] += all_proba[j][k]
for k in range(self.n_outputs_):
proba[k] /= self.n_estimators
return proba
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
The predicted class log-probabilities of an input sample is computed as
the log of the mean predicted class probabilities of the trees in the
forest.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
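# Added note (a sketch, not part of the original module): for the single-output
# case, the soft-voting rule implemented in predict() above is equivalent to an
# argmax over the averaged probabilities, i.e. for a fitted concrete subclass
# `clf` (such as RandomForestClassifier defined below) and input X:
#
#   clf.classes_.take(np.argmax(clf.predict_proba(X), axis=1), axis=0)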
class ForestRegressor(six.with_metaclass(ABCMeta, BaseForest, RegressorMixin)):
"""Base class for forest of trees-based regressors.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(ForestRegressor, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
def predict(self, X):
"""Predict regression target for X.
The predicted regression target of an input sample is computed as the
mean predicted regression targets of the trees in the forest.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted values.
"""
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# Parallel loop
all_y_hat = Parallel(n_jobs=n_jobs, verbose=self.verbose,
backend="threading")(
delayed(parallel_helper)(e, 'predict', X, check_input=False)
for e in self.estimators_)
# Reduce
y_hat = sum(all_y_hat) / len(self.estimators_)
return y_hat
def _set_oob_score(self, X, y):
"""Compute out-of-bag scores"""
X = check_array(X, dtype=DTYPE, accept_sparse='csr')
n_samples = y.shape[0]
predictions = np.zeros((n_samples, self.n_outputs_))
n_predictions = np.zeros((n_samples, self.n_outputs_))
for estimator in self.estimators_:
unsampled_indices = _generate_unsampled_indices(
estimator.random_state, n_samples)
p_estimator = estimator.predict(
X[unsampled_indices, :], check_input=False)
if self.n_outputs_ == 1:
p_estimator = p_estimator[:, np.newaxis]
predictions[unsampled_indices, :] += p_estimator
n_predictions[unsampled_indices, :] += 1
if (n_predictions == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
n_predictions[n_predictions == 0] = 1
predictions /= n_predictions
self.oob_prediction_ = predictions
if self.n_outputs_ == 1:
self.oob_prediction_ = \
self.oob_prediction_.reshape((n_samples, ))
self.oob_score_ = 0.0
for k in range(self.n_outputs_):
self.oob_score_ += r2_score(y[:, k],
predictions[:, k])
self.oob_score_ /= self.n_outputs_
class RandomForestClassifier(ForestClassifier):
"""A random forest classifier.
A random forest is a meta estimator that fits a number of decision tree
    classifiers on various sub-samples of the dataset and uses averaging to
improve the predictive accuracy and control over-fitting.
The sub-sample size is always the same as the original
input sample size but the samples are drawn with replacement if
`bootstrap=True` (default).
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)` (same as "auto").
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool (default=False)
Whether to use out-of-bag samples to estimate
the generalization accuracy.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
class_weight : dict, list of dicts, "balanced",
"balanced_subsample" or None, optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that
weights are computed based on the bootstrap sample for every tree
grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeClassifier, ExtraTreesClassifier
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_split=1e-7,
bootstrap=True,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(RandomForestClassifier, self).__init__(
base_estimator=DecisionTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "min_impurity_split",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
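# Illustrative usage sketch (added; the data, shapes and parameter values below
# are made up and only assume the fit/predict API defined in this module):
#
#   import numpy as np
#   X = np.random.rand(200, 4)                  # 200 samples, 4 features
#   y = (X[:, 0] + X[:, 1] > 1.0).astype(int)   # binary labels
#   clf = RandomForestClassifier(n_estimators=50, random_state=0)
#   clf.fit(X, y)
#   clf.predict(X[:5])            # class with highest mean probability per sample
#   clf.predict_proba(X[:5])      # averaged per-class probabilities, shape (5, 2)
#   clf.feature_importances_      # mean impurity-based importances, shape (4,)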
class RandomForestRegressor(ForestRegressor):
"""A random forest regressor.
    A random forest is a meta estimator that fits a number of decision tree
    regressors on various sub-samples of the dataset and uses averaging
    to improve the predictive accuracy and control over-fitting.
The sub-sample size is always the same as the original
input sample size but the samples are drawn with replacement if
`bootstrap=True` (default).
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. Supported criteria
are "mse" for the mean squared error, which is equal to variance
reduction as feature selection criterion, and "mae" for the mean
absolute error.
.. versionadded:: 0.18
Mean Absolute Error (MAE) criterion.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool, optional (default=False)
        Whether to use out-of-bag samples to estimate
the R^2 on unseen data.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeRegressor, ExtraTreesRegressor
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_split=1e-7,
bootstrap=True,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(RandomForestRegressor, self).__init__(
base_estimator=DecisionTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "min_impurity_split",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
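# Illustrative usage sketch (added; assumed data): with the default
# bootstrap=True and oob_score=True, the regressor exposes an out-of-bag R^2
# estimate after fitting, computed by ForestRegressor._set_oob_score above.
#
#   import numpy as np
#   X = np.random.rand(300, 3)
#   y = 2.0 * X[:, 0] + np.random.normal(scale=0.1, size=300)
#   reg = RandomForestRegressor(n_estimators=100, oob_score=True, random_state=0)
#   reg.fit(X, y)
#   reg.oob_score_       # out-of-bag R^2
#   reg.predict(X[:5])   # mean of the per-tree predictions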
class ExtraTreesClassifier(ForestClassifier):
"""An extra-trees classifier.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
    of the dataset and uses averaging to improve the predictive accuracy
and control over-fitting.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
oob_score : bool, optional (default=False)
Whether to use out-of-bag samples to estimate
the generalization accuracy.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
class_weight : dict, list of dicts, "balanced", "balanced_subsample" or None, optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that weights are
computed based on the bootstrap sample for every tree grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeClassifier : Base classifier for this ensemble.
RandomForestClassifier : Ensemble Classifier based on trees with optimal
splits.
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_split=1e-7,
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(ExtraTreesClassifier, self).__init__(
base_estimator=ExtraTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "min_impurity_split",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
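# Illustrative sketch (added; X and y as in the RandomForestClassifier sketch
# above): the interface is identical, the behavioural differences being that
# split thresholds are drawn at random and bootstrap defaults to False, so the
# whole training set is used for every tree unless bootstrap=True is passed.
#
#   est = ExtraTreesClassifier(n_estimators=50, random_state=0)
#   est.fit(X, y)
#   est.predict_proba(X[:5])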
class ExtraTreesRegressor(ForestRegressor):
"""An extra-trees regressor.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
    of the dataset and uses averaging to improve the predictive accuracy
and control over-fitting.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. Supported criteria
are "mse" for the mean squared error, which is equal to variance
reduction as feature selection criterion, and "mae" for the mean
absolute error.
.. versionadded:: 0.18
Mean Absolute Error (MAE) criterion.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
oob_score : bool, optional (default=False)
Whether to use out-of-bag samples to estimate the R^2 on unseen data.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features.
n_outputs_ : int
The number of outputs.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeRegressor: Base estimator for this ensemble.
RandomForestRegressor: Ensemble regressor using trees with optimal splits.
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_split=1e-7,
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(ExtraTreesRegressor, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "min_impurity_split",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
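# Illustrative sketch (added; assumed data as in the RandomForestRegressor
# sketch above): usage mirrors RandomForestRegressor, again with randomised
# split thresholds and bootstrap=False by default.
#
#   etr = ExtraTreesRegressor(n_estimators=100, random_state=0)
#   etr.fit(X, y)
#   etr.predict(X[:5])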
class RandomTreesEmbedding(BaseForest):
"""An ensemble of totally random trees.
An unsupervised transformation of a dataset to a high-dimensional
sparse representation. A datapoint is coded according to which leaf of
each tree it is sorted into. Using a one-hot encoding of the leaves,
this leads to a binary coding with as many ones as there are trees in
the forest.
The dimensionality of the resulting representation is
``n_out <= n_estimators * max_leaf_nodes``. If ``max_leaf_nodes == None``,
the number of leaf nodes is at most ``n_estimators * 2 ** max_depth``.
Read more in the :ref:`User Guide <random_trees_embedding>`.
Parameters
----------
n_estimators : integer, optional (default=10)
Number of trees in the forest.
max_depth : integer, optional (default=5)
The maximum depth of each tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` is the minimum
number of samples for each split.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` is the minimum
number of samples for each node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
sparse_output : bool, optional (default=True)
Whether or not to return a sparse CSR matrix, as default behavior,
or to return a dense array compatible with dense pipeline operators.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
.. [2] Moosmann, F. and Triggs, B. and Jurie, F. "Fast discriminative
visual codebooks using randomized clustering forests"
NIPS 2007
"""
def __init__(self,
n_estimators=10,
max_depth=5,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_leaf_nodes=None,
min_impurity_split=1e-7,
sparse_output=True,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(RandomTreesEmbedding, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "min_impurity_split",
"random_state"),
bootstrap=False,
oob_score=False,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = 'mse'
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = 1
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
self.sparse_output = sparse_output
def _set_oob_score(self, X, y):
raise NotImplementedError("OOB score not supported by tree embedding")
def fit(self, X, y=None, sample_weight=None):
"""Fit estimator.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
The input samples. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csc_matrix`` for maximum efficiency.
Returns
-------
self : object
Returns self.
"""
self.fit_transform(X, y, sample_weight=sample_weight)
return self
def fit_transform(self, X, y=None, sample_weight=None):
"""Fit estimator and transform dataset.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Input data used to build forests. Use ``dtype=np.float32`` for
maximum efficiency.
Returns
-------
X_transformed : sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
        # ensure_2d=False because there are unit tests checking that we fail
        # for 1d input.
X = check_array(X, accept_sparse=['csc'], ensure_2d=False)
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
rnd = check_random_state(self.random_state)
y = rnd.uniform(size=X.shape[0])
super(RandomTreesEmbedding, self).fit(X, y,
sample_weight=sample_weight)
self.one_hot_encoder_ = OneHotEncoder(sparse=self.sparse_output)
return self.one_hot_encoder_.fit_transform(self.apply(X))
def transform(self, X):
"""Transform dataset.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Input data to be transformed. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csr_matrix`` for maximum efficiency.
Returns
-------
X_transformed : sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
return self.one_hot_encoder_.transform(self.apply(X))
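# Illustrative sketch (added; assumed shapes): fit_transform returns a sparse
# one-hot encoding of the leaf each sample lands in, one block of columns per
# tree, which can then be fed to linear models or used for hashing-style
# feature expansion.
#
#   import numpy as np
#   X = np.random.rand(100, 2)
#   embedder = RandomTreesEmbedding(n_estimators=10, max_depth=3, random_state=0)
#   X_sparse = embedder.fit_transform(X)   # scipy.sparse matrix, shape (100, n_out)
#   # n_out is at most n_estimators * 2**max_depth = 10 * 8 leaves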
| bsd-3-clause |
c-lipka/povray | tools/meta-make/bluenoise/BlueNoise.py | 4 | 23374 | # BlueNoise.py - An implementation of the void and cluster method for generation of
# blue noise dither arrays and related utilities.
#
# Written in 2016 by Christoph Peters, Christoph(at)MomentsInGraphics.de
#
# To the extent possible under law, the author(s) have dedicated all copyright and
# related and neighboring rights to this software to the public domain worldwide.
# This software is distributed without any warranty.
#
# You should have received a copy of the CC0 Public Domain Dedication along with
# this software. If not, see <http://creativecommons.org/publicdomain/zero/1.0/>.
from os import path,makedirs
import numpy as np
from scipy import ndimage
from matplotlib import pyplot
import png
import threading
import struct
def GetBayerPattern(Log2Width):
"""Creates a two-dimensional Bayer pattern with a width and height of
2**Log2Width."""
X,Y=np.meshgrid(range(2**Log2Width),range(2**Log2Width));
Result=np.zeros_like(X);
for i in range(Log2Width):
StripesY=np.where(np.bitwise_and(Y,2**(Log2Width-1-i))!=0,1,0);
StripesX=np.where(np.bitwise_and(X,2**(Log2Width-1-i))!=0,1,0);
Checker=np.bitwise_xor(StripesX,StripesY);
Result+=np.bitwise_or(StripesY*2**(2*i),Checker*2**(2*i+1));
return Result;
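# Added reference values (worked out from the loop above, not in the original
# file): GetBayerPattern(1) yields the classic 2x2 Bayer matrix
#   [[0, 2],
#    [3, 1]]
# and GetBayerPattern(2) the standard 4x4 matrix
#   [[ 0,  8,  2, 10],
#    [12,  4, 14,  6],
#    [ 3, 11,  1,  9],
#    [15,  7, 13,  5]]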
def FindLargestVoid(BinaryPattern,StandardDeviation):
"""This function returns the indices of the largest void in the given binary
pattern as defined by Ulichney.
\param BinaryPattern A boolean array (should be two-dimensional although the
implementation works in arbitrary dimensions).
\param StandardDeviation The standard deviation used for the Gaussian filter
in pixels. This can be a single float for an isotropic Gaussian or a
tuple with one float per dimension for an anisotropic Gaussian.
\return A flat index i such that BinaryPattern.flat[i] corresponds to the
largest void. By definition this is a majority pixel.
\sa GetVoidAndClusterBlueNoise"""
# The minority value is always True for convenience
if(np.count_nonzero(BinaryPattern)*2>=np.size(BinaryPattern)):
BinaryPattern=np.logical_not(BinaryPattern);
# Apply the Gaussian. We do not want to cut off the Gaussian at all because even
# the tiniest difference can change the ranking. Therefore we apply the Gaussian
# through a fast Fourier transform by means of the convolution theorem.
FilteredArray=np.fft.ifftn(ndimage.fourier.fourier_gaussian(np.fft.fftn(np.where(BinaryPattern,1.0,0.0)),StandardDeviation)).real;
# Find the largest void
return np.argmin(np.where(BinaryPattern,2.0,FilteredArray));
def FindTightestCluster(BinaryPattern,StandardDeviation):
"""Like FindLargestVoid() but finds the tightest cluster which is a minority
pixel by definition.
\sa GetVoidAndClusterBlueNoise"""
if(np.count_nonzero(BinaryPattern)*2>=np.size(BinaryPattern)):
BinaryPattern=np.logical_not(BinaryPattern);
FilteredArray=np.fft.ifftn(ndimage.fourier.fourier_gaussian(np.fft.fftn(np.where(BinaryPattern,1.0,0.0)),StandardDeviation)).real;
return np.argmax(np.where(BinaryPattern,FilteredArray,-1.0));
def GetVoidAndClusterBlueNoise(OutputShape,StandardDeviation=1.5,InitialSeedFraction=0.1):
"""Generates a blue noise dither array of the given shape using the method
proposed by Ulichney [1993] in "The void-and-cluster method for dither array
generation" published in Proc. SPIE 1913.
\param OutputShape The shape of the output array. This function works in
arbitrary dimension, i.e. OutputShape can have arbitrary length. Though
it is only tested for the 2D case where you should pass a tuple
(Height,Width).
\param StandardDeviation The standard deviation in pixels used for the
Gaussian filter defining largest voids and tightest clusters. Larger
values lead to more low-frequency content but better isotropy. Small
values lead to more ordered patterns with less low-frequency content.
Ulichney proposes to use a value of 1.5. If you want an anisotropic
Gaussian, you can pass a tuple of length len(OutputShape) with one
standard deviation per dimension.
\param InitialSeedFraction The only non-deterministic step in the algorithm
marks a small number of pixels in the grid randomly. This parameter
defines the fraction of such points. It has to be positive but less
than 0.5. Very small values lead to ordered patterns, beyond that there
is little change.
\return An integer array of shape OutputShape containing each integer from 0
to np.prod(OutputShape)-1 exactly once."""
nRank=np.prod(OutputShape);
# Generate the initial binary pattern with a prescribed number of ones
nInitialOne=max(1,min(int((nRank-1)/2),int(nRank*InitialSeedFraction)));
# Start from white noise (this is the only randomized step)
InitialBinaryPattern=np.zeros(OutputShape,dtype=np.bool);
InitialBinaryPattern.flat=np.random.permutation(np.arange(nRank))<nInitialOne;
# Swap ones from tightest clusters to largest voids iteratively until convergence
while(True):
iTightestCluster=FindTightestCluster(InitialBinaryPattern,StandardDeviation);
InitialBinaryPattern.flat[iTightestCluster]=False;
iLargestVoid=FindLargestVoid(InitialBinaryPattern,StandardDeviation);
if(iLargestVoid==iTightestCluster):
InitialBinaryPattern.flat[iTightestCluster]=True;
# Nothing has changed, so we have converged
break;
else:
InitialBinaryPattern.flat[iLargestVoid]=True;
# Rank all pixels
DitherArray=np.zeros(OutputShape,dtype=np.int);
# Phase 1: Rank minority pixels in the initial binary pattern
BinaryPattern=np.copy(InitialBinaryPattern);
for Rank in range(nInitialOne-1,-1,-1):
iTightestCluster=FindTightestCluster(BinaryPattern,StandardDeviation);
BinaryPattern.flat[iTightestCluster]=False;
DitherArray.flat[iTightestCluster]=Rank;
# Phase 2: Rank the remainder of the first half of all pixels
BinaryPattern=InitialBinaryPattern;
for Rank in range(nInitialOne,int((nRank+1)/2)):
iLargestVoid=FindLargestVoid(BinaryPattern,StandardDeviation);
BinaryPattern.flat[iLargestVoid]=True;
DitherArray.flat[iLargestVoid]=Rank;
# Phase 3: Rank the last half of pixels
for Rank in range(int((nRank+1)/2),nRank):
iTightestCluster=FindTightestCluster(BinaryPattern,StandardDeviation);
BinaryPattern.flat[iTightestCluster]=True;
DitherArray.flat[iTightestCluster]=Rank;
return DitherArray;
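# Illustrative usage sketch (added; the file name and resolution are made up):
# the helper functions used below are defined further down in this file.
#
#   Texture = GetVoidAndClusterBlueNoise((64, 64), StandardDeviation=1.5)
#   StoreNoiseTextureHDR(Texture, "BlueNoise64.png")   # 16-bit greyscale png
#   AnalyzeNoiseTexture(Texture)
#   pyplot.show()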
def AnalyzeNoiseTexture(Texture,SingleFigure=True,SimpleLabels=False):
"""Given a 2D array of real noise values this function creates one or more
figures with plots that allow you to analyze it, especially with respect to
blue noise characteristics. The analysis includes the absolute value of the
Fourier transform, the power distribution in radial frequency bands and an
analysis of directional isotropy.
    \param Texture A two-dimensional array.
\param SingleFigure If this is True, all plots are shown in a single figure,
which is useful for on-screen display. Otherwise one figure per plot
is created.
\param SimpleLabels Pass True to get axis labels that fit into the context of
the blog post without further explanation.
\return A list of all created figures.
\note For the plots to show you have to invoke pyplot.show()."""
FigureList=list();
if(SingleFigure):
Figure=pyplot.figure();
FigureList.append(Figure);
def PrepareAxes(iAxes,**KeywordArguments):
if(SingleFigure):
return Figure.add_subplot(2,2,iAxes,**KeywordArguments);
else:
NewFigure=pyplot.figure();
FigureList.append(NewFigure);
return NewFigure.add_subplot(1,1,1,**KeywordArguments);
# Plot the dither array itself
PrepareAxes(1,title="Blue noise dither array");
pyplot.imshow(Texture.real,cmap="gray",interpolation="nearest");
# Plot the Fourier transform with frequency zero shifted to the center
PrepareAxes(2,title="Fourier transform (absolute value)",xlabel="$\\omega_x$",ylabel="$\\omega_y$");
DFT=np.fft.fftshift(np.fft.fft2(Texture))/float(np.size(Texture));
Height,Width=Texture.shape;
ShiftY,ShiftX=(int(Height/2),int(Width/2));
pyplot.imshow(np.abs(DFT),cmap="viridis",interpolation="nearest",vmin=0.0,vmax=np.percentile(np.abs(DFT),99),extent=(-ShiftX-0.5,Width-ShiftX-0.5,-ShiftY+0.5,Height-ShiftY+0.5));
pyplot.colorbar();
# Plot the distribution of power over radial frequency bands
PrepareAxes(3,title="Radial power distribution",xlabel="Distance from center / pixels" if SimpleLabels else "$\\sqrt{\\omega_x^2+\\omega_y^2}$");
X,Y=np.meshgrid(range(DFT.shape[1]),range(DFT.shape[0]));
X-=int(DFT.shape[1]/2);
Y-=int(DFT.shape[0]/2);
RadialFrequency=np.asarray(np.round(np.sqrt(X**2+Y**2)),dtype=np.int);
RadialPower=np.zeros((np.max(RadialFrequency)-1,));
DFT[int(DFT.shape[0]/2),int(DFT.shape[1]/2)]=0.0;
for i in range(RadialPower.shape[0]):
RadialPower[i]=np.sum(np.where(RadialFrequency==i,np.abs(DFT),0.0))/np.count_nonzero(RadialFrequency==i);
pyplot.plot(np.arange(np.max(RadialFrequency)-1)+0.5,RadialPower);
# Plot the distribution of power over angular frequency ranges
PrepareAxes(4,title="Anisotropy (angular power distribution)",aspect="equal",xlabel="Frequency x" if SimpleLabels else "$\\omega_x$",ylabel="Frequency y" if SimpleLabels else "$\\omega_y$");
CircularMask=np.logical_and(0<RadialFrequency,RadialFrequency<int(min(DFT.shape[0],DFT.shape[1])/2));
NormalizedX=np.asarray(X,dtype=np.float)/np.maximum(1.0,np.sqrt(X**2+Y**2));
NormalizedY=np.asarray(Y,dtype=np.float)/np.maximum(1.0,np.sqrt(X**2+Y**2));
BinningAngle=np.linspace(0.0,2.0*np.pi,33);
AngularPower=np.zeros_like(BinningAngle);
for i,Angle in enumerate(BinningAngle):
DotProduct=NormalizedX*np.cos(Angle)+NormalizedY*np.sin(Angle);
FullMask=np.logical_and(CircularMask,DotProduct>=np.cos(np.pi/32.0));
AngularPower[i]=np.sum(np.where(FullMask,np.abs(DFT),0.0))/np.count_nonzero(FullMask);
MeanAngularPower=np.mean(AngularPower[1:]);
DenseAngle=np.linspace(0.0,2.0*np.pi,256);
pyplot.plot(np.cos(DenseAngle)*MeanAngularPower,np.sin(DenseAngle)*MeanAngularPower,color=(0.7,0.7,0.7));
pyplot.plot(np.cos(BinningAngle)*AngularPower,np.sin(BinningAngle)*AngularPower);
return FigureList;
def PlotBinaryPatterns(Texture,nPatternRow,nPatternColumn):
"""This function creates a figure with a grid of thresholded versions of the
given 2D noise texture. It assumes that each value from 0 to
np.size(Texture)-1 is contained exactly once.
\return The created figure.
\note For the plots to show you have to invoke pyplot.show()."""
Figure=pyplot.figure();
nPattern=nPatternRow*nPatternColumn+1;
for i in range(1,nPattern):
Figure.add_subplot(nPatternRow,nPatternColumn,i,xticks=[],yticks=[]);
pyplot.imshow(np.where(Texture*nPattern<i*np.size(Texture),1.0,0.0),cmap="gray",interpolation="nearest");
return Figure;
def StoreNoiseTextureLDR(Texture,OutputPNGFilePath,nRank=-1):
"""This function stores the given texture to a standard low-dynamic range png
file with four channels and 8 bits per channel.
\param Texture An array of shape (Height,Width) or (Height,Width,nChannel).
The former is handled like (Height,Width,1). If nChannel>4 the
superfluous channels are ignored. If nChannel<4 the data is expanded.
The alpha channel is set to 255, green and blue are filled with black
or with duplicates of red if nChannel==1. It is assumed that each
channel contains every integer value from 0 to nRank-1 exactly once.
The range of values is remapped linearly to span the range from 0 to
255.
\param OutputPNGFilePath The path to the output png file including the file
format extension.
\param nRank Defaults to Width*Height if you pass a non-positive value."""
# Scale the array to an LDR version
if(nRank<=0):
nRank=Texture.shape[0]*Texture.shape[1];
Texture=np.asarray((Texture*256)//nRank,dtype=np.uint8);
# Get a three-dimensional array
if(len(Texture.shape)<3):
Texture=Texture[:,:,np.newaxis];
# Generate channels as needed
if(Texture.shape[2]==1):
Texture=np.dstack([Texture]*3+[255*np.ones_like(Texture[:,:,0])]);
elif(Texture.shape[2]==2):
Texture=np.dstack([Texture[:,:,0],Texture[:,:,1]]+[np.zeros_like(Texture[:,:,0])]+[255*np.ones_like(Texture[:,:,0])]);
elif(Texture.shape[2]==3):
Texture=np.dstack([Texture[:,:,0],Texture[:,:,1],Texture[:,:,2]]+[255*np.ones_like(Texture[:,:,0])]);
elif(Texture.shape[2]>4):
Texture=Texture[:,:,:4];
# Save the image
png.from_array(Texture,"RGBA;8").save(OutputPNGFilePath);
def StoreNoiseTextureHDR(Texture,OutputPNGFilePath,nRank=-1):
"""This function stores the given texture to an HDR png file with 16 bits per
channel and the specified number of channels.
\param Texture An array of shape (Height,Width) or (Height,Width,nChannel).
The former is handled like (Height,Width,1). It is assumed that each
channel contains each integer value from 0 to nRank-1 exactly once. The
range of values is remapped linearly to span the range from 0 to
2**16-1 supported by the output format. nChannel can be 1, 2, 3 or 4.
\param OutputPNGFilePath The path to the output *.png file including the file
format extension.
\param nRank Defaults to Width*Height if you pass a non-positive value."""
# Scale the array to an HDR version
if(nRank<=0):
nRank=Texture.shape[0]*Texture.shape[1];
Texture=np.asarray((np.asarray(Texture,dtype=np.uint64)*(2**16))//nRank,dtype=np.uint16);
# Get a three-dimensional array
if(len(Texture.shape)<3):
Texture=Texture[:,:,np.newaxis];
# Save the image
Mode=["L","LA","RGB","RGBA"][Texture.shape[2]-1]+";16";
png.from_array(Texture,Mode).save(OutputPNGFilePath);
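# Illustrative usage (not part of the original script): store one single-channel
# blue noise texture as an 8-bit and as a 16-bit png. The file names are made up;
# GetVoidAndClusterBlueNoise() is defined earlier in this module.
# Texture=GetVoidAndClusterBlueNoise((64,64),1.5);
# StoreNoiseTextureLDR(Texture,"BlueNoise64LDR.png");
# StoreNoiseTextureHDR(Texture,"BlueNoise64HDR.png");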
def StoreNDTextureHDR(Array,OutputFilePath):
"""This function stores the given unsigned integer array in a minimalist binary
file format. The last dimension is interpreted as corresponding to the
channels of the image. The file format consists of a sequence of unsigned,
least significant bit first 32-bit integers. The contained data is described
below:
- Version: File format version, should be 1.
- nChannel: The number of color channels in the image. This should be a value
between 1 (greyscale) and 4 (RGBA).
- nDimension: The number of dimensions of the stored array, i.e. the number of
indices required to uniquely identify one pixel, voxel, etc..
- Shape[nDimension]: nDimension integers providing the size of the array along
each dimension. By convention the first dimension is height, second width
and third depth.
- Data[Shape[0]*...*Shape[nDimension-1]*nChannel]: The uncompressed data of
the array. The channels are unrolled first, followed by all dimensions in
reverse order. Thus, an RG image of size 3*2 would be stored in the
following order: 00R, 00G, 01R, 01G, 10R, 10G, 11R, 11G, 20R, 20G, 21R,
21G"""
# Prepare all the meta data and the data itself
Array=np.asarray(Array,dtype=np.uint32);
Version=1;
nDimension=len(Array.shape)-1;
nChannel=Array.shape[nDimension];
Shape=Array.shape[0:nDimension];
Data=Array.flatten("C");
# Write it to the file
OutputFile=open(OutputFilePath,"wb");
OutputFile.write(struct.pack("LLL",Version,nChannel,nDimension));
OutputFile.write(struct.pack("L"*nDimension,*Shape));
OutputFile.write(struct.pack("L"*np.size(Data),*Data));
OutputFile.close();
def LoadNDTextureHDR(SourceFilePath):
"""Loads a file generated by StoreNDTextureHDR() and returns it as an array like
the one that goes into StoreNDTextureHDR() using data type np.uint32. On
failure it returns None."""
# Load the meta data
File=open(SourceFilePath,"rb");
Version,nChannel,nDimension=struct.unpack_from("LLL",File.read(12));
if(Version!=1):
return None;
Shape=struct.unpack_from("L"*nDimension,File.read(4*nDimension));
nScalar=np.prod(Shape)*nChannel;
Data=struct.unpack_from("L"*nScalar,File.read(4*nScalar));
File.close();
# Prepare the output
return np.asarray(Data,dtype=np.uint32).reshape(tuple(list(Shape)+[nChannel]),order="C");
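# Illustrative round trip (not part of the original script): LoadNDTextureHDR()
# should reproduce exactly what StoreNDTextureHDR() wrote. The array contents and
# the file name below are made up for the example.
# Small=np.arange(2*3*2,dtype=np.uint32).reshape((2,3,2));
# StoreNDTextureHDR(Small,"Small.raw");
# print(np.array_equal(LoadNDTextureHDR("Small.raw"),Small)); # prints True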
def GenerateBlueNoiseDatabase(RandomSeedIndexList=range(1),MinResolution=16,MaxResolution=1024,ChannelCountList=[1,2,3,4],StandardDeviation=1.5):
"""This function generates a database of blue noise textures for all sorts of
use cases. It includes power-of-two resolutions from MinResolution**2 up
to MaxResolution**2. Textures are generated with each given number of
channels. Each texture is generated multiple times using different random
numbers per entry in RandomSeedIndexList and the entries become part of the
file name. StandardDeviation forwards to GetVoidAndClusterBlueNoise(). The
results are stored as LDR and HDR files to a well-organized tree of
directories."""
Resolution=MinResolution;
while(Resolution<=MaxResolution):
OutputDirectory="../Data/%d_%d"%(Resolution,Resolution);
if(not path.exists(OutputDirectory)):
makedirs(OutputDirectory);
for nChannel in ChannelCountList:
for i in RandomSeedIndexList:
Texture=np.dstack([GetVoidAndClusterBlueNoise((Resolution,Resolution),StandardDeviation) for j in range(nChannel)]);
LDRFormat=["LLL1","RG01","RGB1","RGBA"][nChannel-1];
HDRFormat=["L","LA","RGB","RGBA"][nChannel-1];
StoreNoiseTextureLDR(Texture,path.join(OutputDirectory,"LDR_%s_%d.png"%(LDRFormat,i)));
StoreNoiseTextureHDR(Texture,path.join(OutputDirectory,"HDR_%s_%d.png"%(HDRFormat,i)));
print("%d*%d, %s, %d"%(Resolution,Resolution,LDRFormat,i));
Resolution*=2;
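# For example (illustrative, derived from the code above): with Resolution=64,
# nChannel=3 and random seed index 0, the loop writes
# "../Data/64_64/LDR_RGB1_0.png" and "../Data/64_64/HDR_RGB_0.png".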
def Generate3DBlueNoiseTexture(Width,Height,Depth,nChannel,StandardDeviation=1.5):
"""This function generates a single 3D blue noise texture with the specified
dimensions and number of channels. It then outputs it to a sequence of Depth
output files in LDR and HDR in a well-organized tree of directories. It also
outputs raw binary files.
\sa StoreNDTextureHDR() """
OutputDirectory="../Data/%d_%d_%d"%(Width,Height,Depth);
if(not path.exists(OutputDirectory)):
makedirs(OutputDirectory);
# Generate the blue noise for the various channels using multi-threading
ChannelTextureList=[None]*nChannel;
ChannelThreadList=[None]*nChannel;
def GenerateAndStoreTexture(Index):
ChannelTextureList[Index]=GetVoidAndClusterBlueNoise((Height,Width,Depth),StandardDeviation);
for i in range(nChannel):
ChannelThreadList[i]=threading.Thread(target=GenerateAndStoreTexture,args=(i,));
ChannelThreadList[i].start();
for Thread in ChannelThreadList:
Thread.join();
Texture=np.concatenate([ChannelTextureList[i][:,:,:,np.newaxis] for i in range(nChannel)],3);
LDRFormat=["LLL1","RG01","RGB1","RGBA"][nChannel-1];
HDRFormat=["L","LA","RGB","RGBA"][nChannel-1];
StoreNDTextureHDR(Texture,path.join(OutputDirectory,"HDR_"+HDRFormat+".raw"));
for i in range(Depth):
StoreNoiseTextureLDR(Texture[:,:,i,:],path.join(OutputDirectory,"LDR_%s_%d.png"%(LDRFormat,i)),Height*Width*Depth);
StoreNoiseTextureHDR(Texture[:,:,i,:],path.join(OutputDirectory,"HDR_%s_%d.png"%(HDRFormat,i)),Height*Width*Depth);
def GenerateNDBlueNoiseTexture(Shape,nChannel,OutputFilePath,StandardDeviation=1.5):
"""This function generates a single n-dimensional blue noise texture with the
specified shape and number of channels. It then outputs it to the specified
raw binary file.
\sa StoreNDTextureHDR() """
OutputDirectory=path.split(OutputFilePath)[0];
if(not path.exists(OutputDirectory)):
makedirs(OutputDirectory);
# Generate the blue noise for the various channels using multi-threading
ChannelTextureList=[None]*nChannel;
ChannelThreadList=[None]*nChannel;
def GenerateAndStoreTexture(Index):
ChannelTextureList[Index]=GetVoidAndClusterBlueNoise(Shape,StandardDeviation);
for i in range(nChannel):
ChannelThreadList[i]=threading.Thread(target=GenerateAndStoreTexture,args=(i,));
ChannelThreadList[i].start();
for Thread in ChannelThreadList:
Thread.join();
Texture=np.concatenate([ChannelTextureList[i][...,np.newaxis] for i in range(nChannel)],len(Shape));
StoreNDTextureHDR(Texture,OutputFilePath);
def UniformToTriangularDistribution(UniformTexture):
"""Given an array with a uniform distribution of values, this function
constructs an array of equal shape with a triangular distribution of values.
This is accomplished by applying a differentiable, monotonically increasing
function per entry.
\param UniformTexture An integer array containing each value from 0 to
np.size(UniformTexture)-1 exactly once.
\return A floating-point array with values between -1 and 1 where the density
grows linearly between -1 and 0 and falls linearly between 0 and 1."""
Normalized=(np.asarray(UniformTexture,dtype=float)+0.5)/float(np.size(UniformTexture));
return np.where(Normalized<0.5,np.sqrt(2.0*Normalized)-1.0,1.0-np.sqrt(2.0-2.0*Normalized));
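# Worked example (illustrative, not part of the original script): for the ranks
# 0,1,2,3 the normalized values are 0.125,0.375,0.625,0.875 and the remapped
# outputs are approximately -0.5,-0.134,+0.134,+0.5, i.e. symmetric around zero
# and denser near zero, as expected for a triangular distribution.
# print(UniformToTriangularDistribution(np.arange(4)));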
if(__name__=="__main__"):
#GenerateBlueNoiseDatabase(range(64),16,64,range(1,5),1.9);
#GenerateBlueNoiseDatabase(range(16),128,128,range(1,5),1.9);
#GenerateBlueNoiseDatabase(range(8),256,256,range(1,5),1.9);
#GenerateBlueNoiseDatabase(range(1),512,512,range(1,5),1.9);
#GenerateBlueNoiseDatabase(range(1),1024,1024,[4],1.9);
#for nChannel in range(1,5):
#Generate3DBlueNoiseTexture(16,16,16,nChannel,1.9);
#Generate3DBlueNoiseTexture(32,32,32,nChannel,1.9);
#Generate3DBlueNoiseTexture(64,64,64,nChannel,1.9);
#ChannelNames=["","L","LA","RGB","RGBA"][nChannel];
#GenerateNDBlueNoiseTexture((8,8,8,8),nChannel,"../Data/8_8_8_8/HDR_"+ChannelNames+".raw",1.9);
#GenerateNDBlueNoiseTexture((16,16,16,16),nChannel,"../Data/16_16_16_16/HDR_"+ChannelNames+".raw",1.9);
Texture=GetVoidAndClusterBlueNoise((64,64),1.9);
#Texture=GetVoidAndClusterBlueNoise((32,32,32),1.9)[:,:,0];
AnalyzeNoiseTexture(Texture,True);
PlotBinaryPatterns(Texture,3,5);
pyplot.show();
| agpl-3.0 |
ryfeus/lambda-packs | pytorch/source/numpy/lib/function_base.py | 2 | 156000 | from __future__ import division, absolute_import, print_function
try:
# Accessing collections abstract classes from collections
# has been deprecated since Python 3.3
import collections.abc as collections_abc
except ImportError:
import collections as collections_abc
import functools
import re
import sys
import warnings
import numpy as np
import numpy.core.numeric as _nx
from numpy.core import atleast_1d, transpose
from numpy.core.numeric import (
ones, zeros, arange, concatenate, array, asarray, asanyarray, empty,
empty_like, ndarray, around, floor, ceil, take, dot, where, intp,
integer, isscalar, absolute
)
from numpy.core.umath import (
pi, add, arctan2, frompyfunc, cos, less_equal, sqrt, sin,
mod, exp, not_equal, subtract
)
from numpy.core.fromnumeric import (
ravel, nonzero, partition, mean, any, sum
)
from numpy.core.numerictypes import typecodes
from numpy.core.overrides import set_module
from numpy.core import overrides
from numpy.core.function_base import add_newdoc
from numpy.lib.twodim_base import diag
from .utils import deprecate
from numpy.core.multiarray import (
_insert, add_docstring, bincount, normalize_axis_index, _monotonicity,
interp as compiled_interp, interp_complex as compiled_interp_complex
)
from numpy.core.umath import _add_newdoc_ufunc as add_newdoc_ufunc
from numpy.compat import long
if sys.version_info[0] < 3:
# Force range to be a generator, for np.delete's usage.
range = xrange
import __builtin__ as builtins
else:
import builtins
array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy')
# needed in this module for compatibility
from numpy.lib.histograms import histogram, histogramdd
__all__ = [
'select', 'piecewise', 'trim_zeros', 'copy', 'iterable', 'percentile',
'diff', 'gradient', 'angle', 'unwrap', 'sort_complex', 'disp', 'flip',
'rot90', 'extract', 'place', 'vectorize', 'asarray_chkfinite', 'average',
'bincount', 'digitize', 'cov', 'corrcoef',
'msort', 'median', 'sinc', 'hamming', 'hanning', 'bartlett',
'blackman', 'kaiser', 'trapz', 'i0', 'add_newdoc', 'add_docstring',
'meshgrid', 'delete', 'insert', 'append', 'interp', 'add_newdoc_ufunc',
'quantile'
]
def _rot90_dispatcher(m, k=None, axes=None):
return (m,)
@array_function_dispatch(_rot90_dispatcher)
def rot90(m, k=1, axes=(0,1)):
"""
Rotate an array by 90 degrees in the plane specified by axes.
Rotation direction is from the first towards the second axis.
Parameters
----------
m : array_like
Array of two or more dimensions.
k : integer
Number of times the array is rotated by 90 degrees.
axes: (2,) array_like
The array is rotated in the plane defined by the axes.
Axes must be different.
.. versionadded:: 1.12.0
Returns
-------
y : ndarray
A rotated view of `m`.
See Also
--------
flip : Reverse the order of elements in an array along the given axis.
fliplr : Flip an array horizontally.
flipud : Flip an array vertically.
Notes
-----
rot90(m, k=1, axes=(1,0)) is the reverse of rot90(m, k=1, axes=(0,1))
rot90(m, k=1, axes=(1,0)) is equivalent to rot90(m, k=-1, axes=(0,1))
Examples
--------
>>> m = np.array([[1,2],[3,4]], int)
>>> m
array([[1, 2],
[3, 4]])
>>> np.rot90(m)
array([[2, 4],
[1, 3]])
>>> np.rot90(m, 2)
array([[4, 3],
[2, 1]])
>>> m = np.arange(8).reshape((2,2,2))
>>> np.rot90(m, 1, (1,2))
array([[[1, 3],
[0, 2]],
[[5, 7],
[4, 6]]])
"""
axes = tuple(axes)
if len(axes) != 2:
raise ValueError("len(axes) must be 2.")
m = asanyarray(m)
if axes[0] == axes[1] or absolute(axes[0] - axes[1]) == m.ndim:
raise ValueError("Axes must be different.")
if (axes[0] >= m.ndim or axes[0] < -m.ndim
or axes[1] >= m.ndim or axes[1] < -m.ndim):
raise ValueError("Axes={} out of range for array of ndim={}."
.format(axes, m.ndim))
k %= 4
if k == 0:
return m[:]
if k == 2:
return flip(flip(m, axes[0]), axes[1])
axes_list = arange(0, m.ndim)
(axes_list[axes[0]], axes_list[axes[1]]) = (axes_list[axes[1]],
axes_list[axes[0]])
if k == 1:
return transpose(flip(m,axes[1]), axes_list)
else:
# k == 3
return flip(transpose(m, axes_list), axes[1])
def _flip_dispatcher(m, axis=None):
return (m,)
@array_function_dispatch(_flip_dispatcher)
def flip(m, axis=None):
"""
Reverse the order of elements in an array along the given axis.
The shape of the array is preserved, but the elements are reordered.
.. versionadded:: 1.12.0
Parameters
----------
m : array_like
Input array.
axis : None or int or tuple of ints, optional
Axis or axes along which to flip over. The default,
axis=None, will flip over all of the axes of the input array.
If axis is negative it counts from the last to the first axis.
If axis is a tuple of ints, flipping is performed on all of the axes
specified in the tuple.
.. versionchanged:: 1.15.0
None and tuples of axes are supported
Returns
-------
out : array_like
A view of `m` with the entries of axis reversed. Since a view is
returned, this operation is done in constant time.
See Also
--------
flipud : Flip an array vertically (axis=0).
fliplr : Flip an array horizontally (axis=1).
Notes
-----
flip(m, 0) is equivalent to flipud(m).
flip(m, 1) is equivalent to fliplr(m).
flip(m, n) corresponds to ``m[...,::-1,...]`` with ``::-1`` at position n.
flip(m) corresponds to ``m[::-1,::-1,...,::-1]`` with ``::-1`` at all
positions.
flip(m, (0, 1)) corresponds to ``m[::-1,::-1,...]`` with ``::-1`` at
position 0 and position 1.
Examples
--------
>>> A = np.arange(8).reshape((2,2,2))
>>> A
array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> flip(A, 0)
array([[[4, 5],
[6, 7]],
[[0, 1],
[2, 3]]])
>>> flip(A, 1)
array([[[2, 3],
[0, 1]],
[[6, 7],
[4, 5]]])
>>> np.flip(A)
array([[[7, 6],
[5, 4]],
[[3, 2],
[1, 0]]])
>>> np.flip(A, (0, 2))
array([[[5, 4],
[7, 6]],
[[1, 0],
[3, 2]]])
>>> A = np.random.randn(3,4,5)
>>> np.all(flip(A,2) == A[:,:,::-1,...])
True
"""
if not hasattr(m, 'ndim'):
m = asarray(m)
if axis is None:
indexer = (np.s_[::-1],) * m.ndim
else:
axis = _nx.normalize_axis_tuple(axis, m.ndim)
indexer = [np.s_[:]] * m.ndim
for ax in axis:
indexer[ax] = np.s_[::-1]
indexer = tuple(indexer)
return m[indexer]
@set_module('numpy')
def iterable(y):
"""
Check whether or not an object can be iterated over.
Parameters
----------
y : object
Input object.
Returns
-------
b : bool
Return ``True`` if the object has an iterator method or is a
sequence and ``False`` otherwise.
Examples
--------
>>> np.iterable([1, 2, 3])
True
>>> np.iterable(2)
False
"""
try:
iter(y)
except TypeError:
return False
return True
def _average_dispatcher(a, axis=None, weights=None, returned=None):
return (a, weights)
@array_function_dispatch(_average_dispatcher)
def average(a, axis=None, weights=None, returned=False):
"""
Compute the weighted average along the specified axis.
Parameters
----------
a : array_like
Array containing data to be averaged. If `a` is not an array, a
conversion is attempted.
axis : None or int or tuple of ints, optional
Axis or axes along which to average `a`. The default,
axis=None, will average over all of the elements of the input array.
If axis is negative it counts from the last to the first axis.
.. versionadded:: 1.7.0
If axis is a tuple of ints, averaging is performed on all of the axes
specified in the tuple instead of a single axis or all the axes as
before.
weights : array_like, optional
An array of weights associated with the values in `a`. Each value in
`a` contributes to the average according to its associated weight.
The weights array can either be 1-D (in which case its length must be
the size of `a` along the given axis) or of the same shape as `a`.
If `weights=None`, then all data in `a` are assumed to have a
weight equal to one.
returned : bool, optional
Default is `False`. If `True`, the tuple (`average`, `sum_of_weights`)
is returned, otherwise only the average is returned.
If `weights=None`, `sum_of_weights` is equivalent to the number of
elements over which the average is taken.
Returns
-------
retval, [sum_of_weights] : array_type or double
Return the average along the specified axis. When `returned` is `True`,
return a tuple with the average as the first element and the sum
of the weights as the second element. `sum_of_weights` is of the
same type as `retval`. The result dtype follows a general pattern.
If `weights` is None, the result dtype will be that of `a`, or ``float64``
if `a` is integral. Otherwise, if `weights` is not None and `a` is non-
integral, the result type will be the type of lowest precision capable of
representing values of both `a` and `weights`. If `a` happens to be
integral, the previous rules still apply but the result dtype will
at least be ``float64``.
Raises
------
ZeroDivisionError
When all weights along axis are zero. See `numpy.ma.average` for a
version robust to this type of error.
TypeError
When the length of 1D `weights` is not the same as the shape of `a`
along axis.
See Also
--------
mean
ma.average : average for masked arrays -- useful if your data contains
"missing" values
numpy.result_type : Returns the type that results from applying the
numpy type promotion rules to the arguments.
Examples
--------
>>> data = list(range(1,5))
>>> data
[1, 2, 3, 4]
>>> np.average(data)
2.5
>>> np.average(range(1,11), weights=range(10,0,-1))
4.0
>>> data = np.arange(6).reshape((3,2))
>>> data
array([[0, 1],
[2, 3],
[4, 5]])
>>> np.average(data, axis=1, weights=[1./4, 3./4])
array([ 0.75, 2.75, 4.75])
>>> np.average(data, weights=[1./4, 3./4])
Traceback (most recent call last):
...
TypeError: Axis must be specified when shapes of a and weights differ.
>>> a = np.ones(5, dtype=np.float128)
>>> w = np.ones(5, dtype=np.complex64)
>>> avg = np.average(a, weights=w)
>>> print(avg.dtype)
complex256
"""
a = np.asanyarray(a)
if weights is None:
avg = a.mean(axis)
scl = avg.dtype.type(a.size/avg.size)
else:
wgt = np.asanyarray(weights)
if issubclass(a.dtype.type, (np.integer, np.bool_)):
result_dtype = np.result_type(a.dtype, wgt.dtype, 'f8')
else:
result_dtype = np.result_type(a.dtype, wgt.dtype)
# Sanity checks
if a.shape != wgt.shape:
if axis is None:
raise TypeError(
"Axis must be specified when shapes of a and weights "
"differ.")
if wgt.ndim != 1:
raise TypeError(
"1D weights expected when shapes of a and weights differ.")
if wgt.shape[0] != a.shape[axis]:
raise ValueError(
"Length of weights not compatible with specified axis.")
# setup wgt to broadcast along axis
wgt = np.broadcast_to(wgt, (a.ndim-1)*(1,) + wgt.shape)
wgt = wgt.swapaxes(-1, axis)
scl = wgt.sum(axis=axis, dtype=result_dtype)
if np.any(scl == 0.0):
raise ZeroDivisionError(
"Weights sum to zero, can't be normalized")
avg = np.multiply(a, wgt, dtype=result_dtype).sum(axis)/scl
if returned:
if scl.shape != avg.shape:
scl = np.broadcast_to(scl, avg.shape).copy()
return avg, scl
else:
return avg
@set_module('numpy')
def asarray_chkfinite(a, dtype=None, order=None):
"""Convert the input to an array, checking for NaNs or Infs.
Parameters
----------
a : array_like
Input data, in any form that can be converted to an array. This
includes lists, lists of tuples, tuples, tuples of tuples, tuples
of lists and ndarrays. Success requires no NaNs or Infs.
dtype : data-type, optional
By default, the data-type is inferred from the input data.
order : {'C', 'F'}, optional
Whether to use row-major (C-style) or
column-major (Fortran-style) memory representation.
Defaults to 'C'.
Returns
-------
out : ndarray
Array interpretation of `a`. No copy is performed if the input
is already an ndarray. If `a` is a subclass of ndarray, a base
class ndarray is returned.
Raises
------
ValueError
Raises ValueError if `a` contains NaN (Not a Number) or Inf (Infinity).
See Also
--------
asarray : Create an array.
asanyarray : Similar function which passes through subclasses.
ascontiguousarray : Convert input to a contiguous array.
asfarray : Convert input to a floating point ndarray.
asfortranarray : Convert input to an ndarray with column-major
memory order.
fromiter : Create an array from an iterator.
fromfunction : Construct an array by executing a function on grid
positions.
Examples
--------
Convert a list into an array. If all elements are finite
``asarray_chkfinite`` is identical to ``asarray``.
>>> a = [1, 2]
>>> np.asarray_chkfinite(a, dtype=float)
array([1., 2.])
Raises ValueError if array_like contains NaNs or Infs.
>>> a = [1, 2, np.inf]
>>> try:
... np.asarray_chkfinite(a)
... except ValueError:
... print('ValueError')
...
ValueError
"""
a = asarray(a, dtype=dtype, order=order)
if a.dtype.char in typecodes['AllFloat'] and not np.isfinite(a).all():
raise ValueError(
"array must not contain infs or NaNs")
return a
def _piecewise_dispatcher(x, condlist, funclist, *args, **kw):
yield x
# support the undocumented behavior of allowing scalars
if np.iterable(condlist):
for c in condlist:
yield c
@array_function_dispatch(_piecewise_dispatcher)
def piecewise(x, condlist, funclist, *args, **kw):
"""
Evaluate a piecewise-defined function.
Given a set of conditions and corresponding functions, evaluate each
function on the input data wherever its condition is true.
Parameters
----------
x : ndarray or scalar
The input domain.
condlist : list of bool arrays or bool scalars
Each boolean array corresponds to a function in `funclist`. Wherever
`condlist[i]` is True, `funclist[i](x)` is used as the output value.
Each boolean array in `condlist` selects a piece of `x`,
and should therefore be of the same shape as `x`.
The length of `condlist` must correspond to that of `funclist`.
If one extra function is given, i.e. if
``len(funclist) == len(condlist) + 1``, then that extra function
is the default value, used wherever all conditions are false.
funclist : list of callables, f(x,*args,**kw), or scalars
Each function is evaluated over `x` wherever its corresponding
condition is True. It should take a 1d array as input and give a 1d
array or a scalar value as output. If, instead of a callable,
a scalar is provided then a constant function (``lambda x: scalar``) is
assumed.
args : tuple, optional
Any further arguments given to `piecewise` are passed to the functions
upon execution, i.e., if called ``piecewise(..., ..., 1, 'a')``, then
each function is called as ``f(x, 1, 'a')``.
kw : dict, optional
Keyword arguments used in calling `piecewise` are passed to the
functions upon execution, i.e., if called
``piecewise(..., ..., alpha=1)``, then each function is called as
``f(x, alpha=1)``.
Returns
-------
out : ndarray
The output is the same shape and type as x and is found by
calling the functions in `funclist` on the appropriate portions of `x`,
as defined by the boolean arrays in `condlist`. Portions not covered
by any condition have a default value of 0.
See Also
--------
choose, select, where
Notes
-----
This is similar to choose or select, except that functions are
evaluated on elements of `x` that satisfy the corresponding condition from
`condlist`.
The result is::
      |--
      |funclist[0](x[condlist[0]])
out = |funclist[1](x[condlist[1]])
      |...
      |funclist[n2](x[condlist[n2]])
      |--
Examples
--------
Define the signum function, which is -1 for ``x < 0`` and +1 for ``x >= 0``.
>>> x = np.linspace(-2.5, 2.5, 6)
>>> np.piecewise(x, [x < 0, x >= 0], [-1, 1])
array([-1., -1., -1., 1., 1., 1.])
Define the absolute value, which is ``-x`` for ``x <0`` and ``x`` for
``x >= 0``.
>>> np.piecewise(x, [x < 0, x >= 0], [lambda x: -x, lambda x: x])
array([ 2.5, 1.5, 0.5, 0.5, 1.5, 2.5])
Apply the same function to a scalar value.
>>> y = -2
>>> np.piecewise(y, [y < 0, y >= 0], [lambda x: -x, lambda x: x])
array(2)
"""
x = asanyarray(x)
n2 = len(funclist)
# undocumented: single condition is promoted to a list of one condition
if isscalar(condlist) or (
not isinstance(condlist[0], (list, ndarray)) and x.ndim != 0):
condlist = [condlist]
condlist = array(condlist, dtype=bool)
n = len(condlist)
if n == n2 - 1: # compute the "otherwise" condition.
condelse = ~np.any(condlist, axis=0, keepdims=True)
condlist = np.concatenate([condlist, condelse], axis=0)
n += 1
elif n != n2:
raise ValueError(
"with {} condition(s), either {} or {} functions are expected"
.format(n, n, n+1)
)
y = zeros(x.shape, x.dtype)
for k in range(n):
item = funclist[k]
if not isinstance(item, collections_abc.Callable):
y[condlist[k]] = item
else:
vals = x[condlist[k]]
if vals.size > 0:
y[condlist[k]] = item(vals, *args, **kw)
return y
def _select_dispatcher(condlist, choicelist, default=None):
for c in condlist:
yield c
for c in choicelist:
yield c
@array_function_dispatch(_select_dispatcher)
def select(condlist, choicelist, default=0):
"""
Return an array drawn from elements in choicelist, depending on conditions.
Parameters
----------
condlist : list of bool ndarrays
The list of conditions which determine from which array in `choicelist`
the output elements are taken. When multiple conditions are satisfied,
the first one encountered in `condlist` is used.
choicelist : list of ndarrays
The list of arrays from which the output elements are taken. It has
to be of the same length as `condlist`.
default : scalar, optional
The element inserted in `output` when all conditions evaluate to False.
Returns
-------
output : ndarray
The output at position m is the m-th element of the array in
`choicelist` where the m-th element of the corresponding array in
`condlist` is True.
See Also
--------
where : Return elements from one of two arrays depending on condition.
take, choose, compress, diag, diagonal
Examples
--------
>>> x = np.arange(10)
>>> condlist = [x<3, x>5]
>>> choicelist = [x, x**2]
>>> np.select(condlist, choicelist)
array([ 0, 1, 2, 0, 0, 0, 36, 49, 64, 81])
"""
# Check the size of condlist and choicelist are the same, or abort.
if len(condlist) != len(choicelist):
raise ValueError(
'list of cases must be same length as list of conditions')
# Now that the dtype is known, handle the deprecated select([], []) case
if len(condlist) == 0:
# 2014-02-24, 1.9
warnings.warn("select with an empty condition list is not possible"
"and will be deprecated",
DeprecationWarning, stacklevel=2)
return np.asarray(default)[()]
choicelist = [np.asarray(choice) for choice in choicelist]
choicelist.append(np.asarray(default))
# need to get the result type before broadcasting for correct scalar
# behaviour
dtype = np.result_type(*choicelist)
# Convert conditions to arrays and broadcast conditions and choices
# as the shape is needed for the result. Doing it separately optimizes
# for example when all choices are scalars.
condlist = np.broadcast_arrays(*condlist)
choicelist = np.broadcast_arrays(*choicelist)
# If cond array is not an ndarray in boolean format or scalar bool, abort.
deprecated_ints = False
for i in range(len(condlist)):
cond = condlist[i]
if cond.dtype.type is not np.bool_:
if np.issubdtype(cond.dtype, np.integer):
# A previous implementation accepted int ndarrays accidentally.
# Supported here deliberately, but deprecated.
condlist[i] = condlist[i].astype(bool)
deprecated_ints = True
else:
raise ValueError(
'invalid entry {} in condlist: should be boolean ndarray'.format(i))
if deprecated_ints:
# 2014-02-24, 1.9
msg = "select condlists containing integer ndarrays is deprecated " \
"and will be removed in the future. Use `.astype(bool)` to " \
"convert to bools."
warnings.warn(msg, DeprecationWarning, stacklevel=2)
if choicelist[0].ndim == 0:
# This may be common, so avoid the call.
result_shape = condlist[0].shape
else:
result_shape = np.broadcast_arrays(condlist[0], choicelist[0])[0].shape
result = np.full(result_shape, choicelist[-1], dtype)
# Use np.copyto to burn each choicelist array onto result, using the
# corresponding condlist as a boolean mask. This is done in reverse
# order since the first choice should take precedence.
choicelist = choicelist[-2::-1]
condlist = condlist[::-1]
for choice, cond in zip(choicelist, condlist):
np.copyto(result, choice, where=cond)
return result
def _copy_dispatcher(a, order=None):
return (a,)
@array_function_dispatch(_copy_dispatcher)
def copy(a, order='K'):
"""
Return an array copy of the given object.
Parameters
----------
a : array_like
Input data.
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout of the copy. 'C' means C-order,
'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of `a` as closely
as possible. (Note that this function and :meth:`ndarray.copy` are very
similar, but have different default values for their order=
arguments.)
Returns
-------
arr : ndarray
Array interpretation of `a`.
Notes
-----
This is equivalent to:
>>> np.array(a, copy=True) #doctest: +SKIP
Examples
--------
Create an array x, with a reference y and a copy z:
>>> x = np.array([1, 2, 3])
>>> y = x
>>> z = np.copy(x)
Note that, when we modify x, y changes, but not z:
>>> x[0] = 10
>>> x[0] == y[0]
True
>>> x[0] == z[0]
False
"""
return array(a, order=order, copy=True)
# Basic operations
def _gradient_dispatcher(f, *varargs, **kwargs):
yield f
for v in varargs:
yield v
@array_function_dispatch(_gradient_dispatcher)
def gradient(f, *varargs, **kwargs):
"""
Return the gradient of an N-dimensional array.
The gradient is computed using second order accurate central differences
in the interior points and either first or second order accurate one-sided
(forward or backward) differences at the boundaries.
The returned gradient hence has the same shape as the input array.
Parameters
----------
f : array_like
An N-dimensional array containing samples of a scalar function.
varargs : list of scalar or array, optional
Spacing between f values. Default unitary spacing for all dimensions.
Spacing can be specified using:
1. single scalar to specify a sample distance for all dimensions.
2. N scalars to specify a constant sample distance for each dimension.
i.e. `dx`, `dy`, `dz`, ...
3. N arrays to specify the coordinates of the values along each
dimension of F. The length of the array must match the size of
the corresponding dimension
4. Any combination of N scalars/arrays with the meaning of 2. and 3.
If `axis` is given, the number of varargs must equal the number of axes.
Default: 1.
edge_order : {1, 2}, optional
Gradient is calculated using N-th order accurate differences
at the boundaries. Default: 1.
.. versionadded:: 1.9.1
axis : None or int or tuple of ints, optional
Gradient is calculated only along the given axis or axes
The default (axis = None) is to calculate the gradient for all the axes
of the input array. axis may be negative, in which case it counts from
the last to the first axis.
.. versionadded:: 1.11.0
Returns
-------
gradient : ndarray or list of ndarray
A set of ndarrays (or a single ndarray if there is only one dimension)
corresponding to the derivatives of f with respect to each dimension.
Each derivative has the same shape as f.
Examples
--------
>>> f = np.array([1, 2, 4, 7, 11, 16], dtype=float)
>>> np.gradient(f)
array([ 1. , 1.5, 2.5, 3.5, 4.5, 5. ])
>>> np.gradient(f, 2)
array([ 0.5 , 0.75, 1.25, 1.75, 2.25, 2.5 ])
Spacing can be also specified with an array that represents the coordinates
of the values F along the dimensions.
For instance a uniform spacing:
>>> x = np.arange(f.size)
>>> np.gradient(f, x)
array([ 1. , 1.5, 2.5, 3.5, 4.5, 5. ])
Or a non uniform one:
>>> x = np.array([0., 1., 1.5, 3.5, 4., 6.], dtype=float)
>>> np.gradient(f, x)
array([ 1. , 3. , 3.5, 6.7, 6.9, 2.5])
For two dimensional arrays, the return will be two arrays ordered by
axis. In this example the first array stands for the gradient in
rows and the second one in columns direction:
>>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=float))
[array([[ 2., 2., -1.],
[ 2., 2., -1.]]), array([[ 1. , 2.5, 4. ],
[ 1. , 1. , 1. ]])]
In this example the spacing is also specified:
uniform for axis=0 and non uniform for axis=1
>>> dx = 2.
>>> y = [1., 1.5, 3.5]
>>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=float), dx, y)
[array([[ 1. , 1. , -0.5],
[ 1. , 1. , -0.5]]), array([[ 2. , 2. , 2. ],
[ 2. , 1.7, 0.5]])]
It is possible to specify how boundaries are treated using `edge_order`
>>> x = np.array([0, 1, 2, 3, 4])
>>> f = x**2
>>> np.gradient(f, edge_order=1)
array([ 1., 2., 4., 6., 7.])
>>> np.gradient(f, edge_order=2)
array([-0., 2., 4., 6., 8.])
The `axis` keyword can be used to specify a subset of axes of which the
gradient is calculated
>>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=float), axis=0)
array([[ 2., 2., -1.],
[ 2., 2., -1.]])
Notes
-----
Assuming that :math:`f\\in C^{3}` (i.e., :math:`f` has at least 3 continuous
derivatives) and that :math:`h_{*}` is a non-homogeneous stepsize, we
minimize the "consistency error" :math:`\\eta_{i}` between the true gradient
and its estimate from a linear combination of the neighboring grid-points:
.. math::
\\eta_{i} = f_{i}^{\\left(1\\right)} -
\\left[ \\alpha f\\left(x_{i}\\right) +
\\beta f\\left(x_{i} + h_{d}\\right) +
\\gamma f\\left(x_{i}-h_{s}\\right)
\\right]
By substituting :math:`f(x_{i} + h_{d})` and :math:`f(x_{i} - h_{s})`
with their Taylor series expansion, this translates into solving
the following linear system:
.. math::
\\left\\{
\\begin{array}{r}
\\alpha+\\beta+\\gamma=0 \\\\
\\beta h_{d}-\\gamma h_{s}=1 \\\\
\\beta h_{d}^{2}+\\gamma h_{s}^{2}=0
\\end{array}
\\right.
The resulting approximation of :math:`f_{i}^{(1)}` is the following:
.. math::
\\hat f_{i}^{(1)} =
\\frac{
h_{s}^{2}f\\left(x_{i} + h_{d}\\right)
+ \\left(h_{d}^{2} - h_{s}^{2}\\right)f\\left(x_{i}\\right)
- h_{d}^{2}f\\left(x_{i}-h_{s}\\right)}
{ h_{s}h_{d}\\left(h_{d} + h_{s}\\right)}
+ \\mathcal{O}\\left(\\frac{h_{d}h_{s}^{2}
+ h_{s}h_{d}^{2}}{h_{d}
+ h_{s}}\\right)
It is worth noting that if :math:`h_{s}=h_{d}`
(i.e., data are evenly spaced)
we find the standard second order approximation:
.. math::
\\hat f_{i}^{(1)}=
\\frac{f\\left(x_{i+1}\\right) - f\\left(x_{i-1}\\right)}{2h}
+ \\mathcal{O}\\left(h^{2}\\right)
With a similar procedure the forward/backward approximations used for
boundaries can be derived.
References
----------
.. [1] Quarteroni A., Sacco R., Saleri F. (2007) Numerical Mathematics
(Texts in Applied Mathematics). New York: Springer.
.. [2] Durran D. R. (1999) Numerical Methods for Wave Equations
in Geophysical Fluid Dynamics. New York: Springer.
.. [3] Fornberg B. (1988) Generation of Finite Difference Formulas on
Arbitrarily Spaced Grids,
Mathematics of Computation 51, no. 184 : 699-706.
`PDF <http://www.ams.org/journals/mcom/1988-51-184/
S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_.
"""
f = np.asanyarray(f)
N = f.ndim # number of dimensions
axes = kwargs.pop('axis', None)
if axes is None:
axes = tuple(range(N))
else:
axes = _nx.normalize_axis_tuple(axes, N)
len_axes = len(axes)
n = len(varargs)
if n == 0:
# no spacing argument - use 1 in all axes
dx = [1.0] * len_axes
elif n == 1 and np.ndim(varargs[0]) == 0:
# single scalar for all axes
dx = varargs * len_axes
elif n == len_axes:
# scalar or 1d array for each axis
dx = list(varargs)
for i, distances in enumerate(dx):
if np.ndim(distances) == 0:
continue
elif np.ndim(distances) != 1:
raise ValueError("distances must be either scalars or 1d")
if len(distances) != f.shape[axes[i]]:
raise ValueError("when 1d, distances must match "
"the length of the corresponding dimension")
diffx = np.diff(distances)
# if distances are constant reduce to the scalar case
# since it brings a consistent speedup
if (diffx == diffx[0]).all():
diffx = diffx[0]
dx[i] = diffx
else:
raise TypeError("invalid number of arguments")
edge_order = kwargs.pop('edge_order', 1)
if kwargs:
raise TypeError('"{}" are not valid keyword arguments.'.format(
'", "'.join(kwargs.keys())))
if edge_order > 2:
raise ValueError("'edge_order' greater than 2 not supported")
# use central differences on interior and one-sided differences on the
# endpoints. This preserves second order-accuracy over the full domain.
outvals = []
# create slice objects --- initially all are [:, :, ..., :]
slice1 = [slice(None)]*N
slice2 = [slice(None)]*N
slice3 = [slice(None)]*N
slice4 = [slice(None)]*N
otype = f.dtype
if otype.type is np.datetime64:
# the timedelta dtype with the same unit information
otype = np.dtype(otype.name.replace('datetime', 'timedelta'))
# view as timedelta to allow addition
f = f.view(otype)
elif otype.type is np.timedelta64:
pass
elif np.issubdtype(otype, np.inexact):
pass
else:
# all other types convert to floating point
otype = np.double
for axis, ax_dx in zip(axes, dx):
if f.shape[axis] < edge_order + 1:
raise ValueError(
"Shape of array too small to calculate a numerical gradient, "
"at least (edge_order + 1) elements are required.")
# result allocation
out = np.empty_like(f, dtype=otype)
# spacing for the current axis
uniform_spacing = np.ndim(ax_dx) == 0
# Numerical differentiation: 2nd order interior
slice1[axis] = slice(1, -1)
slice2[axis] = slice(None, -2)
slice3[axis] = slice(1, -1)
slice4[axis] = slice(2, None)
if uniform_spacing:
out[tuple(slice1)] = (f[tuple(slice4)] - f[tuple(slice2)]) / (2. * ax_dx)
else:
dx1 = ax_dx[0:-1]
dx2 = ax_dx[1:]
a = -(dx2)/(dx1 * (dx1 + dx2))
b = (dx2 - dx1) / (dx1 * dx2)
c = dx1 / (dx2 * (dx1 + dx2))
# fix the shape for broadcasting
shape = np.ones(N, dtype=int)
shape[axis] = -1
a.shape = b.shape = c.shape = shape
# 1D equivalent -- out[1:-1] = a * f[:-2] + b * f[1:-1] + c * f[2:]
out[tuple(slice1)] = a * f[tuple(slice2)] + b * f[tuple(slice3)] + c * f[tuple(slice4)]
# Numerical differentiation: 1st order edges
if edge_order == 1:
slice1[axis] = 0
slice2[axis] = 1
slice3[axis] = 0
dx_0 = ax_dx if uniform_spacing else ax_dx[0]
# 1D equivalent -- out[0] = (f[1] - f[0]) / (x[1] - x[0])
out[tuple(slice1)] = (f[tuple(slice2)] - f[tuple(slice3)]) / dx_0
slice1[axis] = -1
slice2[axis] = -1
slice3[axis] = -2
dx_n = ax_dx if uniform_spacing else ax_dx[-1]
# 1D equivalent -- out[-1] = (f[-1] - f[-2]) / (x[-1] - x[-2])
out[tuple(slice1)] = (f[tuple(slice2)] - f[tuple(slice3)]) / dx_n
# Numerical differentiation: 2nd order edges
else:
slice1[axis] = 0
slice2[axis] = 0
slice3[axis] = 1
slice4[axis] = 2
if uniform_spacing:
a = -1.5 / ax_dx
b = 2. / ax_dx
c = -0.5 / ax_dx
else:
dx1 = ax_dx[0]
dx2 = ax_dx[1]
a = -(2. * dx1 + dx2)/(dx1 * (dx1 + dx2))
b = (dx1 + dx2) / (dx1 * dx2)
c = - dx1 / (dx2 * (dx1 + dx2))
# 1D equivalent -- out[0] = a * f[0] + b * f[1] + c * f[2]
out[tuple(slice1)] = a * f[tuple(slice2)] + b * f[tuple(slice3)] + c * f[tuple(slice4)]
slice1[axis] = -1
slice2[axis] = -3
slice3[axis] = -2
slice4[axis] = -1
if uniform_spacing:
a = 0.5 / ax_dx
b = -2. / ax_dx
c = 1.5 / ax_dx
else:
dx1 = ax_dx[-2]
dx2 = ax_dx[-1]
a = (dx2) / (dx1 * (dx1 + dx2))
b = - (dx2 + dx1) / (dx1 * dx2)
c = (2. * dx2 + dx1) / (dx2 * (dx1 + dx2))
# 1D equivalent -- out[-1] = a * f[-3] + b * f[-2] + c * f[-1]
out[tuple(slice1)] = a * f[tuple(slice2)] + b * f[tuple(slice3)] + c * f[tuple(slice4)]
outvals.append(out)
# reset the slice object in this dimension to ":"
slice1[axis] = slice(None)
slice2[axis] = slice(None)
slice3[axis] = slice(None)
slice4[axis] = slice(None)
if len_axes == 1:
return outvals[0]
else:
return outvals
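# Illustrative check of the non-uniform interior formula derived in the Notes
# above (not part of the original file): for f(x)=x**2 sampled at x=[0, 1, 3],
# h_s=1 and h_d=2 give a=-2/3, b=1/2, c=1/6, so the interior estimate is
# -2/3*0 + 1/2*1 + 1/6*9 = 2.0, the exact derivative at x=1; the endpoints use
# the first order one-sided differences.
# np.gradient(np.array([0., 1., 9.]), np.array([0., 1., 3.])) -> array([1., 2., 4.])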
def _diff_dispatcher(a, n=None, axis=None, prepend=None, append=None):
return (a, prepend, append)
@array_function_dispatch(_diff_dispatcher)
def diff(a, n=1, axis=-1, prepend=np._NoValue, append=np._NoValue):
"""
Calculate the n-th discrete difference along the given axis.
The first difference is given by ``out[n] = a[n+1] - a[n]`` along
the given axis, higher differences are calculated by using `diff`
recursively.
Parameters
----------
a : array_like
Input array
n : int, optional
The number of times values are differenced. If zero, the input
is returned as-is.
axis : int, optional
The axis along which the difference is taken, default is the
last axis.
prepend, append : array_like, optional
Values to prepend or append to "a" along axis prior to
performing the difference. Scalar values are expanded to
arrays with length 1 in the direction of axis and the shape
of the input array along all other axes. Otherwise the
dimension and shape must match "a" except along axis.
Returns
-------
diff : ndarray
The n-th differences. The shape of the output is the same as `a`
except along `axis` where the dimension is smaller by `n`. The
type of the output is the same as the type of the difference
between any two elements of `a`. This is the same as the type of
`a` in most cases. A notable exception is `datetime64`, which
results in a `timedelta64` output array.
See Also
--------
gradient, ediff1d, cumsum
Notes
-----
Type is preserved for boolean arrays, so the result will contain
`False` when consecutive elements are the same and `True` when they
differ.
For unsigned integer arrays, the results will also be unsigned. This
should not be surprising, as the result is consistent with
calculating the difference directly:
>>> u8_arr = np.array([1, 0], dtype=np.uint8)
>>> np.diff(u8_arr)
array([255], dtype=uint8)
>>> u8_arr[1,...] - u8_arr[0,...]
array(255, np.uint8)
If this is not desirable, then the array should be cast to a larger
integer type first:
>>> i16_arr = u8_arr.astype(np.int16)
>>> np.diff(i16_arr)
array([-1], dtype=int16)
Examples
--------
>>> x = np.array([1, 2, 4, 7, 0])
>>> np.diff(x)
array([ 1, 2, 3, -7])
>>> np.diff(x, n=2)
array([ 1, 1, -10])
>>> x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]])
>>> np.diff(x)
array([[2, 3, 4],
[5, 1, 2]])
>>> np.diff(x, axis=0)
array([[-1, 2, 0, -2]])
>>> x = np.arange('1066-10-13', '1066-10-16', dtype=np.datetime64)
>>> np.diff(x)
array([1, 1], dtype='timedelta64[D]')
"""
if n == 0:
return a
if n < 0:
raise ValueError(
"order must be non-negative but got " + repr(n))
a = asanyarray(a)
nd = a.ndim
axis = normalize_axis_index(axis, nd)
combined = []
if prepend is not np._NoValue:
prepend = np.asanyarray(prepend)
if prepend.ndim == 0:
shape = list(a.shape)
shape[axis] = 1
prepend = np.broadcast_to(prepend, tuple(shape))
combined.append(prepend)
combined.append(a)
if append is not np._NoValue:
append = np.asanyarray(append)
if append.ndim == 0:
shape = list(a.shape)
shape[axis] = 1
append = np.broadcast_to(append, tuple(shape))
combined.append(append)
if len(combined) > 1:
a = np.concatenate(combined, axis)
slice1 = [slice(None)] * nd
slice2 = [slice(None)] * nd
slice1[axis] = slice(1, None)
slice2[axis] = slice(None, -1)
slice1 = tuple(slice1)
slice2 = tuple(slice2)
op = not_equal if a.dtype == np.bool_ else subtract
for _ in range(n):
a = op(a[slice1], a[slice2])
return a
def _interp_dispatcher(x, xp, fp, left=None, right=None, period=None):
return (x, xp, fp)
@array_function_dispatch(_interp_dispatcher)
def interp(x, xp, fp, left=None, right=None, period=None):
"""
One-dimensional linear interpolation.
Returns the one-dimensional piecewise linear interpolant to a function
with given discrete data points (`xp`, `fp`), evaluated at `x`.
Parameters
----------
x : array_like
The x-coordinates at which to evaluate the interpolated values.
xp : 1-D sequence of floats
The x-coordinates of the data points, must be increasing if argument
`period` is not specified. Otherwise, `xp` is internally sorted after
normalizing the periodic boundaries with ``xp = xp % period``.
fp : 1-D sequence of float or complex
The y-coordinates of the data points, same length as `xp`.
left : optional float or complex corresponding to fp
Value to return for `x < xp[0]`, default is `fp[0]`.
right : optional float or complex corresponding to fp
Value to return for `x > xp[-1]`, default is `fp[-1]`.
period : None or float, optional
A period for the x-coordinates. This parameter allows the proper
interpolation of angular x-coordinates. Parameters `left` and `right`
are ignored if `period` is specified.
.. versionadded:: 1.10.0
Returns
-------
y : float or complex (corresponding to fp) or ndarray
The interpolated values, same shape as `x`.
Raises
------
ValueError
If `xp` and `fp` have different length
If `xp` or `fp` are not 1-D sequences
If `period == 0`
Notes
-----
Does not check that the x-coordinate sequence `xp` is increasing.
If `xp` is not increasing, the results are nonsense.
A simple check for increasing is::
np.all(np.diff(xp) > 0)
Examples
--------
>>> xp = [1, 2, 3]
>>> fp = [3, 2, 0]
>>> np.interp(2.5, xp, fp)
1.0
>>> np.interp([0, 1, 1.5, 2.72, 3.14], xp, fp)
array([ 3. , 3. , 2.5 , 0.56, 0. ])
>>> UNDEF = -99.0
>>> np.interp(3.14, xp, fp, right=UNDEF)
-99.0
Plot an interpolant to the sine function:
>>> x = np.linspace(0, 2*np.pi, 10)
>>> y = np.sin(x)
>>> xvals = np.linspace(0, 2*np.pi, 50)
>>> yinterp = np.interp(xvals, x, y)
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(xvals, yinterp, '-x')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.show()
Interpolation with periodic x-coordinates:
>>> x = [-180, -170, -185, 185, -10, -5, 0, 365]
>>> xp = [190, -190, 350, -350]
>>> fp = [5, 10, 3, 4]
>>> np.interp(x, xp, fp, period=360)
array([7.5, 5., 8.75, 6.25, 3., 3.25, 3.5, 3.75])
Complex interpolation:
>>> x = [1.5, 4.0]
>>> xp = [2,3,5]
>>> fp = [1.0j, 0, 2+3j]
>>> np.interp(x, xp, fp)
array([ 0.+1.j , 1.+1.5j])
"""
fp = np.asarray(fp)
if np.iscomplexobj(fp):
interp_func = compiled_interp_complex
input_dtype = np.complex128
else:
interp_func = compiled_interp
input_dtype = np.float64
if period is not None:
if period == 0:
raise ValueError("period must be a non-zero value")
period = abs(period)
left = None
right = None
x = np.asarray(x, dtype=np.float64)
xp = np.asarray(xp, dtype=np.float64)
fp = np.asarray(fp, dtype=input_dtype)
if xp.ndim != 1 or fp.ndim != 1:
raise ValueError("Data points must be 1-D sequences")
if xp.shape[0] != fp.shape[0]:
raise ValueError("fp and xp are not of the same length")
# normalizing periodic boundaries
x = x % period
xp = xp % period
asort_xp = np.argsort(xp)
xp = xp[asort_xp]
fp = fp[asort_xp]
xp = np.concatenate((xp[-1:]-period, xp, xp[0:1]+period))
fp = np.concatenate((fp[-1:], fp, fp[0:1]))
return interp_func(x, xp, fp, left, right)
def _angle_dispatcher(z, deg=None):
return (z,)
@array_function_dispatch(_angle_dispatcher)
def angle(z, deg=False):
"""
Return the angle of the complex argument.
Parameters
----------
z : array_like
A complex number or sequence of complex numbers.
deg : bool, optional
Return angle in degrees if True, radians if False (default).
Returns
-------
angle : ndarray or scalar
The counterclockwise angle from the positive real axis on
the complex plane, with dtype as numpy.float64.
.. versionchanged:: 1.16.0
This function works on subclasses of ndarray like `ma.array`.
See Also
--------
arctan2
absolute
Examples
--------
>>> np.angle([1.0, 1.0j, 1+1j]) # in radians
array([ 0. , 1.57079633, 0.78539816])
>>> np.angle(1+1j, deg=True) # in degrees
45.0
"""
z = asanyarray(z)
if issubclass(z.dtype.type, _nx.complexfloating):
zimag = z.imag
zreal = z.real
else:
zimag = 0
zreal = z
a = arctan2(zimag, zreal)
if deg:
a *= 180/pi
return a
def _unwrap_dispatcher(p, discont=None, axis=None):
return (p,)
@array_function_dispatch(_unwrap_dispatcher)
def unwrap(p, discont=pi, axis=-1):
"""
Unwrap by changing deltas between values to 2*pi complement.
Unwrap radian phase `p` by changing absolute jumps greater than
`discont` to their 2*pi complement along the given axis.
Parameters
----------
p : array_like
Input array.
discont : float, optional
Maximum discontinuity between values, default is ``pi``.
axis : int, optional
Axis along which unwrap will operate, default is the last axis.
Returns
-------
out : ndarray
Output array.
See Also
--------
rad2deg, deg2rad
Notes
-----
If the discontinuity in `p` is smaller than ``pi``, but larger than
`discont`, no unwrapping is done because taking the 2*pi complement
would only make the discontinuity larger.
Examples
--------
>>> phase = np.linspace(0, np.pi, num=5)
>>> phase[3:] += np.pi
>>> phase
array([ 0. , 0.78539816, 1.57079633, 5.49778714, 6.28318531])
>>> np.unwrap(phase)
array([ 0. , 0.78539816, 1.57079633, -0.78539816, 0. ])
"""
p = asarray(p)
nd = p.ndim
dd = diff(p, axis=axis)
slice1 = [slice(None, None)]*nd # full slices
slice1[axis] = slice(1, None)
slice1 = tuple(slice1)
ddmod = mod(dd + pi, 2*pi) - pi
_nx.copyto(ddmod, pi, where=(ddmod == -pi) & (dd > 0))
ph_correct = ddmod - dd
_nx.copyto(ph_correct, 0, where=abs(dd) < discont)
up = array(p, copy=True, dtype='d')
up[slice1] = p[slice1] + ph_correct.cumsum(axis)
return up
def _sort_complex(a):
return (a,)
@array_function_dispatch(_sort_complex)
def sort_complex(a):
"""
Sort a complex array using the real part first, then the imaginary part.
Parameters
----------
a : array_like
Input array
Returns
-------
out : complex ndarray
Always returns a sorted complex array.
Examples
--------
>>> np.sort_complex([5, 3, 6, 2, 1])
array([ 1.+0.j, 2.+0.j, 3.+0.j, 5.+0.j, 6.+0.j])
>>> np.sort_complex([1 + 2j, 2 - 1j, 3 - 2j, 3 - 3j, 3 + 5j])
array([ 1.+2.j, 2.-1.j, 3.-3.j, 3.-2.j, 3.+5.j])
"""
b = array(a, copy=True)
b.sort()
if not issubclass(b.dtype.type, _nx.complexfloating):
if b.dtype.char in 'bhBH':
return b.astype('F')
elif b.dtype.char == 'g':
return b.astype('G')
else:
return b.astype('D')
else:
return b
def _trim_zeros(filt, trim=None):
return (filt,)
@array_function_dispatch(_trim_zeros)
def trim_zeros(filt, trim='fb'):
"""
Trim the leading and/or trailing zeros from a 1-D array or sequence.
Parameters
----------
filt : 1-D array or sequence
Input array.
trim : str, optional
A string with 'f' representing trim from front and 'b' to trim from
back. Default is 'fb', trim zeros from both front and back of the
array.
Returns
-------
trimmed : 1-D array or sequence
The result of trimming the input. The input data type is preserved.
Examples
--------
>>> a = np.array((0, 0, 0, 1, 2, 3, 0, 2, 1, 0))
>>> np.trim_zeros(a)
array([1, 2, 3, 0, 2, 1])
>>> np.trim_zeros(a, 'b')
array([0, 0, 0, 1, 2, 3, 0, 2, 1])
The input data type is preserved, list/tuple in means list/tuple out.
>>> np.trim_zeros([0, 1, 2, 0])
[1, 2]
"""
first = 0
trim = trim.upper()
if 'F' in trim:
for i in filt:
if i != 0.:
break
else:
first = first + 1
last = len(filt)
if 'B' in trim:
for i in filt[::-1]:
if i != 0.:
break
else:
last = last - 1
return filt[first:last]
def _extract_dispatcher(condition, arr):
return (condition, arr)
@array_function_dispatch(_extract_dispatcher)
def extract(condition, arr):
"""
Return the elements of an array that satisfy some condition.
This is equivalent to ``np.compress(ravel(condition), ravel(arr))``. If
`condition` is boolean ``np.extract`` is equivalent to ``arr[condition]``.
Note that `place` does the exact opposite of `extract`.
Parameters
----------
condition : array_like
An array whose nonzero or True entries indicate the elements of `arr`
to extract.
arr : array_like
Input array of the same size as `condition`.
Returns
-------
extract : ndarray
Rank 1 array of values from `arr` where `condition` is True.
See Also
--------
take, put, copyto, compress, place
Examples
--------
>>> arr = np.arange(12).reshape((3, 4))
>>> arr
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> condition = np.mod(arr, 3)==0
>>> condition
array([[ True, False, False, True],
[False, False, True, False],
[False, True, False, False]])
>>> np.extract(condition, arr)
array([0, 3, 6, 9])
If `condition` is boolean:
>>> arr[condition]
array([0, 3, 6, 9])
"""
return _nx.take(ravel(arr), nonzero(ravel(condition))[0])
def _place_dispatcher(arr, mask, vals):
return (arr, mask, vals)
@array_function_dispatch(_place_dispatcher)
def place(arr, mask, vals):
"""
Change elements of an array based on conditional and input values.
Similar to ``np.copyto(arr, vals, where=mask)``, the difference is that
`place` uses the first N elements of `vals`, where N is the number of
True values in `mask`, while `copyto` uses the elements where `mask`
is True.
Note that `extract` does the exact opposite of `place`.
Parameters
----------
arr : ndarray
Array to put data into.
mask : array_like
Boolean mask array. Must have the same size as `arr`.
vals : 1-D sequence
Values to put into `arr`. Only the first N elements are used, where
N is the number of True values in `mask`. If `vals` is smaller
than N, it will be repeated, and if elements of `arr` are to be masked,
this sequence must be non-empty.
See Also
--------
copyto, put, take, extract
Examples
--------
>>> arr = np.arange(6).reshape(2, 3)
>>> np.place(arr, arr>2, [44, 55])
>>> arr
array([[ 0, 1, 2],
[44, 55, 44]])
"""
if not isinstance(arr, np.ndarray):
raise TypeError("argument 1 must be numpy.ndarray, "
"not {name}".format(name=type(arr).__name__))
return _insert(arr, mask, vals)
def disp(mesg, device=None, linefeed=True):
"""
Display a message on a device.
Parameters
----------
mesg : str
Message to display.
device : object
Device to write message. If None, defaults to ``sys.stdout`` which is
very similar to ``print``. `device` needs to have ``write()`` and
``flush()`` methods.
linefeed : bool, optional
Option whether to print a line feed or not. Defaults to True.
Raises
------
AttributeError
If `device` does not have a ``write()`` or ``flush()`` method.
Examples
--------
Besides ``sys.stdout``, a file-like object can also be used as it has
both required methods:
>>> from io import StringIO
>>> buf = StringIO()
>>> np.disp(u'"Display" in a file', device=buf)
>>> buf.getvalue()
'"Display" in a file\\n'
"""
if device is None:
device = sys.stdout
if linefeed:
device.write('%s\n' % mesg)
else:
device.write('%s' % mesg)
device.flush()
return
# See https://docs.scipy.org/doc/numpy/reference/c-api.generalized-ufuncs.html
_DIMENSION_NAME = r'\w+'
_CORE_DIMENSION_LIST = '(?:{0:}(?:,{0:})*)?'.format(_DIMENSION_NAME)
_ARGUMENT = r'\({}\)'.format(_CORE_DIMENSION_LIST)
_ARGUMENT_LIST = '{0:}(?:,{0:})*'.format(_ARGUMENT)
_SIGNATURE = '^{0:}->{0:}$'.format(_ARGUMENT_LIST)
def _parse_gufunc_signature(signature):
"""
Parse string signatures for a generalized universal function.
Arguments
---------
signature : string
Generalized universal function signature, e.g., ``(m,n),(n,p)->(m,p)``
for ``np.matmul``.
Returns
-------
Tuple of input and output core dimensions parsed from the signature, each
of the form List[Tuple[str, ...]].
"""
if not re.match(_SIGNATURE, signature):
raise ValueError(
'not a valid gufunc signature: {}'.format(signature))
return tuple([tuple(re.findall(_DIMENSION_NAME, arg))
for arg in re.findall(_ARGUMENT, arg_list)]
for arg_list in signature.split('->'))
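# For example (illustrative, not part of the original file):
# _parse_gufunc_signature('(m,n),(n,p)->(m,p)') returns
# ([('m', 'n'), ('n', 'p')], [('m', 'p')]),
# i.e. the core dimensions of the two inputs and of the single output.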
def _update_dim_sizes(dim_sizes, arg, core_dims):
"""
Incrementally check and update core dimension sizes for a single argument.
Arguments
---------
dim_sizes : Dict[str, int]
Sizes of existing core dimensions. Will be updated in-place.
arg : ndarray
Argument to examine.
core_dims : Tuple[str, ...]
Core dimensions for this argument.
"""
if not core_dims:
return
num_core_dims = len(core_dims)
if arg.ndim < num_core_dims:
raise ValueError(
'%d-dimensional argument does not have enough '
'dimensions for all core dimensions %r'
% (arg.ndim, core_dims))
core_shape = arg.shape[-num_core_dims:]
for dim, size in zip(core_dims, core_shape):
if dim in dim_sizes:
if size != dim_sizes[dim]:
raise ValueError(
'inconsistent size for core dimension %r: %r vs %r'
% (dim, size, dim_sizes[dim]))
else:
dim_sizes[dim] = size
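# For example, starting from dim_sizes == {}, an argument of shape (5, 3, 4)
# with core dimensions ('n', 'p') records its trailing shape as
# {'n': 3, 'p': 4}; a later argument whose 'n' core dimension is not 3
# triggers the "inconsistent size" ValueError above.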
def _parse_input_dimensions(args, input_core_dims):
"""
Parse broadcast and core dimensions for vectorize with a signature.
Arguments
---------
args : Tuple[ndarray, ...]
Tuple of input arguments to examine.
input_core_dims : List[Tuple[str, ...]]
List of core dimensions corresponding to each input.
Returns
-------
broadcast_shape : Tuple[int, ...]
Common shape to broadcast all non-core dimensions to.
dim_sizes : Dict[str, int]
Common sizes for named core dimensions.
"""
broadcast_args = []
dim_sizes = {}
for arg, core_dims in zip(args, input_core_dims):
_update_dim_sizes(dim_sizes, arg, core_dims)
ndim = arg.ndim - len(core_dims)
dummy_array = np.lib.stride_tricks.as_strided(0, arg.shape[:ndim])
broadcast_args.append(dummy_array)
broadcast_shape = np.lib.stride_tricks._broadcast_shape(*broadcast_args)
return broadcast_shape, dim_sizes
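# The zero-strided dummy arrays above carry only the loop (non-core) shapes,
# so broadcasting them is a cheap way to obtain the common loop shape.  For
# instance, with signature '(m,n),(n)->(m)' and inputs of shapes (5, 3, 4)
# and (4,), broadcast_shape is (5,) and dim_sizes is {'m': 3, 'n': 4}.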
def _calculate_shapes(broadcast_shape, dim_sizes, list_of_core_dims):
"""Helper for calculating broadcast shapes with core dimensions."""
return [broadcast_shape + tuple(dim_sizes[dim] for dim in core_dims)
for core_dims in list_of_core_dims]
def _create_arrays(broadcast_shape, dim_sizes, list_of_core_dims, dtypes):
"""Helper for creating output arrays in vectorize."""
shapes = _calculate_shapes(broadcast_shape, dim_sizes, list_of_core_dims)
arrays = tuple(np.empty(shape, dtype=dtype)
for shape, dtype in zip(shapes, dtypes))
return arrays
@set_module('numpy')
class vectorize(object):
"""
vectorize(pyfunc, otypes=None, doc=None, excluded=None, cache=False,
signature=None)
Generalized function class.
Define a vectorized function which takes a nested sequence of objects or
numpy arrays as inputs and returns a single numpy array or a tuple of numpy
arrays. The vectorized function evaluates `pyfunc` over successive tuples
of the input arrays like the python map function, except it uses the
broadcasting rules of numpy.
The data type of the output of `vectorized` is determined by calling
the function with the first element of the input. This can be avoided
by specifying the `otypes` argument.
Parameters
----------
pyfunc : callable
A python function or method.
otypes : str or list of dtypes, optional
The output data type. It must be specified as either a string of
typecode characters or a list of data type specifiers. There should
be one data type specifier for each output.
doc : str, optional
The docstring for the function. If `None`, the docstring will be the
``pyfunc.__doc__``.
excluded : set, optional
Set of strings or integers representing the positional or keyword
arguments for which the function will not be vectorized. These will be
passed directly to `pyfunc` unmodified.
.. versionadded:: 1.7.0
cache : bool, optional
If `True`, then cache the first function call that determines the number
of outputs if `otypes` is not provided.
.. versionadded:: 1.7.0
signature : string, optional
Generalized universal function signature, e.g., ``(m,n),(n)->(m)`` for
vectorized matrix-vector multiplication. If provided, ``pyfunc`` will
be called with (and expected to return) arrays with shapes given by the
size of corresponding core dimensions. By default, ``pyfunc`` is
assumed to take scalars as input and output.
.. versionadded:: 1.12.0
Returns
-------
vectorized : callable
Vectorized function.
Examples
--------
>>> def myfunc(a, b):
... "Return a-b if a>b, otherwise return a+b"
... if a > b:
... return a - b
... else:
... return a + b
>>> vfunc = np.vectorize(myfunc)
>>> vfunc([1, 2, 3, 4], 2)
array([3, 4, 1, 2])
The docstring is taken from the input function to `vectorize` unless it
is specified:
>>> vfunc.__doc__
'Return a-b if a>b, otherwise return a+b'
>>> vfunc = np.vectorize(myfunc, doc='Vectorized `myfunc`')
>>> vfunc.__doc__
'Vectorized `myfunc`'
The output type is determined by evaluating the first element of the input,
unless it is specified:
>>> out = vfunc([1, 2, 3, 4], 2)
>>> type(out[0])
<type 'numpy.int32'>
>>> vfunc = np.vectorize(myfunc, otypes=[float])
>>> out = vfunc([1, 2, 3, 4], 2)
>>> type(out[0])
<type 'numpy.float64'>
The `excluded` argument can be used to prevent vectorizing over certain
arguments. This can be useful for array-like arguments of a fixed length
such as the coefficients for a polynomial as in `polyval`:
>>> def mypolyval(p, x):
... _p = list(p)
... res = _p.pop(0)
... while _p:
... res = res*x + _p.pop(0)
... return res
>>> vpolyval = np.vectorize(mypolyval, excluded=['p'])
>>> vpolyval(p=[1, 2, 3], x=[0, 1])
array([3, 6])
Positional arguments may also be excluded by specifying their position:
>>> vpolyval.excluded.add(0)
>>> vpolyval([1, 2, 3], x=[0, 1])
array([3, 6])
The `signature` argument allows for vectorizing functions that act on
non-scalar arrays of fixed length. For example, you can use it for a
vectorized calculation of Pearson correlation coefficient and its p-value:
>>> import scipy.stats
>>> pearsonr = np.vectorize(scipy.stats.pearsonr,
... signature='(n),(n)->(),()')
>>> pearsonr([[0, 1, 2, 3]], [[1, 2, 3, 4], [4, 3, 2, 1]])
(array([ 1., -1.]), array([ 0., 0.]))
Or for a vectorized convolution:
>>> convolve = np.vectorize(np.convolve, signature='(n),(m)->(k)')
>>> convolve(np.eye(4), [1, 2, 1])
array([[ 1., 2., 1., 0., 0., 0.],
[ 0., 1., 2., 1., 0., 0.],
[ 0., 0., 1., 2., 1., 0.],
[ 0., 0., 0., 1., 2., 1.]])
See Also
--------
frompyfunc : Takes an arbitrary Python function and returns a ufunc
Notes
-----
The `vectorize` function is provided primarily for convenience, not for
performance. The implementation is essentially a for loop.
If `otypes` is not specified, then a call to the function with the
first argument will be used to determine the number of outputs. The
results of this call will be cached if `cache` is `True` to prevent
calling the function twice. However, to implement the cache, the
original function must be wrapped which will slow down subsequent
calls, so only do this if your function is expensive.
The new keyword argument interface and `excluded` argument support
    further degrade performance.
References
----------
.. [1] NumPy Reference, section `Generalized Universal Function API
<https://docs.scipy.org/doc/numpy/reference/c-api.generalized-ufuncs.html>`_.
"""
def __init__(self, pyfunc, otypes=None, doc=None, excluded=None,
cache=False, signature=None):
self.pyfunc = pyfunc
self.cache = cache
self.signature = signature
self._ufunc = None # Caching to improve default performance
if doc is None:
self.__doc__ = pyfunc.__doc__
else:
self.__doc__ = doc
if isinstance(otypes, str):
for char in otypes:
if char not in typecodes['All']:
raise ValueError("Invalid otype specified: %s" % (char,))
elif iterable(otypes):
otypes = ''.join([_nx.dtype(x).char for x in otypes])
elif otypes is not None:
raise ValueError("Invalid otype specification")
self.otypes = otypes
# Excluded variable support
if excluded is None:
excluded = set()
self.excluded = set(excluded)
if signature is not None:
self._in_and_out_core_dims = _parse_gufunc_signature(signature)
else:
self._in_and_out_core_dims = None
def __call__(self, *args, **kwargs):
"""
Return arrays with the results of `pyfunc` broadcast (vectorized) over
`args` and `kwargs` not in `excluded`.
"""
excluded = self.excluded
if not kwargs and not excluded:
func = self.pyfunc
vargs = args
else:
# The wrapper accepts only positional arguments: we use `names` and
# `inds` to mutate `the_args` and `kwargs` to pass to the original
# function.
nargs = len(args)
names = [_n for _n in kwargs if _n not in excluded]
inds = [_i for _i in range(nargs) if _i not in excluded]
the_args = list(args)
def func(*vargs):
for _n, _i in enumerate(inds):
the_args[_i] = vargs[_n]
kwargs.update(zip(names, vargs[len(inds):]))
return self.pyfunc(*the_args, **kwargs)
vargs = [args[_i] for _i in inds]
vargs.extend([kwargs[_n] for _n in names])
return self._vectorize_call(func=func, args=vargs)
def _get_ufunc_and_otypes(self, func, args):
"""Return (ufunc, otypes)."""
# frompyfunc will fail if args is empty
if not args:
raise ValueError('args can not be empty')
if self.otypes is not None:
otypes = self.otypes
nout = len(otypes)
# Note logic here: We only *use* self._ufunc if func is self.pyfunc
# even though we set self._ufunc regardless.
if func is self.pyfunc and self._ufunc is not None:
ufunc = self._ufunc
else:
ufunc = self._ufunc = frompyfunc(func, len(args), nout)
else:
# Get number of outputs and output types by calling the function on
# the first entries of args. We also cache the result to prevent
# the subsequent call when the ufunc is evaluated.
# Assumes that ufunc first evaluates the 0th elements in the input
# arrays (the input values are not checked to ensure this)
args = [asarray(arg) for arg in args]
if builtins.any(arg.size == 0 for arg in args):
raise ValueError('cannot call `vectorize` on size 0 inputs '
'unless `otypes` is set')
inputs = [arg.flat[0] for arg in args]
outputs = func(*inputs)
# Performance note: profiling indicates that -- for simple
# functions at least -- this wrapping can almost double the
# execution time.
# Hence we make it optional.
if self.cache:
_cache = [outputs]
def _func(*vargs):
if _cache:
return _cache.pop()
else:
return func(*vargs)
else:
_func = func
if isinstance(outputs, tuple):
nout = len(outputs)
else:
nout = 1
outputs = (outputs,)
otypes = ''.join([asarray(outputs[_k]).dtype.char
for _k in range(nout)])
# Performance note: profiling indicates that creating the ufunc is
# not a significant cost compared with wrapping so it seems not
# worth trying to cache this.
ufunc = frompyfunc(_func, len(args), nout)
return ufunc, otypes
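    # When ``otypes`` is omitted, the probing call above evaluates the wrapped
    # function on the first elements of the inputs purely to count outputs and
    # infer their dtypes; e.g. a pyfunc returning ``(a - b, a * b)`` for float
    # inputs yields nout == 2 and otypes == 'dd'.  Supplying ``otypes`` up
    # front avoids that extra call.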
def _vectorize_call(self, func, args):
"""Vectorized call to `func` over positional `args`."""
if self.signature is not None:
res = self._vectorize_call_with_signature(func, args)
elif not args:
res = func()
else:
ufunc, otypes = self._get_ufunc_and_otypes(func=func, args=args)
# Convert args to object arrays first
inputs = [array(a, copy=False, subok=True, dtype=object)
for a in args]
outputs = ufunc(*inputs)
if ufunc.nout == 1:
res = array(outputs, copy=False, subok=True, dtype=otypes[0])
else:
res = tuple([array(x, copy=False, subok=True, dtype=t)
for x, t in zip(outputs, otypes)])
return res
def _vectorize_call_with_signature(self, func, args):
"""Vectorized call over positional arguments with a signature."""
input_core_dims, output_core_dims = self._in_and_out_core_dims
if len(args) != len(input_core_dims):
raise TypeError('wrong number of positional arguments: '
'expected %r, got %r'
% (len(input_core_dims), len(args)))
args = tuple(asanyarray(arg) for arg in args)
broadcast_shape, dim_sizes = _parse_input_dimensions(
args, input_core_dims)
input_shapes = _calculate_shapes(broadcast_shape, dim_sizes,
input_core_dims)
args = [np.broadcast_to(arg, shape, subok=True)
for arg, shape in zip(args, input_shapes)]
outputs = None
otypes = self.otypes
nout = len(output_core_dims)
for index in np.ndindex(*broadcast_shape):
results = func(*(arg[index] for arg in args))
n_results = len(results) if isinstance(results, tuple) else 1
if nout != n_results:
raise ValueError(
'wrong number of outputs from pyfunc: expected %r, got %r'
% (nout, n_results))
if nout == 1:
results = (results,)
if outputs is None:
for result, core_dims in zip(results, output_core_dims):
_update_dim_sizes(dim_sizes, result, core_dims)
if otypes is None:
otypes = [asarray(result).dtype for result in results]
outputs = _create_arrays(broadcast_shape, dim_sizes,
output_core_dims, otypes)
for output, result in zip(outputs, results):
output[index] = result
if outputs is None:
# did not call the function even once
if otypes is None:
raise ValueError('cannot call `vectorize` on size 0 inputs '
'unless `otypes` is set')
if builtins.any(dim not in dim_sizes
for dims in output_core_dims
for dim in dims):
raise ValueError('cannot call `vectorize` with a signature '
'including new output dimensions on size 0 '
'inputs')
outputs = _create_arrays(broadcast_shape, dim_sizes,
output_core_dims, otypes)
return outputs[0] if nout == 1 else outputs
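# Note that the signature-based path above loops over ``np.ndindex`` of the
# broadcast shape and calls ``pyfunc`` once per loop index in pure Python, so
# (as the class docstring warns) ``vectorize`` is a convenience wrapper, not
# a performance optimization.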
def _cov_dispatcher(m, y=None, rowvar=None, bias=None, ddof=None,
fweights=None, aweights=None):
return (m, y, fweights, aweights)
@array_function_dispatch(_cov_dispatcher)
def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None,
aweights=None):
"""
Estimate a covariance matrix, given data and weights.
Covariance indicates the level to which two variables vary together.
If we examine N-dimensional samples, :math:`X = [x_1, x_2, ... x_N]^T`,
then the covariance matrix element :math:`C_{ij}` is the covariance of
:math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance
of :math:`x_i`.
See the notes for an outline of the algorithm.
Parameters
----------
m : array_like
A 1-D or 2-D array containing multiple variables and observations.
Each row of `m` represents a variable, and each column a single
observation of all those variables. Also see `rowvar` below.
y : array_like, optional
An additional set of variables and observations. `y` has the same form
as that of `m`.
rowvar : bool, optional
If `rowvar` is True (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
bias : bool, optional
Default normalization (False) is by ``(N - 1)``, where ``N`` is the
number of observations given (unbiased estimate). If `bias` is True,
then normalization is by ``N``. These values can be overridden by using
the keyword ``ddof`` in numpy versions >= 1.5.
ddof : int, optional
If not ``None`` the default value implied by `bias` is overridden.
Note that ``ddof=1`` will return the unbiased estimate, even if both
`fweights` and `aweights` are specified, and ``ddof=0`` will return
the simple average. See the notes for the details. The default value
is ``None``.
.. versionadded:: 1.5
fweights : array_like, int, optional
1-D array of integer frequency weights; the number of times each
observation vector should be repeated.
.. versionadded:: 1.10
aweights : array_like, optional
1-D array of observation vector weights. These relative weights are
typically large for observations considered "important" and smaller for
observations considered less "important". If ``ddof=0`` the array of
weights can be used to assign probabilities to observation vectors.
.. versionadded:: 1.10
Returns
-------
out : ndarray
The covariance matrix of the variables.
See Also
--------
corrcoef : Normalized covariance matrix
Notes
-----
Assume that the observations are in the columns of the observation
array `m` and let ``f = fweights`` and ``a = aweights`` for brevity. The
steps to compute the weighted covariance are as follows::
>>> w = f * a
>>> v1 = np.sum(w)
>>> v2 = np.sum(w * a)
>>> m -= np.sum(m * w, axis=1, keepdims=True) / v1
>>> cov = np.dot(m * w, m.T) * v1 / (v1**2 - ddof * v2)
Note that when ``a == 1``, the normalization factor
``v1 / (v1**2 - ddof * v2)`` goes over to ``1 / (np.sum(f) - ddof)``
as it should.
Examples
--------
Consider two variables, :math:`x_0` and :math:`x_1`, which
correlate perfectly, but in opposite directions:
>>> x = np.array([[0, 2], [1, 1], [2, 0]]).T
>>> x
array([[0, 1, 2],
[2, 1, 0]])
Note how :math:`x_0` increases while :math:`x_1` decreases. The covariance
matrix shows this clearly:
>>> np.cov(x)
array([[ 1., -1.],
[-1., 1.]])
Note that element :math:`C_{0,1}`, which shows the correlation between
:math:`x_0` and :math:`x_1`, is negative.
Further, note how `x` and `y` are combined:
>>> x = [-2.1, -1, 4.3]
>>> y = [3, 1.1, 0.12]
>>> X = np.stack((x, y), axis=0)
>>> print(np.cov(X))
[[ 11.71 -4.286 ]
[ -4.286 2.14413333]]
>>> print(np.cov(x, y))
[[ 11.71 -4.286 ]
[ -4.286 2.14413333]]
>>> print(np.cov(x))
11.71
"""
# Check inputs
if ddof is not None and ddof != int(ddof):
raise ValueError(
"ddof must be integer")
# Handles complex arrays too
m = np.asarray(m)
if m.ndim > 2:
raise ValueError("m has more than 2 dimensions")
if y is None:
dtype = np.result_type(m, np.float64)
else:
y = np.asarray(y)
if y.ndim > 2:
raise ValueError("y has more than 2 dimensions")
dtype = np.result_type(m, y, np.float64)
X = array(m, ndmin=2, dtype=dtype)
if not rowvar and X.shape[0] != 1:
X = X.T
if X.shape[0] == 0:
return np.array([]).reshape(0, 0)
if y is not None:
y = array(y, copy=False, ndmin=2, dtype=dtype)
if not rowvar and y.shape[0] != 1:
y = y.T
X = np.concatenate((X, y), axis=0)
if ddof is None:
if bias == 0:
ddof = 1
else:
ddof = 0
# Get the product of frequencies and weights
w = None
if fweights is not None:
fweights = np.asarray(fweights, dtype=float)
if not np.all(fweights == np.around(fweights)):
raise TypeError(
"fweights must be integer")
if fweights.ndim > 1:
raise RuntimeError(
"cannot handle multidimensional fweights")
if fweights.shape[0] != X.shape[1]:
raise RuntimeError(
"incompatible numbers of samples and fweights")
if any(fweights < 0):
raise ValueError(
"fweights cannot be negative")
w = fweights
if aweights is not None:
aweights = np.asarray(aweights, dtype=float)
if aweights.ndim > 1:
raise RuntimeError(
"cannot handle multidimensional aweights")
if aweights.shape[0] != X.shape[1]:
raise RuntimeError(
"incompatible numbers of samples and aweights")
if any(aweights < 0):
raise ValueError(
"aweights cannot be negative")
if w is None:
w = aweights
else:
w *= aweights
avg, w_sum = average(X, axis=1, weights=w, returned=True)
w_sum = w_sum[0]
# Determine the normalization
if w is None:
fact = X.shape[1] - ddof
elif ddof == 0:
fact = w_sum
elif aweights is None:
fact = w_sum - ddof
else:
fact = w_sum - ddof*sum(w*aweights)/w_sum
if fact <= 0:
warnings.warn("Degrees of freedom <= 0 for slice",
RuntimeWarning, stacklevel=2)
fact = 0.0
X -= avg[:, None]
if w is None:
X_T = X.T
else:
X_T = (X*w).T
c = dot(X, X_T.conj())
c *= np.true_divide(1, fact)
return c.squeeze()
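# Worked example of the normalization above: with fweights f = [1, 2] and
# aweights a = [1.0, 1.0], w = [1, 2], w_sum = 3 and sum(w * a) = 3, so for
# ddof = 1 ``fact`` is 3 - 1 * 3 / 3 = 2, i.e. the result is scaled by 1/2,
# which matches both the docstring factor v1 / (v1**2 - ddof * v2) = 3 / 6
# and the unweighted 1 / (sum(f) - ddof).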
def _corrcoef_dispatcher(x, y=None, rowvar=None, bias=None, ddof=None):
return (x, y)
@array_function_dispatch(_corrcoef_dispatcher)
def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, ddof=np._NoValue):
"""
Return Pearson product-moment correlation coefficients.
Please refer to the documentation for `cov` for more detail. The
relationship between the correlation coefficient matrix, `R`, and the
covariance matrix, `C`, is
.. math:: R_{ij} = \\frac{ C_{ij} } { \\sqrt{ C_{ii} * C_{jj} } }
The values of `R` are between -1 and 1, inclusive.
Parameters
----------
x : array_like
A 1-D or 2-D array containing multiple variables and observations.
Each row of `x` represents a variable, and each column a single
observation of all those variables. Also see `rowvar` below.
y : array_like, optional
An additional set of variables and observations. `y` has the same
shape as `x`.
rowvar : bool, optional
If `rowvar` is True (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
bias : _NoValue, optional
Has no effect, do not use.
.. deprecated:: 1.10.0
ddof : _NoValue, optional
Has no effect, do not use.
.. deprecated:: 1.10.0
Returns
-------
R : ndarray
The correlation coefficient matrix of the variables.
See Also
--------
cov : Covariance matrix
Notes
-----
Due to floating point rounding the resulting array may not be Hermitian,
the diagonal elements may not be 1, and the elements may not satisfy the
inequality abs(a) <= 1. The real and imaginary parts are clipped to the
interval [-1, 1] in an attempt to improve on that situation but is not
much help in the complex case.
This function accepts but discards arguments `bias` and `ddof`. This is
for backwards compatibility with previous versions of this function. These
arguments had no effect on the return values of the function and can be
safely ignored in this and previous versions of numpy.
"""
if bias is not np._NoValue or ddof is not np._NoValue:
# 2015-03-15, 1.10
warnings.warn('bias and ddof have no effect and are deprecated',
DeprecationWarning, stacklevel=2)
c = cov(x, y, rowvar)
try:
d = diag(c)
except ValueError:
# scalar covariance
# nan if incorrect value (nan, inf, 0), 1 otherwise
return c / c
stddev = sqrt(d.real)
c /= stddev[:, None]
c /= stddev[None, :]
# Clip real and imaginary parts to [-1, 1]. This does not guarantee
# abs(a[i,j]) <= 1 for complex arrays, but is the best we can do without
# excessive work.
np.clip(c.real, -1, 1, out=c.real)
if np.iscomplexobj(c):
np.clip(c.imag, -1, 1, out=c.imag)
return c
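# The scalar-covariance fall-through above returns ``c / c``, which is 1.0
# for a finite nonzero variance and nan otherwise; for example
# ``np.corrcoef([1.0, 2.0, 3.0])`` evaluates to 1.0.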
@set_module('numpy')
def blackman(M):
"""
Return the Blackman window.
The Blackman window is a taper formed by using the first three
terms of a summation of cosines. It was designed to have close to the
minimal leakage possible. It is close to optimal, only slightly worse
than a Kaiser window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
Returns
-------
out : ndarray
The window, with the maximum value normalized to one (the value one
appears only if the number of samples is odd).
See Also
--------
bartlett, hamming, hanning, kaiser
Notes
-----
The Blackman window is defined as
.. math:: w(n) = 0.42 - 0.5 \\cos(2\\pi n/M) + 0.08 \\cos(4\\pi n/M)
Most references to the Blackman window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function. It is known as a
"near optimal" tapering function, almost as good (by some measures)
as the kaiser window.
References
----------
Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra,
Dover Publications, New York.
Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing.
Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> np.blackman(12)
array([ -1.38777878e-17, 3.26064346e-02, 1.59903635e-01,
4.14397981e-01, 7.36045180e-01, 9.67046769e-01,
9.67046769e-01, 7.36045180e-01, 4.14397981e-01,
1.59903635e-01, 3.26064346e-02, -1.38777878e-17])
Plot the window and the frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.blackman(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Blackman window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Blackman window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0, M)
return 0.42 - 0.5*cos(2.0*pi*n/(M-1)) + 0.08*cos(4.0*pi*n/(M-1))
@set_module('numpy')
def bartlett(M):
"""
Return the Bartlett window.
The Bartlett window is very similar to a triangular window, except
that the end points are at zero. It is often used in signal
processing for tapering a signal, without generating too much
ripple in the frequency domain.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : array
The triangular window, with the maximum value normalized to one
(the value one appears only if the number of samples is odd), with
the first and last samples equal to zero.
See Also
--------
blackman, hamming, hanning, kaiser
Notes
-----
The Bartlett window is defined as
.. math:: w(n) = \\frac{2}{M-1} \\left(
\\frac{M-1}{2} - \\left|n - \\frac{M-1}{2}\\right|
\\right)
Most references to the Bartlett window come from the signal
processing literature, where it is used as one of many windowing
functions for smoothing values. Note that convolution with this
window produces linear interpolation. It is also known as an
    apodization (which means "removing the foot", i.e. smoothing
discontinuities at the beginning and end of the sampled signal) or
tapering function. The fourier transform of the Bartlett is the product
of two sinc functions.
Note the excellent discussion in Kanasewich.
References
----------
.. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
Biometrika 37, 1-16, 1950.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 109-110.
.. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal
Processing", Prentice-Hall, 1999, pp. 468-471.
.. [4] Wikipedia, "Window function",
https://en.wikipedia.org/wiki/Window_function
.. [5] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 429.
Examples
--------
>>> np.bartlett(12)
array([ 0. , 0.18181818, 0.36363636, 0.54545455, 0.72727273,
0.90909091, 0.90909091, 0.72727273, 0.54545455, 0.36363636,
0.18181818, 0. ])
Plot the window and its frequency response (requires SciPy and matplotlib):
>>> from numpy.fft import fft, fftshift
>>> window = np.bartlett(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Bartlett window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Bartlett window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0, M)
return where(less_equal(n, (M-1)/2.0), 2.0*n/(M-1), 2.0 - 2.0*n/(M-1))
@set_module('numpy')
def hanning(M):
"""
Return the Hanning window.
The Hanning window is a taper formed by using a weighted cosine.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : ndarray, shape(M,)
The window, with the maximum value normalized to one (the value
one appears only if `M` is odd).
See Also
--------
bartlett, blackman, hamming, kaiser
Notes
-----
The Hanning window is defined as
.. math:: w(n) = 0.5 - 0.5cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
\\qquad 0 \\leq n \\leq M-1
The Hanning was named for Julius von Hann, an Austrian meteorologist.
It is also known as the Cosine Bell. Some authors prefer that it be
called a Hann window, to help avoid confusion with the very similar
Hamming window.
Most references to the Hanning window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 106-108.
.. [3] Wikipedia, "Window function",
https://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
>>> np.hanning(12)
array([ 0. , 0.07937323, 0.29229249, 0.57115742, 0.82743037,
0.97974649, 0.97974649, 0.82743037, 0.57115742, 0.29229249,
0.07937323, 0. ])
Plot the window and its frequency response:
>>> import matplotlib.pyplot as plt
>>> from numpy.fft import fft, fftshift
>>> window = np.hanning(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Hann window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of the Hann window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0, M)
return 0.5 - 0.5*cos(2.0*pi*n/(M-1))
@set_module('numpy')
def hamming(M):
"""
Return the Hamming window.
The Hamming window is a taper formed by using a weighted cosine.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : ndarray
The window, with the maximum value normalized to one (the value
one appears only if the number of samples is odd).
See Also
--------
bartlett, blackman, hanning, kaiser
Notes
-----
The Hamming window is defined as
.. math:: w(n) = 0.54 - 0.46cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
\\qquad 0 \\leq n \\leq M-1
The Hamming was named for R. W. Hamming, an associate of J. W. Tukey
and is described in Blackman and Tukey. It was recommended for
smoothing the truncated autocovariance function in the time domain.
Most references to the Hamming window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 109-110.
.. [3] Wikipedia, "Window function",
https://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
>>> np.hamming(12)
array([ 0.08 , 0.15302337, 0.34890909, 0.60546483, 0.84123594,
0.98136677, 0.98136677, 0.84123594, 0.60546483, 0.34890909,
0.15302337, 0.08 ])
Plot the window and the frequency response:
>>> import matplotlib.pyplot as plt
>>> from numpy.fft import fft, fftshift
>>> window = np.hamming(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Hamming window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Hamming window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0, M)
return 0.54 - 0.46*cos(2.0*pi*n/(M-1))
## Code from cephes for i0
_i0A = [
-4.41534164647933937950E-18,
3.33079451882223809783E-17,
-2.43127984654795469359E-16,
1.71539128555513303061E-15,
-1.16853328779934516808E-14,
7.67618549860493561688E-14,
-4.85644678311192946090E-13,
2.95505266312963983461E-12,
-1.72682629144155570723E-11,
9.67580903537323691224E-11,
-5.18979560163526290666E-10,
2.65982372468238665035E-9,
-1.30002500998624804212E-8,
6.04699502254191894932E-8,
-2.67079385394061173391E-7,
1.11738753912010371815E-6,
-4.41673835845875056359E-6,
1.64484480707288970893E-5,
-5.75419501008210370398E-5,
1.88502885095841655729E-4,
-5.76375574538582365885E-4,
1.63947561694133579842E-3,
-4.32430999505057594430E-3,
1.05464603945949983183E-2,
-2.37374148058994688156E-2,
4.93052842396707084878E-2,
-9.49010970480476444210E-2,
1.71620901522208775349E-1,
-3.04682672343198398683E-1,
6.76795274409476084995E-1
]
_i0B = [
-7.23318048787475395456E-18,
-4.83050448594418207126E-18,
4.46562142029675999901E-17,
3.46122286769746109310E-17,
-2.82762398051658348494E-16,
-3.42548561967721913462E-16,
1.77256013305652638360E-15,
3.81168066935262242075E-15,
-9.55484669882830764870E-15,
-4.15056934728722208663E-14,
1.54008621752140982691E-14,
3.85277838274214270114E-13,
7.18012445138366623367E-13,
-1.79417853150680611778E-12,
-1.32158118404477131188E-11,
-3.14991652796324136454E-11,
1.18891471078464383424E-11,
4.94060238822496958910E-10,
3.39623202570838634515E-9,
2.26666899049817806459E-8,
2.04891858946906374183E-7,
2.89137052083475648297E-6,
6.88975834691682398426E-5,
3.36911647825569408990E-3,
8.04490411014108831608E-1
]
def _chbevl(x, vals):
b0 = vals[0]
b1 = 0.0
for i in range(1, len(vals)):
b2 = b1
b1 = b0
b0 = x*b1 - b2 + vals[i]
return 0.5*(b0 - b2)
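# _chbevl is the Clenshaw recurrence used by the Cephes ``chbevl`` routine:
# the callers below first map their argument into the Chebyshev interval
# (x/2 - 2 for 0 <= x <= 8, and 32/x - 2 for x > 8) before evaluating the
# series coefficients _i0A / _i0B.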
def _i0_1(x):
return exp(x) * _chbevl(x/2.0-2, _i0A)
def _i0_2(x):
return exp(x) * _chbevl(32.0/x - 2.0, _i0B) / sqrt(x)
def _i0_dispatcher(x):
return (x,)
@array_function_dispatch(_i0_dispatcher)
def i0(x):
"""
Modified Bessel function of the first kind, order 0.
Usually denoted :math:`I_0`. This function does broadcast, but will *not*
"up-cast" int dtype arguments unless accompanied by at least one float or
complex dtype argument (see Raises below).
Parameters
----------
x : array_like, dtype float or complex
Argument of the Bessel function.
Returns
-------
out : ndarray, shape = x.shape, dtype = x.dtype
The modified Bessel function evaluated at each of the elements of `x`.
Raises
------
TypeError: array cannot be safely cast to required type
If argument consists exclusively of int dtypes.
See Also
--------
scipy.special.iv, scipy.special.ive
Notes
-----
We use the algorithm published by Clenshaw [1]_ and referenced by
Abramowitz and Stegun [2]_, for which the function domain is
partitioned into the two intervals [0,8] and (8,inf), and Chebyshev
polynomial expansions are employed in each interval. Relative error on
the domain [0,30] using IEEE arithmetic is documented [3]_ as having a
peak of 5.8e-16 with an rms of 1.4e-16 (n = 30000).
References
----------
.. [1] C. W. Clenshaw, "Chebyshev series for mathematical functions", in
*National Physical Laboratory Mathematical Tables*, vol. 5, London:
Her Majesty's Stationery Office, 1962.
.. [2] M. Abramowitz and I. A. Stegun, *Handbook of Mathematical
Functions*, 10th printing, New York: Dover, 1964, pp. 379.
http://www.math.sfu.ca/~cbm/aands/page_379.htm
.. [3] http://kobesearch.cpan.org/htdocs/Math-Cephes/Math/Cephes.html
Examples
--------
>>> np.i0([0.])
array(1.0)
>>> np.i0([0., 1. + 2j])
array([ 1.00000000+0.j , 0.18785373+0.64616944j])
"""
x = atleast_1d(x).copy()
y = empty_like(x)
ind = (x < 0)
x[ind] = -x[ind]
ind = (x <= 8.0)
y[ind] = _i0_1(x[ind])
ind2 = ~ind
y[ind2] = _i0_2(x[ind2])
return y.squeeze()
## End of cephes code for i0
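# i0 exploits the symmetry I0(-x) == I0(x) (hence the sign flip on negative
# entries) and switches between the two Chebyshev expansions at |x| == 8,
# mirroring the Cephes domain split; e.g. np.i0(0.0) goes through _i0_1 and
# returns 1.0.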
@set_module('numpy')
def kaiser(M, beta):
"""
Return the Kaiser window.
The Kaiser window is a taper formed by using a Bessel function.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
beta : float
Shape parameter for window.
Returns
-------
out : array
The window, with the maximum value normalized to one (the value
one appears only if the number of samples is odd).
See Also
--------
bartlett, blackman, hamming, hanning
Notes
-----
The Kaiser window is defined as
.. math:: w(n) = I_0\\left( \\beta \\sqrt{1-\\frac{4n^2}{(M-1)^2}}
\\right)/I_0(\\beta)
with
.. math:: \\quad -\\frac{M-1}{2} \\leq n \\leq \\frac{M-1}{2},
where :math:`I_0` is the modified zeroth-order Bessel function.
The Kaiser was named for Jim Kaiser, who discovered a simple
approximation to the DPSS window based on Bessel functions. The Kaiser
window is a very good approximation to the Digital Prolate Spheroidal
Sequence, or Slepian window, which is the transform which maximizes the
energy in the main lobe of the window relative to total energy.
The Kaiser can approximate many other windows by varying the beta
parameter.
==== =======================
beta Window shape
==== =======================
0 Rectangular
5 Similar to a Hamming
6 Similar to a Hanning
8.6 Similar to a Blackman
==== =======================
A beta value of 14 is probably a good starting point. Note that as beta
gets large, the window narrows, and so the number of samples needs to be
large enough to sample the increasingly narrow spike, otherwise NaNs will
get returned.
Most references to the Kaiser window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by
digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285.
John Wiley and Sons, New York, (1966).
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 177-178.
.. [3] Wikipedia, "Window function",
https://en.wikipedia.org/wiki/Window_function
Examples
--------
>>> import matplotlib.pyplot as plt
>>> np.kaiser(12, 14)
array([ 7.72686684e-06, 3.46009194e-03, 4.65200189e-02,
2.29737120e-01, 5.99885316e-01, 9.45674898e-01,
9.45674898e-01, 5.99885316e-01, 2.29737120e-01,
4.65200189e-02, 3.46009194e-03, 7.72686684e-06])
Plot the window and the frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.kaiser(51, 14)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Kaiser window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Kaiser window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
from numpy.dual import i0
if M == 1:
return np.array([1.])
n = arange(0, M)
alpha = (M-1)/2.0
return i0(beta * sqrt(1-((n-alpha)/alpha)**2.0))/i0(float(beta))
def _sinc_dispatcher(x):
return (x,)
@array_function_dispatch(_sinc_dispatcher)
def sinc(x):
"""
Return the sinc function.
The sinc function is :math:`\\sin(\\pi x)/(\\pi x)`.
Parameters
----------
x : ndarray
        Array (possibly multi-dimensional) of values for which to
calculate ``sinc(x)``.
Returns
-------
out : ndarray
``sinc(x)``, which has the same shape as the input.
Notes
-----
``sinc(0)`` is the limit value 1.
The name sinc is short for "sine cardinal" or "sinus cardinalis".
The sinc function is used in various signal processing applications,
including in anti-aliasing, in the construction of a Lanczos resampling
filter, and in interpolation.
For bandlimited interpolation of discrete-time signals, the ideal
interpolation kernel is proportional to the sinc function.
References
----------
.. [1] Weisstein, Eric W. "Sinc Function." From MathWorld--A Wolfram Web
Resource. http://mathworld.wolfram.com/SincFunction.html
.. [2] Wikipedia, "Sinc function",
https://en.wikipedia.org/wiki/Sinc_function
Examples
--------
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-4, 4, 41)
>>> np.sinc(x)
array([ -3.89804309e-17, -4.92362781e-02, -8.40918587e-02,
-8.90384387e-02, -5.84680802e-02, 3.89804309e-17,
6.68206631e-02, 1.16434881e-01, 1.26137788e-01,
8.50444803e-02, -3.89804309e-17, -1.03943254e-01,
-1.89206682e-01, -2.16236208e-01, -1.55914881e-01,
3.89804309e-17, 2.33872321e-01, 5.04551152e-01,
7.56826729e-01, 9.35489284e-01, 1.00000000e+00,
9.35489284e-01, 7.56826729e-01, 5.04551152e-01,
2.33872321e-01, 3.89804309e-17, -1.55914881e-01,
-2.16236208e-01, -1.89206682e-01, -1.03943254e-01,
-3.89804309e-17, 8.50444803e-02, 1.26137788e-01,
1.16434881e-01, 6.68206631e-02, 3.89804309e-17,
-5.84680802e-02, -8.90384387e-02, -8.40918587e-02,
-4.92362781e-02, -3.89804309e-17])
>>> plt.plot(x, np.sinc(x))
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Sinc Function")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("X")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
It works in 2-D as well:
>>> x = np.linspace(-4, 4, 401)
>>> xx = np.outer(x, x)
>>> plt.imshow(np.sinc(xx))
<matplotlib.image.AxesImage object at 0x...>
"""
x = np.asanyarray(x)
y = pi * where(x == 0, 1.0e-20, x)
return sin(y)/y
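# The 1.0e-20 substitution above sidesteps a 0/0 warning at x == 0: in double
# precision sin(pi * 1e-20) rounds to exactly pi * 1e-20, so the ratio is
# exactly 1.0 and sinc(0) returns the documented limit value.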
def _msort_dispatcher(a):
return (a,)
@array_function_dispatch(_msort_dispatcher)
def msort(a):
"""
Return a copy of an array sorted along the first axis.
Parameters
----------
a : array_like
Array to be sorted.
Returns
-------
sorted_array : ndarray
Array of the same type and shape as `a`.
See Also
--------
sort
Notes
-----
``np.msort(a)`` is equivalent to ``np.sort(a, axis=0)``.
"""
b = array(a, subok=True, copy=True)
b.sort(0)
return b
def _ureduce(a, func, **kwargs):
"""
Internal Function.
Call `func` with `a` as first argument swapping the axes to use extended
axis on functions that don't support it natively.
Returns result and a.shape with axis dims set to 1.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
func : callable
Reduction function capable of receiving a single axis argument.
It is called with `a` as first argument followed by `kwargs`.
kwargs : keyword arguments
additional keyword arguments to pass to `func`.
Returns
-------
result : tuple
Result of func(a, **kwargs) and a.shape with axis dims set to 1
which can be used to reshape the result to the same shape a ufunc with
keepdims=True would produce.
"""
a = np.asanyarray(a)
axis = kwargs.get('axis', None)
if axis is not None:
keepdim = list(a.shape)
nd = a.ndim
axis = _nx.normalize_axis_tuple(axis, nd)
for ax in axis:
keepdim[ax] = 1
if len(axis) == 1:
kwargs['axis'] = axis[0]
else:
keep = set(range(nd)) - set(axis)
nkeep = len(keep)
# swap axis that should not be reduced to front
for i, s in enumerate(sorted(keep)):
a = a.swapaxes(i, s)
# merge reduced axis
a = a.reshape(a.shape[:nkeep] + (-1,))
kwargs['axis'] = -1
keepdim = tuple(keepdim)
else:
keepdim = (1,) * a.ndim
r = func(a, **kwargs)
return r, keepdim
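# Example of the extended-axis handling above: for a of shape (2, 3, 4) and
# axis=(0, 2), the kept axis is swapped to the front, the array is reshaped
# to (3, 8) and ``func`` is called with axis=-1; the returned keepdim
# (1, 3, 1) lets callers such as median/percentile emulate keepdims=True by
# reshaping the result.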
def _median_dispatcher(
a, axis=None, out=None, overwrite_input=None, keepdims=None):
return (a, out)
@array_function_dispatch(_median_dispatcher)
def median(a, axis=None, out=None, overwrite_input=False, keepdims=False):
"""
Compute the median along the specified axis.
Returns the median of the array elements.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : {int, sequence of int, None}, optional
Axis or axes along which the medians are computed. The default
is to compute the median along a flattened version of the array.
A sequence of axes is supported since version 1.9.0.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output,
but the type (of the output) will be cast if necessary.
overwrite_input : bool, optional
If True, then allow use of memory of input array `a` for
calculations. The input array will be modified by the call to
`median`. This will save memory when you do not need to preserve
the contents of the input array. Treat the input as undefined,
but it will probably be fully or partially sorted. Default is
False. If `overwrite_input` is ``True`` and `a` is not already an
`ndarray`, an error will be raised.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
.. versionadded:: 1.9.0
Returns
-------
median : ndarray
A new array holding the result. If the input contains integers
or floats smaller than ``float64``, then the output data-type is
``np.float64``. Otherwise, the data-type of the output is the
same as that of the input. If `out` is specified, that array is
returned instead.
See Also
--------
mean, percentile
Notes
-----
Given a vector ``V`` of length ``N``, the median of ``V`` is the
    middle value of a sorted copy of ``V``, ``V_sorted`` - i.e.,
    ``V_sorted[(N-1)/2]``, when ``N`` is odd, and the average of the
two middle values of ``V_sorted`` when ``N`` is even.
Examples
--------
>>> a = np.array([[10, 7, 4], [3, 2, 1]])
>>> a
array([[10, 7, 4],
[ 3, 2, 1]])
>>> np.median(a)
3.5
>>> np.median(a, axis=0)
array([ 6.5, 4.5, 2.5])
>>> np.median(a, axis=1)
array([ 7., 2.])
>>> m = np.median(a, axis=0)
>>> out = np.zeros_like(m)
>>> np.median(a, axis=0, out=m)
array([ 6.5, 4.5, 2.5])
>>> m
array([ 6.5, 4.5, 2.5])
>>> b = a.copy()
>>> np.median(b, axis=1, overwrite_input=True)
array([ 7., 2.])
>>> assert not np.all(a==b)
>>> b = a.copy()
>>> np.median(b, axis=None, overwrite_input=True)
3.5
>>> assert not np.all(a==b)
"""
r, k = _ureduce(a, func=_median, axis=axis, out=out,
overwrite_input=overwrite_input)
if keepdims:
return r.reshape(k)
else:
return r
def _median(a, axis=None, out=None, overwrite_input=False):
    # can't reasonably be implemented in terms of percentile as we have to
# call mean to not break astropy
a = np.asanyarray(a)
# Set the partition indexes
if axis is None:
sz = a.size
else:
sz = a.shape[axis]
if sz % 2 == 0:
szh = sz // 2
kth = [szh - 1, szh]
else:
kth = [(sz - 1) // 2]
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
kth.append(-1)
if overwrite_input:
if axis is None:
part = a.ravel()
part.partition(kth)
else:
a.partition(kth, axis=axis)
part = a
else:
part = partition(a, kth, axis=axis)
if part.shape == ():
# make 0-D arrays work
return part.item()
if axis is None:
axis = 0
indexer = [slice(None)] * part.ndim
index = part.shape[axis] // 2
if part.shape[axis] % 2 == 1:
# index with slice to allow mean (below) to work
indexer[axis] = slice(index, index+1)
else:
indexer[axis] = slice(index-1, index+1)
indexer = tuple(indexer)
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact) and sz > 0:
# warn and return nans like mean would
rout = mean(part[indexer], axis=axis, out=out)
return np.lib.utils._median_nancheck(part, rout, axis, out)
else:
# if there are no nans
# Use mean in odd and even case to coerce data type
# and check, use out array.
return mean(part[indexer], axis=axis, out=out)
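# _median avoids a full sort by partitioning only the needed order
# statistics: for even sz the two middle elements (kth = [szh - 1, szh]) are
# moved into place and averaged via ``mean``; e.g. for [3, 1, 4, 2] the
# partition puts 2 and 3 at positions 1 and 2 and the median is 2.5.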
def _percentile_dispatcher(a, q, axis=None, out=None, overwrite_input=None,
interpolation=None, keepdims=None):
return (a, q, out)
@array_function_dispatch(_percentile_dispatcher)
def percentile(a, q, axis=None, out=None,
overwrite_input=False, interpolation='linear', keepdims=False):
"""
Compute the q-th percentile of the data along the specified axis.
Returns the q-th percentile(s) of the array elements.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
q : array_like of float
Percentile or sequence of percentiles to compute, which must be between
0 and 100 inclusive.
axis : {int, tuple of int, None}, optional
Axis or axes along which the percentiles are computed. The
default is to compute the percentile(s) along a flattened
version of the array.
.. versionchanged:: 1.9.0
A tuple of axes is supported
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output,
but the type (of the output) will be cast if necessary.
overwrite_input : bool, optional
If True, then allow the input array `a` to be modified by intermediate
calculations, to save memory. In this case, the contents of the input
`a` after this function completes is undefined.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to
use when the desired percentile lies between two data points
``i < j``:
* 'linear': ``i + (j - i) * fraction``, where ``fraction``
is the fractional part of the index surrounded by ``i``
and ``j``.
* 'lower': ``i``.
* 'higher': ``j``.
* 'nearest': ``i`` or ``j``, whichever is nearest.
* 'midpoint': ``(i + j) / 2``.
.. versionadded:: 1.9.0
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in
the result as dimensions with size one. With this option, the
result will broadcast correctly against the original array `a`.
.. versionadded:: 1.9.0
Returns
-------
percentile : scalar or ndarray
If `q` is a single percentile and `axis=None`, then the result
is a scalar. If multiple percentiles are given, first axis of
the result corresponds to the percentiles. The other axes are
the axes that remain after the reduction of `a`. If the input
contains integers or floats smaller than ``float64``, the output
data-type is ``float64``. Otherwise, the output data-type is the
same as that of the input. If `out` is specified, that array is
returned instead.
See Also
--------
mean
median : equivalent to ``percentile(..., 50)``
nanpercentile
quantile : equivalent to percentile, except with q in the range [0, 1].
Notes
-----
Given a vector ``V`` of length ``N``, the q-th percentile of
``V`` is the value ``q/100`` of the way from the minimum to the
maximum in a sorted copy of ``V``. The values and distances of
the two nearest neighbors as well as the `interpolation` parameter
will determine the percentile if the normalized ranking does not
match the location of ``q`` exactly. This function is the same as
the median if ``q=50``, the same as the minimum if ``q=0`` and the
same as the maximum if ``q=100``.
Examples
--------
>>> a = np.array([[10, 7, 4], [3, 2, 1]])
>>> a
array([[10, 7, 4],
[ 3, 2, 1]])
>>> np.percentile(a, 50)
3.5
>>> np.percentile(a, 50, axis=0)
    array([ 6.5,  4.5,  2.5])
>>> np.percentile(a, 50, axis=1)
array([ 7., 2.])
>>> np.percentile(a, 50, axis=1, keepdims=True)
array([[ 7.],
[ 2.]])
>>> m = np.percentile(a, 50, axis=0)
>>> out = np.zeros_like(m)
>>> np.percentile(a, 50, axis=0, out=out)
    array([ 6.5,  4.5,  2.5])
>>> m
    array([ 6.5,  4.5,  2.5])
>>> b = a.copy()
>>> np.percentile(b, 50, axis=1, overwrite_input=True)
array([ 7., 2.])
>>> assert not np.all(a == b)
The different types of interpolation can be visualized graphically:
.. plot::
import matplotlib.pyplot as plt
a = np.arange(4)
p = np.linspace(0, 100, 6001)
ax = plt.gca()
lines = [
('linear', None),
('higher', '--'),
('lower', '--'),
('nearest', '-.'),
('midpoint', '-.'),
]
for interpolation, style in lines:
ax.plot(
p, np.percentile(a, p, interpolation=interpolation),
label=interpolation, linestyle=style)
ax.set(
title='Interpolation methods for list: ' + str(a),
xlabel='Percentile',
ylabel='List item returned',
yticks=a)
ax.legend()
plt.show()
"""
q = np.true_divide(q, 100.0) # handles the asarray for us too
if not _quantile_is_valid(q):
raise ValueError("Percentiles must be in the range [0, 100]")
return _quantile_unchecked(
a, q, axis, out, overwrite_input, interpolation, keepdims)
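# percentile simply rescales q from [0, 100] to [0, 1] and defers to the
# shared quantile machinery below, so np.percentile(a, 50) and
# np.quantile(a, 0.5) compute the same value.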
def _quantile_dispatcher(a, q, axis=None, out=None, overwrite_input=None,
interpolation=None, keepdims=None):
return (a, q, out)
@array_function_dispatch(_quantile_dispatcher)
def quantile(a, q, axis=None, out=None,
overwrite_input=False, interpolation='linear', keepdims=False):
"""
Compute the q-th quantile of the data along the specified axis.
    .. versionadded:: 1.15.0
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
q : array_like of float
Quantile or sequence of quantiles to compute, which must be between
0 and 1 inclusive.
axis : {int, tuple of int, None}, optional
Axis or axes along which the quantiles are computed. The
default is to compute the quantile(s) along a flattened
version of the array.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output,
but the type (of the output) will be cast if necessary.
overwrite_input : bool, optional
If True, then allow the input array `a` to be modified by intermediate
calculations, to save memory. In this case, the contents of the input
`a` after this function completes is undefined.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to
use when the desired quantile lies between two data points
``i < j``:
* linear: ``i + (j - i) * fraction``, where ``fraction``
is the fractional part of the index surrounded by ``i``
and ``j``.
* lower: ``i``.
* higher: ``j``.
* nearest: ``i`` or ``j``, whichever is nearest.
* midpoint: ``(i + j) / 2``.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in
the result as dimensions with size one. With this option, the
result will broadcast correctly against the original array `a`.
Returns
-------
quantile : scalar or ndarray
If `q` is a single quantile and `axis=None`, then the result
is a scalar. If multiple quantiles are given, first axis of
the result corresponds to the quantiles. The other axes are
the axes that remain after the reduction of `a`. If the input
contains integers or floats smaller than ``float64``, the output
data-type is ``float64``. Otherwise, the output data-type is the
same as that of the input. If `out` is specified, that array is
returned instead.
See Also
--------
mean
percentile : equivalent to quantile, but with q in the range [0, 100].
median : equivalent to ``quantile(..., 0.5)``
nanquantile
Notes
-----
Given a vector ``V`` of length ``N``, the q-th quantile of
``V`` is the value ``q`` of the way from the minimum to the
maximum in a sorted copy of ``V``. The values and distances of
the two nearest neighbors as well as the `interpolation` parameter
will determine the quantile if the normalized ranking does not
match the location of ``q`` exactly. This function is the same as
the median if ``q=0.5``, the same as the minimum if ``q=0.0`` and the
same as the maximum if ``q=1.0``.
Examples
--------
>>> a = np.array([[10, 7, 4], [3, 2, 1]])
>>> a
array([[10, 7, 4],
[ 3, 2, 1]])
>>> np.quantile(a, 0.5)
3.5
>>> np.quantile(a, 0.5, axis=0)
    array([ 6.5,  4.5,  2.5])
>>> np.quantile(a, 0.5, axis=1)
array([ 7., 2.])
>>> np.quantile(a, 0.5, axis=1, keepdims=True)
array([[ 7.],
[ 2.]])
>>> m = np.quantile(a, 0.5, axis=0)
>>> out = np.zeros_like(m)
>>> np.quantile(a, 0.5, axis=0, out=out)
    array([ 6.5,  4.5,  2.5])
>>> m
    array([ 6.5,  4.5,  2.5])
>>> b = a.copy()
>>> np.quantile(b, 0.5, axis=1, overwrite_input=True)
array([ 7., 2.])
>>> assert not np.all(a == b)
"""
q = np.asanyarray(q)
if not _quantile_is_valid(q):
raise ValueError("Quantiles must be in the range [0, 1]")
return _quantile_unchecked(
a, q, axis, out, overwrite_input, interpolation, keepdims)
def _quantile_unchecked(a, q, axis=None, out=None, overwrite_input=False,
interpolation='linear', keepdims=False):
"""Assumes that q is in [0, 1], and is an ndarray"""
r, k = _ureduce(a, func=_quantile_ureduce_func, q=q, axis=axis, out=out,
overwrite_input=overwrite_input,
interpolation=interpolation)
if keepdims:
return r.reshape(q.shape + k)
else:
return r
def _quantile_is_valid(q):
# avoid expensive reductions, relevant for arrays with < O(1000) elements
if q.ndim == 1 and q.size < 10:
for i in range(q.size):
if q[i] < 0.0 or q[i] > 1.0:
return False
else:
# faster than any()
if np.count_nonzero(q < 0.0) or np.count_nonzero(q > 1.0):
return False
return True
def _quantile_ureduce_func(a, q, axis=None, out=None, overwrite_input=False,
interpolation='linear', keepdims=False):
a = asarray(a)
if q.ndim == 0:
# Do not allow 0-d arrays because following code fails for scalar
zerod = True
q = q[None]
else:
zerod = False
# prepare a for partitioning
if overwrite_input:
if axis is None:
ap = a.ravel()
else:
ap = a
else:
if axis is None:
ap = a.flatten()
else:
ap = a.copy()
if axis is None:
axis = 0
Nx = ap.shape[axis]
indices = q * (Nx - 1)
# round fractional indices according to interpolation method
if interpolation == 'lower':
indices = floor(indices).astype(intp)
elif interpolation == 'higher':
indices = ceil(indices).astype(intp)
elif interpolation == 'midpoint':
indices = 0.5 * (floor(indices) + ceil(indices))
elif interpolation == 'nearest':
indices = around(indices).astype(intp)
elif interpolation == 'linear':
pass # keep index as fraction and interpolate
else:
raise ValueError(
"interpolation can only be 'linear', 'lower' 'higher', "
"'midpoint', or 'nearest'")
n = np.array(False, dtype=bool) # check for nan's flag
if indices.dtype == intp: # take the points along axis
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
indices = concatenate((indices, [-1]))
ap.partition(indices, axis=axis)
# ensure axis with q-th is first
ap = np.moveaxis(ap, axis, 0)
axis = 0
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
indices = indices[:-1]
n = np.isnan(ap[-1:, ...])
if zerod:
indices = indices[0]
r = take(ap, indices, axis=axis, out=out)
else: # weight the points above and below the indices
indices_below = floor(indices).astype(intp)
indices_above = indices_below + 1
indices_above[indices_above > Nx - 1] = Nx - 1
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
indices_above = concatenate((indices_above, [-1]))
weights_above = indices - indices_below
weights_below = 1.0 - weights_above
weights_shape = [1, ] * ap.ndim
weights_shape[axis] = len(indices)
weights_below.shape = weights_shape
weights_above.shape = weights_shape
ap.partition(concatenate((indices_below, indices_above)), axis=axis)
# ensure axis with q-th is first
ap = np.moveaxis(ap, axis, 0)
weights_below = np.moveaxis(weights_below, axis, 0)
weights_above = np.moveaxis(weights_above, axis, 0)
axis = 0
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
indices_above = indices_above[:-1]
n = np.isnan(ap[-1:, ...])
x1 = take(ap, indices_below, axis=axis) * weights_below
x2 = take(ap, indices_above, axis=axis) * weights_above
# ensure axis with q-th is first
x1 = np.moveaxis(x1, axis, 0)
x2 = np.moveaxis(x2, axis, 0)
if zerod:
x1 = x1.squeeze(0)
x2 = x2.squeeze(0)
if out is not None:
r = add(x1, x2, out=out)
else:
r = add(x1, x2)
if np.any(n):
warnings.warn("Invalid value encountered in percentile",
RuntimeWarning, stacklevel=3)
if zerod:
if ap.ndim == 1:
if out is not None:
out[...] = a.dtype.type(np.nan)
r = out
else:
r = a.dtype.type(np.nan)
else:
r[..., n.squeeze(0)] = a.dtype.type(np.nan)
else:
if r.ndim == 1:
r[:] = a.dtype.type(np.nan)
else:
r[..., n.repeat(q.size, 0)] = a.dtype.type(np.nan)
return r
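# --- Editor's note: illustrative sketch, not part of the original NumPy
# source. For a sorted 1-D sample it spells out the 'linear' interpolation
# rule used above: a quantile q in [0, 1] becomes a fractional index and a
# weighted average of the two neighbouring points. The helper name is
# hypothetical and only meant for illustration; it assumes the module-level
# `np` used throughout this file.
def _example_linear_quantile(sorted_vals, q):
    n = len(sorted_vals)
    idx = q * (n - 1)          # fractional position within the sorted data
    lo = int(np.floor(idx))
    hi = min(lo + 1, n - 1)
    fraction = idx - lo
    # linear rule: i + (j - i) * fraction
    return sorted_vals[lo] + (sorted_vals[hi] - sorted_vals[lo]) * fraction
# e.g. _example_linear_quantile(np.array([1., 2., 10.]), 0.5) gives 2.0,
# matching np.quantile([1., 2., 10.], 0.5).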
def _trapz_dispatcher(y, x=None, dx=None, axis=None):
return (y, x)
@array_function_dispatch(_trapz_dispatcher)
def trapz(y, x=None, dx=1.0, axis=-1):
"""
Integrate along the given axis using the composite trapezoidal rule.
Integrate `y` (`x`) along given axis.
Parameters
----------
y : array_like
Input array to integrate.
x : array_like, optional
The sample points corresponding to the `y` values. If `x` is None,
the sample points are assumed to be evenly spaced `dx` apart. The
default is None.
dx : scalar, optional
The spacing between sample points when `x` is None. The default is 1.
axis : int, optional
The axis along which to integrate.
Returns
-------
trapz : float
Definite integral as approximated by trapezoidal rule.
See Also
--------
sum, cumsum
Notes
-----
Image [2]_ illustrates trapezoidal rule -- y-axis locations of points
will be taken from `y` array, by default x-axis distances between
points will be 1.0, alternatively they can be provided with `x` array
or with `dx` scalar. Return value will be equal to combined area under
the red lines.
References
----------
.. [1] Wikipedia page: https://en.wikipedia.org/wiki/Trapezoidal_rule
.. [2] Illustration image:
https://en.wikipedia.org/wiki/File:Composite_trapezoidal_rule_illustration.png
Examples
--------
>>> np.trapz([1,2,3])
4.0
>>> np.trapz([1,2,3], x=[4,6,8])
8.0
>>> np.trapz([1,2,3], dx=2)
8.0
>>> a = np.arange(6).reshape(2, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5]])
>>> np.trapz(a, axis=0)
array([ 1.5, 2.5, 3.5])
>>> np.trapz(a, axis=1)
array([ 2., 8.])
"""
y = asanyarray(y)
if x is None:
d = dx
else:
x = asanyarray(x)
if x.ndim == 1:
d = diff(x)
# reshape to correct shape
shape = [1]*y.ndim
shape[axis] = d.shape[0]
d = d.reshape(shape)
else:
d = diff(x, axis=axis)
nd = y.ndim
slice1 = [slice(None)]*nd
slice2 = [slice(None)]*nd
slice1[axis] = slice(1, None)
slice2[axis] = slice(None, -1)
try:
ret = (d * (y[tuple(slice1)] + y[tuple(slice2)]) / 2.0).sum(axis)
except ValueError:
# Operations didn't work, cast to ndarray
d = np.asarray(d)
y = np.asarray(y)
ret = add.reduce(d * (y[tuple(slice1)]+y[tuple(slice2)])/2.0, axis)
return ret
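# --- Editor's note: illustrative sketch, not part of the original NumPy
# source. It spells out the composite trapezoidal rule used by `trapz` for
# the 1-D case with explicit sample points, as a plain-Python cross-check of
# the vectorised implementation above.
def _example_trapz_1d(y, x):
    y = np.asarray(y, dtype=float)
    x = np.asarray(x, dtype=float)
    total = 0.0
    for k in range(len(y) - 1):
        # area of one trapezoid: width times mean height
        total += (x[k + 1] - x[k]) * (y[k] + y[k + 1]) / 2.0
    return total
# e.g. _example_trapz_1d([1, 2, 3], [4, 6, 8]) == 8.0, matching the
# np.trapz([1, 2, 3], x=[4, 6, 8]) example in the docstring above.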
def _meshgrid_dispatcher(*xi, **kwargs):
return xi
# Based on scitools meshgrid
@array_function_dispatch(_meshgrid_dispatcher)
def meshgrid(*xi, **kwargs):
"""
Return coordinate matrices from coordinate vectors.
Make N-D coordinate arrays for vectorized evaluations of
N-D scalar/vector fields over N-D grids, given
one-dimensional coordinate arrays x1, x2,..., xn.
.. versionchanged:: 1.9
1-D and 0-D cases are allowed.
Parameters
----------
x1, x2,..., xn : array_like
1-D arrays representing the coordinates of a grid.
indexing : {'xy', 'ij'}, optional
Cartesian ('xy', default) or matrix ('ij') indexing of output.
See Notes for more details.
.. versionadded:: 1.7.0
sparse : bool, optional
If True a sparse grid is returned in order to conserve memory.
Default is False.
.. versionadded:: 1.7.0
copy : bool, optional
If False, a view into the original arrays are returned in order to
conserve memory. Default is True. Please note that
``sparse=False, copy=False`` will likely return non-contiguous
arrays. Furthermore, more than one element of a broadcast array
may refer to a single memory location. If you need to write to the
arrays, make copies first.
.. versionadded:: 1.7.0
Returns
-------
X1, X2,..., XN : ndarray
        For vectors `x1`, `x2`,..., `xn` with lengths ``Ni=len(xi)``,
return ``(N1, N2, N3,...Nn)`` shaped arrays if indexing='ij'
or ``(N2, N1, N3,...Nn)`` shaped arrays if indexing='xy'
with the elements of `xi` repeated to fill the matrix along
the first dimension for `x1`, the second for `x2` and so on.
Notes
-----
This function supports both indexing conventions through the indexing
keyword argument. Giving the string 'ij' returns a meshgrid with
matrix indexing, while 'xy' returns a meshgrid with Cartesian indexing.
In the 2-D case with inputs of length M and N, the outputs are of shape
(N, M) for 'xy' indexing and (M, N) for 'ij' indexing. In the 3-D case
with inputs of length M, N and P, outputs are of shape (N, M, P) for
'xy' indexing and (M, N, P) for 'ij' indexing. The difference is
illustrated by the following code snippet::
xv, yv = np.meshgrid(x, y, sparse=False, indexing='ij')
for i in range(nx):
for j in range(ny):
# treat xv[i,j], yv[i,j]
xv, yv = np.meshgrid(x, y, sparse=False, indexing='xy')
for i in range(nx):
for j in range(ny):
# treat xv[j,i], yv[j,i]
In the 1-D and 0-D case, the indexing and sparse keywords have no effect.
See Also
--------
index_tricks.mgrid : Construct a multi-dimensional "meshgrid"
using indexing notation.
index_tricks.ogrid : Construct an open multi-dimensional "meshgrid"
using indexing notation.
Examples
--------
>>> nx, ny = (3, 2)
>>> x = np.linspace(0, 1, nx)
>>> y = np.linspace(0, 1, ny)
>>> xv, yv = np.meshgrid(x, y)
>>> xv
array([[ 0. , 0.5, 1. ],
[ 0. , 0.5, 1. ]])
>>> yv
array([[ 0., 0., 0.],
[ 1., 1., 1.]])
>>> xv, yv = np.meshgrid(x, y, sparse=True) # make sparse output arrays
>>> xv
array([[ 0. , 0.5, 1. ]])
>>> yv
array([[ 0.],
[ 1.]])
`meshgrid` is very useful to evaluate functions on a grid.
>>> import matplotlib.pyplot as plt
>>> x = np.arange(-5, 5, 0.1)
>>> y = np.arange(-5, 5, 0.1)
>>> xx, yy = np.meshgrid(x, y, sparse=True)
>>> z = np.sin(xx**2 + yy**2) / (xx**2 + yy**2)
>>> h = plt.contourf(x,y,z)
>>> plt.show()
"""
ndim = len(xi)
copy_ = kwargs.pop('copy', True)
sparse = kwargs.pop('sparse', False)
indexing = kwargs.pop('indexing', 'xy')
if kwargs:
raise TypeError("meshgrid() got an unexpected keyword argument '%s'"
% (list(kwargs)[0],))
if indexing not in ['xy', 'ij']:
raise ValueError(
"Valid values for `indexing` are 'xy' and 'ij'.")
s0 = (1,) * ndim
output = [np.asanyarray(x).reshape(s0[:i] + (-1,) + s0[i + 1:])
for i, x in enumerate(xi)]
if indexing == 'xy' and ndim > 1:
# switch first and second axis
output[0].shape = (1, -1) + s0[2:]
output[1].shape = (-1, 1) + s0[2:]
if not sparse:
# Return the full N-D matrix (not only the 1-D vector)
output = np.broadcast_arrays(*output, subok=True)
if copy_:
output = [x.copy() for x in output]
return output
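# --- Editor's note: illustrative sketch, not part of the original NumPy
# source. It demonstrates that the sparse and dense outputs of `meshgrid`
# broadcast to the same grid, which is why `sparse=True` can save memory
# without changing results. The helper name is hypothetical.
def _example_meshgrid_sparse_equivalence():
    x = np.linspace(0, 1, 3)
    y = np.linspace(0, 1, 2)
    xv_d, yv_d = np.meshgrid(x, y)               # dense (2, 3) arrays
    xv_s, yv_s = np.meshgrid(x, y, sparse=True)  # (1, 3) and (2, 1) arrays
    # broadcasting the sparse pair reproduces the dense grid exactly
    return (np.array_equal(xv_d, np.broadcast_to(xv_s, xv_d.shape)) and
            np.array_equal(yv_d, np.broadcast_to(yv_s, yv_d.shape)))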
def _delete_dispatcher(arr, obj, axis=None):
return (arr, obj)
@array_function_dispatch(_delete_dispatcher)
def delete(arr, obj, axis=None):
"""
Return a new array with sub-arrays along an axis deleted. For a one
dimensional array, this returns those entries not returned by
`arr[obj]`.
Parameters
----------
arr : array_like
Input array.
obj : slice, int or array of ints
Indicate which sub-arrays to remove.
axis : int, optional
The axis along which to delete the subarray defined by `obj`.
If `axis` is None, `obj` is applied to the flattened array.
Returns
-------
out : ndarray
A copy of `arr` with the elements specified by `obj` removed. Note
that `delete` does not occur in-place. If `axis` is None, `out` is
a flattened array.
See Also
--------
insert : Insert elements into an array.
append : Append elements at the end of an array.
Notes
-----
Often it is preferable to use a boolean mask. For example:
>>> mask = np.ones(len(arr), dtype=bool)
>>> mask[[0,2,4]] = False
>>> result = arr[mask,...]
Is equivalent to `np.delete(arr, [0,2,4], axis=0)`, but allows further
use of `mask`.
Examples
--------
>>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
>>> arr
array([[ 1, 2, 3, 4],
[ 5, 6, 7, 8],
[ 9, 10, 11, 12]])
>>> np.delete(arr, 1, 0)
array([[ 1, 2, 3, 4],
[ 9, 10, 11, 12]])
>>> np.delete(arr, np.s_[::2], 1)
array([[ 2, 4],
[ 6, 8],
[10, 12]])
>>> np.delete(arr, [1,3,5], None)
array([ 1, 3, 5, 7, 8, 9, 10, 11, 12])
"""
wrap = None
if type(arr) is not ndarray:
try:
wrap = arr.__array_wrap__
except AttributeError:
pass
arr = asarray(arr)
ndim = arr.ndim
arrorder = 'F' if arr.flags.fnc else 'C'
if axis is None:
if ndim != 1:
arr = arr.ravel()
ndim = arr.ndim
axis = -1
if ndim == 0:
# 2013-09-24, 1.9
warnings.warn(
"in the future the special handling of scalars will be removed "
"from delete and raise an error", DeprecationWarning, stacklevel=2)
if wrap:
return wrap(arr)
else:
return arr.copy(order=arrorder)
axis = normalize_axis_index(axis, ndim)
slobj = [slice(None)]*ndim
N = arr.shape[axis]
newshape = list(arr.shape)
if isinstance(obj, slice):
start, stop, step = obj.indices(N)
xr = range(start, stop, step)
numtodel = len(xr)
if numtodel <= 0:
if wrap:
return wrap(arr.copy(order=arrorder))
else:
return arr.copy(order=arrorder)
# Invert if step is negative:
if step < 0:
step = -step
start = xr[-1]
stop = xr[0] + 1
newshape[axis] -= numtodel
new = empty(newshape, arr.dtype, arrorder)
# copy initial chunk
if start == 0:
pass
else:
slobj[axis] = slice(None, start)
new[tuple(slobj)] = arr[tuple(slobj)]
# copy end chunck
if stop == N:
pass
else:
slobj[axis] = slice(stop-numtodel, None)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(stop, None)
new[tuple(slobj)] = arr[tuple(slobj2)]
# copy middle pieces
if step == 1:
pass
else: # use array indexing.
keep = ones(stop-start, dtype=bool)
keep[:stop-start:step] = False
slobj[axis] = slice(start, stop-numtodel)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(start, stop)
arr = arr[tuple(slobj2)]
slobj2[axis] = keep
new[tuple(slobj)] = arr[tuple(slobj2)]
if wrap:
return wrap(new)
else:
return new
_obj = obj
obj = np.asarray(obj)
# After removing the special handling of booleans and out of
# bounds values, the conversion to the array can be removed.
if obj.dtype == bool:
warnings.warn("in the future insert will treat boolean arrays and "
"array-likes as boolean index instead of casting it "
"to integer", FutureWarning, stacklevel=2)
obj = obj.astype(intp)
if isinstance(_obj, (int, long, integer)):
# optimization for a single value
obj = obj.item()
if (obj < -N or obj >= N):
raise IndexError(
"index %i is out of bounds for axis %i with "
"size %i" % (obj, axis, N))
if (obj < 0):
obj += N
newshape[axis] -= 1
new = empty(newshape, arr.dtype, arrorder)
slobj[axis] = slice(None, obj)
new[tuple(slobj)] = arr[tuple(slobj)]
slobj[axis] = slice(obj, None)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(obj+1, None)
new[tuple(slobj)] = arr[tuple(slobj2)]
else:
if obj.size == 0 and not isinstance(_obj, np.ndarray):
obj = obj.astype(intp)
if not np.can_cast(obj, intp, 'same_kind'):
# obj.size = 1 special case always failed and would just
# give superfluous warnings.
# 2013-09-24, 1.9
warnings.warn(
"using a non-integer array as obj in delete will result in an "
"error in the future", DeprecationWarning, stacklevel=2)
obj = obj.astype(intp)
keep = ones(N, dtype=bool)
# Test if there are out of bound indices, this is deprecated
inside_bounds = (obj < N) & (obj >= -N)
if not inside_bounds.all():
# 2013-09-24, 1.9
warnings.warn(
"in the future out of bounds indices will raise an error "
"instead of being ignored by `numpy.delete`.",
DeprecationWarning, stacklevel=2)
obj = obj[inside_bounds]
positive_indices = obj >= 0
if not positive_indices.all():
warnings.warn(
"in the future negative indices will not be ignored by "
"`numpy.delete`.", FutureWarning, stacklevel=2)
obj = obj[positive_indices]
keep[obj, ] = False
slobj[axis] = keep
new = arr[tuple(slobj)]
if wrap:
return wrap(new)
else:
return new
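# --- Editor's note: illustrative sketch, not part of the original NumPy
# source. It restates the boolean-mask equivalence mentioned in the Notes of
# `delete` as a small runnable check; `delete` always returns a copy, so the
# input array is left untouched. The helper name is hypothetical.
def _example_delete_vs_mask(arr, indices):
    arr = np.asarray(arr)
    mask = np.ones(arr.shape[0], dtype=bool)
    mask[indices] = False
    return np.array_equal(np.delete(arr, indices, axis=0), arr[mask])
# e.g. _example_delete_vs_mask(np.arange(10), [0, 2, 4]) is True.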
def _insert_dispatcher(arr, obj, values, axis=None):
return (arr, obj, values)
@array_function_dispatch(_insert_dispatcher)
def insert(arr, obj, values, axis=None):
"""
Insert values along the given axis before the given indices.
Parameters
----------
arr : array_like
Input array.
obj : int, slice or sequence of ints
Object that defines the index or indices before which `values` is
inserted.
.. versionadded:: 1.8.0
Support for multiple insertions when `obj` is a single scalar or a
sequence with one element (similar to calling insert multiple
times).
values : array_like
Values to insert into `arr`. If the type of `values` is different
from that of `arr`, `values` is converted to the type of `arr`.
`values` should be shaped so that ``arr[...,obj,...] = values``
is legal.
axis : int, optional
Axis along which to insert `values`. If `axis` is None then `arr`
is flattened first.
Returns
-------
out : ndarray
A copy of `arr` with `values` inserted. Note that `insert`
does not occur in-place: a new array is returned. If
`axis` is None, `out` is a flattened array.
See Also
--------
append : Append elements at the end of an array.
concatenate : Join a sequence of arrays along an existing axis.
delete : Delete elements from an array.
Notes
-----
Note that for higher dimensional inserts `obj=0` behaves very different
from `obj=[0]` just like `arr[:,0,:] = values` is different from
`arr[:,[0],:] = values`.
Examples
--------
>>> a = np.array([[1, 1], [2, 2], [3, 3]])
>>> a
array([[1, 1],
[2, 2],
[3, 3]])
>>> np.insert(a, 1, 5)
array([1, 5, 1, 2, 2, 3, 3])
>>> np.insert(a, 1, 5, axis=1)
array([[1, 5, 1],
[2, 5, 2],
[3, 5, 3]])
Difference between sequence and scalars:
>>> np.insert(a, [1], [[1],[2],[3]], axis=1)
array([[1, 1, 1],
[2, 2, 2],
[3, 3, 3]])
>>> np.array_equal(np.insert(a, 1, [1, 2, 3], axis=1),
... np.insert(a, [1], [[1],[2],[3]], axis=1))
True
>>> b = a.flatten()
>>> b
array([1, 1, 2, 2, 3, 3])
>>> np.insert(b, [2, 2], [5, 6])
array([1, 1, 5, 6, 2, 2, 3, 3])
>>> np.insert(b, slice(2, 4), [5, 6])
array([1, 1, 5, 2, 6, 2, 3, 3])
>>> np.insert(b, [2, 2], [7.13, False]) # type casting
array([1, 1, 7, 0, 2, 2, 3, 3])
>>> x = np.arange(8).reshape(2, 4)
>>> idx = (1, 3)
>>> np.insert(x, idx, 999, axis=1)
array([[ 0, 999, 1, 2, 999, 3],
[ 4, 999, 5, 6, 999, 7]])
"""
wrap = None
if type(arr) is not ndarray:
try:
wrap = arr.__array_wrap__
except AttributeError:
pass
arr = asarray(arr)
ndim = arr.ndim
arrorder = 'F' if arr.flags.fnc else 'C'
if axis is None:
if ndim != 1:
arr = arr.ravel()
ndim = arr.ndim
axis = ndim - 1
elif ndim == 0:
# 2013-09-24, 1.9
warnings.warn(
"in the future the special handling of scalars will be removed "
"from insert and raise an error", DeprecationWarning, stacklevel=2)
arr = arr.copy(order=arrorder)
arr[...] = values
if wrap:
return wrap(arr)
else:
return arr
else:
axis = normalize_axis_index(axis, ndim)
slobj = [slice(None)]*ndim
N = arr.shape[axis]
newshape = list(arr.shape)
if isinstance(obj, slice):
# turn it into a range object
indices = arange(*obj.indices(N), **{'dtype': intp})
else:
# need to copy obj, because indices will be changed in-place
indices = np.array(obj)
if indices.dtype == bool:
# See also delete
warnings.warn(
"in the future insert will treat boolean arrays and "
"array-likes as a boolean index instead of casting it to "
"integer", FutureWarning, stacklevel=2)
indices = indices.astype(intp)
# Code after warning period:
#if obj.ndim != 1:
# raise ValueError('boolean array argument obj to insert '
# 'must be one dimensional')
#indices = np.flatnonzero(obj)
elif indices.ndim > 1:
raise ValueError(
"index array argument obj to insert must be one dimensional "
"or scalar")
if indices.size == 1:
index = indices.item()
if index < -N or index > N:
raise IndexError(
"index %i is out of bounds for axis %i with "
"size %i" % (obj, axis, N))
if (index < 0):
index += N
# There are some object array corner cases here, but we cannot avoid
# that:
values = array(values, copy=False, ndmin=arr.ndim, dtype=arr.dtype)
if indices.ndim == 0:
# broadcasting is very different here, since a[:,0,:] = ... behaves
# very different from a[:,[0],:] = ...! This changes values so that
# it works likes the second case. (here a[:,0:1,:])
values = np.moveaxis(values, 0, axis)
numnew = values.shape[axis]
newshape[axis] += numnew
new = empty(newshape, arr.dtype, arrorder)
slobj[axis] = slice(None, index)
new[tuple(slobj)] = arr[tuple(slobj)]
slobj[axis] = slice(index, index+numnew)
new[tuple(slobj)] = values
slobj[axis] = slice(index+numnew, None)
slobj2 = [slice(None)] * ndim
slobj2[axis] = slice(index, None)
new[tuple(slobj)] = arr[tuple(slobj2)]
if wrap:
return wrap(new)
return new
elif indices.size == 0 and not isinstance(obj, np.ndarray):
# Can safely cast the empty list to intp
indices = indices.astype(intp)
if not np.can_cast(indices, intp, 'same_kind'):
# 2013-09-24, 1.9
warnings.warn(
"using a non-integer array as obj in insert will result in an "
"error in the future", DeprecationWarning, stacklevel=2)
indices = indices.astype(intp)
indices[indices < 0] += N
numnew = len(indices)
order = indices.argsort(kind='mergesort') # stable sort
indices[order] += np.arange(numnew)
newshape[axis] += numnew
old_mask = ones(newshape[axis], dtype=bool)
old_mask[indices] = False
new = empty(newshape, arr.dtype, arrorder)
slobj2 = [slice(None)]*ndim
slobj[axis] = indices
slobj2[axis] = old_mask
new[tuple(slobj)] = values
new[tuple(slobj2)] = arr
if wrap:
return wrap(new)
return new
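# --- Editor's note: illustrative sketch, not part of the original NumPy
# source. It illustrates the scalar-versus-sequence distinction for `obj`
# described in the Notes above: a scalar index broadcasts `values` along the
# insertion axis, while a one-element sequence inserts it column-wise.
def _example_insert_scalar_vs_sequence():
    a = np.array([[1, 1], [2, 2], [3, 3]])
    with_scalar = np.insert(a, 1, 5, axis=1)            # 5 broadcast to a column
    with_sequence = np.insert(a, [1], [[5], [5], [5]], axis=1)
    return np.array_equal(with_scalar, with_sequence)   # True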
def _append_dispatcher(arr, values, axis=None):
return (arr, values)
@array_function_dispatch(_append_dispatcher)
def append(arr, values, axis=None):
"""
Append values to the end of an array.
Parameters
----------
arr : array_like
Values are appended to a copy of this array.
values : array_like
These values are appended to a copy of `arr`. It must be of the
correct shape (the same shape as `arr`, excluding `axis`). If
`axis` is not specified, `values` can be any shape and will be
flattened before use.
axis : int, optional
The axis along which `values` are appended. If `axis` is not
given, both `arr` and `values` are flattened before use.
Returns
-------
append : ndarray
A copy of `arr` with `values` appended to `axis`. Note that
`append` does not occur in-place: a new array is allocated and
filled. If `axis` is None, `out` is a flattened array.
See Also
--------
insert : Insert elements into an array.
delete : Delete elements from an array.
Examples
--------
>>> np.append([1, 2, 3], [[4, 5, 6], [7, 8, 9]])
array([1, 2, 3, 4, 5, 6, 7, 8, 9])
When `axis` is specified, `values` must have the correct shape.
>>> np.append([[1, 2, 3], [4, 5, 6]], [[7, 8, 9]], axis=0)
array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
>>> np.append([[1, 2, 3], [4, 5, 6]], [7, 8, 9], axis=0)
Traceback (most recent call last):
...
ValueError: arrays must have same number of dimensions
"""
arr = asanyarray(arr)
if axis is None:
if arr.ndim != 1:
arr = arr.ravel()
values = ravel(values)
axis = arr.ndim-1
return concatenate((arr, values), axis=axis)
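# --- Editor's note: illustrative sketch, not part of the original NumPy
# source. It contrasts the flattening behaviour of `append` without `axis`
# against the shape-preserving behaviour when `axis` is given.
def _example_append_axis_behaviour():
    a = np.array([[1, 2], [3, 4]])
    flat = np.append(a, [[5, 6]])             # no axis: result is 1-D
    stacked = np.append(a, [[5, 6]], axis=0)  # axis=0: stays 2-D
    return flat.shape, stacked.shape          # ((6,), (3, 2))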
def _digitize_dispatcher(x, bins, right=None):
return (x, bins)
@array_function_dispatch(_digitize_dispatcher)
def digitize(x, bins, right=False):
"""
Return the indices of the bins to which each value in input array belongs.
========= ============= ============================
`right` order of bins returned index `i` satisfies
========= ============= ============================
``False`` increasing ``bins[i-1] <= x < bins[i]``
``True`` increasing ``bins[i-1] < x <= bins[i]``
``False`` decreasing ``bins[i-1] > x >= bins[i]``
``True`` decreasing ``bins[i-1] >= x > bins[i]``
========= ============= ============================
If values in `x` are beyond the bounds of `bins`, 0 or ``len(bins)`` is
returned as appropriate.
Parameters
----------
x : array_like
Input array to be binned. Prior to NumPy 1.10.0, this array had to
be 1-dimensional, but can now have any shape.
bins : array_like
Array of bins. It has to be 1-dimensional and monotonic.
right : bool, optional
Indicating whether the intervals include the right or the left bin
edge. Default behavior is (right==False) indicating that the interval
does not include the right edge. The left bin end is open in this
case, i.e., bins[i-1] <= x < bins[i] is the default behavior for
monotonically increasing bins.
Returns
-------
indices : ndarray of ints
Output array of indices, of same shape as `x`.
Raises
------
ValueError
If `bins` is not monotonic.
TypeError
If the type of the input is complex.
See Also
--------
bincount, histogram, unique, searchsorted
Notes
-----
If values in `x` are such that they fall outside the bin range,
attempting to index `bins` with the indices that `digitize` returns
will result in an IndexError.
.. versionadded:: 1.10.0
`np.digitize` is implemented in terms of `np.searchsorted`. This means
that a binary search is used to bin the values, which scales much better
for larger number of bins than the previous linear search. It also removes
the requirement for the input array to be 1-dimensional.
For monotonically _increasing_ `bins`, the following are equivalent::
np.digitize(x, bins, right=True)
np.searchsorted(bins, x, side='left')
Note that as the order of the arguments are reversed, the side must be too.
The `searchsorted` call is marginally faster, as it does not do any
monotonicity checks. Perhaps more importantly, it supports all dtypes.
Examples
--------
>>> x = np.array([0.2, 6.4, 3.0, 1.6])
>>> bins = np.array([0.0, 1.0, 2.5, 4.0, 10.0])
>>> inds = np.digitize(x, bins)
>>> inds
array([1, 4, 3, 2])
>>> for n in range(x.size):
... print(bins[inds[n]-1], "<=", x[n], "<", bins[inds[n]])
...
0.0 <= 0.2 < 1.0
4.0 <= 6.4 < 10.0
2.5 <= 3.0 < 4.0
1.0 <= 1.6 < 2.5
>>> x = np.array([1.2, 10.0, 12.4, 15.5, 20.])
>>> bins = np.array([0, 5, 10, 15, 20])
>>> np.digitize(x,bins,right=True)
array([1, 2, 3, 4, 4])
>>> np.digitize(x,bins,right=False)
array([1, 3, 3, 4, 5])
"""
x = _nx.asarray(x)
bins = _nx.asarray(bins)
# here for compatibility, searchsorted below is happy to take this
if np.issubdtype(x.dtype, _nx.complexfloating):
raise TypeError("x may not be complex")
mono = _monotonicity(bins)
if mono == 0:
raise ValueError("bins must be monotonically increasing or decreasing")
# this is backwards because the arguments below are swapped
side = 'left' if right else 'right'
if mono == -1:
# reverse the bins, and invert the results
return len(bins) - _nx.searchsorted(bins[::-1], x, side=side)
else:
return _nx.searchsorted(bins, x, side=side)
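# --- Editor's note: illustrative sketch, not part of the original NumPy
# source. It checks the `searchsorted` equivalence stated in the Notes of
# `digitize` for monotonically increasing bins (note that `right` and `side`
# are flipped because the argument order is reversed).
def _example_digitize_vs_searchsorted(x, bins):
    x = np.asarray(x)
    bins = np.asarray(bins)
    return np.array_equal(np.digitize(x, bins, right=True),
                          np.searchsorted(bins, x, side='left'))
# e.g. _example_digitize_vs_searchsorted([1.2, 10.0, 12.4], [0, 5, 10, 15, 20])
# is True.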
| mit |
santiago-salas-v/walas | wgs_r.py | 1 | 25554 | import numpy as np
from matplotlib import pyplot as plt
from matplotlib import lines
import ctypes
import os
from scipy.integrate import odeint
from crt_lib import mm, namen, h_298, cp_ig_durch_r, mu, delta_h_r, tc, pc, omega_af, df_dt
import z_l_v
# Journal of Power Sources 173 (2007) 467–477
# Reactor
n_t = 1 # Rohre
d_t = 0.06 # m Rohrdurchmesser
l_r = 0.3 # m Rohrlänge
# Catalyst
d_p = 0.0005 # m Feststoff
rho_b = 1190 # kg Kat/m^3 Feststoff
phi = 0.38 + 0.073 * (1 - (d_t / d_p - 2)**2 / (d_t / d_p)
** 2) # m^3 Gas/m^3 Feststoff
m_kat = 1190 * (1 - 0.3) * np.pi / 4 * (
0.03)**2 * 7 # kg Kat (pro Rohr)
# Operating conditions
t0 = 187.5 + 273.15 # K
p0 = 1.01325 # bar
# Heat transfer parameters
u = 0 # W/m^2/K
# Coolant properties (VDI Heat Atlas)
t_r = (240 - 230) / (33.467 - 27.968) * (
29 - 33.467) + 240 + 273.15 # K
p_sat = 29 # bar
h_sat_l = (1037.5 - 990.21) / (33.467 - 27.968) * (
29 - 33.467) + 1037.5 # kJ/kg
h_sat_v = (2803.1 - 2803.0) / (33.467 - 27.968) * (
29 - 33.467) + 2803.1 # kJ/kg
delta_h_sat = (h_sat_v - h_sat_l)
# Feed conditions
n_i_0 = np.array([
6.6, 9.1, 36.0,
26.4, 0, 4.7,
0, 0, 0,
0
]) / 60**2 # mol/s
y_i0 = n_i_0 / sum(n_i_0)
m_dot = sum(n_i_0 * mm / 1000.) # kg/s
alpha_tr, epsilon, sigma, psi, omega = z_l_v.use_pr_eos()
# Stoichiometric coefficients
nuij = np.zeros([len(namen), 1])
# RWGS reverse water-gas shift reaction (must match the forward reaction,
# for which the kinetics are available)
nuij[[
namen.index('CO2'),
namen.index('H2'),
namen.index('MeOH'),
namen.index('H2O'),
namen.index('CO'),
namen.index('N2'),
], 0] = - np.array([+1, +1, 0, -1, -1, 0], dtype=float)
delta_h_r_298 = nuij.T.dot(h_298) # J/mol
# Parameter calculation
g = m_dot / (np.pi / 4 * d_t**2) / n_t # kg/m^2/s
mm_m_0 = sum(y_i0 * mm) * 1 / 1000. # kg/mol
cp_m_0 = sum(y_i0 * cp_ig_durch_r(t0) * 8.3145) # J/mol/K
cp_g_0 = cp_m_0 / mm_m_0 # J/kg/K
# Number of transfer units (NTU)
ntu = l_r * 1 / (g * cp_g_0) * 2 * u / (d_t / 2)
# m * m^2 s/kg * kg K /J * J/s/m^2/K *1/m = [dimensionslose Einheiten]
# Stoichiometric number
sn = (y_i0[namen.index('H2')] - y_i0[namen.index('CO2')]) / (
y_i0[namen.index('CO2')] + y_i0[namen.index('CO')]
)
y_0 = np.empty([len(y_i0) + 1 + 1])
y_0[:-2] = y_i0
y_0[-2] = p0
y_0[-1] = t0
z_d_l_r = np.linspace(0, 1, 100)
d_z_dimlos = 1 / (len(z_d_l_r) - 1) # dimlos
dlr = d_z_dimlos * l_r # m
soln = odeint(lambda y, z0:
df_dt(y, z0, g, d_t, l_r,
phi, d_p, rho_b,
u, t_r,
alpha_tr, epsilon, sigma, psi, omega ),
y_0, z_d_l_r)
y_i_soln = soln[:, :len(y_i0)]
p_soln = soln[:, -2]
t_soln = soln[:, -1]
n_i_soln = np.zeros_like(y_i_soln)
m_i_soln = np.zeros_like(y_i_soln)
n_soln = np.zeros_like(z_d_l_r)
mm_m_soln = np.zeros_like(z_d_l_r)
m_soln = np.zeros_like(z_d_l_r)
v_soln = np.zeros_like(z_d_l_r)
m_km_soln = np.zeros_like(z_d_l_r)
ums_soln = np.zeros_like(z_d_l_r)
for i in range(len(z_d_l_r)):
mm_m_soln[i] = sum(y_i_soln[i] * mm * 1 / 1000.) # kg/mol
n_soln[i] = g * n_t * 60**2 * (np.pi / 4 * d_t**2) / mm_m_soln[i]
# kg/s/m^2 * 60^2s/h * m^2 / kg*mol = mol/h
m_soln[i] = g * n_t * 60**2 * (np.pi / 4 * d_t**2) # kg/h
n_i_soln[i] = n_soln[i] * y_i_soln[i] # mol/h
m_i_soln[i] = n_soln[i] * y_i_soln[i] * (mm * 1 / 1000.)
# mol/h * g/mol * 1kg/1000g
v_soln[i] = n_soln[i] * 8.3145 * 1e-5 * t_soln[i] / p_soln[i]
ums_soln[i] = (n_i_soln[0][namen.index('CO')] -
n_i_soln[i][namen.index('CO')]
) / n_i_soln[0][namen.index('CO')]
m_km_soln[i] = u * (2 / (d_t / 2)) * (t_soln[i] - t_r) * (
np.pi / 4 * d_t**2) / delta_h_sat * n_t * 60**2 / 1000.
# J/s/K/m^2 * 1/m * K * m^2 * kg/kJ * 60^2s/h * 1kJ/(1000J) = kg/h/m
# Energy analysis
t_m_1 = 1 / 2 * (36696 / 8.3145 - np.sqrt(36696 /
8.3145 * (36696 / 8.3145 - 4 * t_r)))
t_m_2 = 1 / 2 * (-94765 / 8.3145 - np.sqrt(-94765 /
8.3145 * (-94765 / 8.3145 - 4 * t_r)))
vars_1 = [
[r'\rho_b', rho_b, r'\frac{kg_{Kat}}{m^3_{Schüttung}}'],
['\phi', phi, r'\frac{m^3_{Gas}}{m^3_{Schüttung}}'],
['D_p', d_p, 'm'],
]
vars_2 = [
['D_t', d_t, 'm'],
['L_R', l_r, 'm'],
['n_T', n_t, ''],
['T_0', t0 - 273.15, '°C'],
['P_0', p0, 'bar'],
]
vars_3 = [
['U', u, r'\frac{W}{m^2\cdot K}'],
['\dot m', m_dot, 'kg/s'],
['C_{p_g}', cp_g_0 / 1000., r'\frac{kJ}{kg\cdot K}'],
['NTU', ntu, ''],
['SN', sn, ''],
]
vars_4 = [
['T_r', t_r - 273.15, '°C_{Kühlmittel}'],
['P_{Sät}', p_sat, 'bar_{Kühlmittel}'],
['\Delta h_{v}', delta_h_sat, r'\frac{kJ}{kg_{Kühlmittel}}'],
]
text_1 = '\n'.join(['$' + ' = '.join([line[0], '{:g}'.format(line[1]) +
' ' + line[2]]) + '$'
for line in vars_1])
text_2 = '\n'.join(['$' + ' = '.join([line[0], '{:g}'.format(line[1]) +
' ' + line[2]]) + '$'
for line in vars_2])
text_3 = '\n'.join(['$' + ' = '.join([line[0], '{:g}'.format(line[1]) +
' ' + line[2]]) + '$'
for line in vars_3])
text_4 = '\n'.join(['$' + ' = '.join([line[0], '{:g}'.format(line[1]) +
' ' + line[2]]) + '$'
for line in vars_4])
fig = plt.figure(1)
fig.suptitle('Adiabates System' +
'(Journal of Power Sources 173 (2007) 467–477)')
fig.text(0.05, 0.935, text_1, va='top', fontsize=8)
fig.text(0.25, 0.935, text_2, va='top', fontsize=8)
fig.text(0.50, 0.935, text_3, va='top', fontsize=8)
fig.text(0.75, 0.935, text_4, va='top', fontsize=8)
ax = plt.subplot2grid([2, 3], [0, 0])
ax.plot(z_d_l_r, v_soln, label='$\dot V$')
ax.set_ylabel(r'$\frac{\dot V}{m^3/h}$')
ax.set_xlabel('Reduzierte Position, $z/L_R$')
ax2 = plt.subplot2grid([2, 3], [1, 0])
ax2.plot(z_d_l_r, m_km_soln)
ax2.fill_between(z_d_l_r, 0, m_km_soln, color='orange')
ax2.text(0.3, 1 / 2. * (m_km_soln[0] + m_km_soln[-1]),
'{:g}'.format(sum(m_km_soln * dlr)) + 'kg/h \n')
ax2.set_ylabel(r'$\frac{\dot m_{Kuehlmittel}}{\frac{kg}{h\cdot m}}$')
ax2.set_xlabel('Reduzierte Position, $z/L_R$')
ax3 = plt.subplot2grid([2, 3], [1, 1], colspan=2)
ax3.set_ylabel('Massenstrom / (kg/h)')
ax3.set_xlabel('Reduzierte Position, $z/L_R$')
for item in ['CO', 'H2O', 'MeOH', 'CO2']:
marker = np.random.choice(list(lines.lineMarkers.keys()))
index = namen.index(item)
ax3.plot(z_d_l_r, m_i_soln[:, index], label=item,
marker=marker)
ax3.legend(loc=1)
ax4 = plt.subplot2grid([2, 3], [0, 1])
ax4_1 = ax4.twinx()
ax4_1.set_ylabel('CO - Molanteil')
ax4.set_ylabel('Temperatur / °C')
ax4.set_xlabel('Reduzierte Position, $z/L_R$')
ax4.plot(z_d_l_r, t_soln - 273.15, label='T / °C')
ax4_1.plot(z_d_l_r, y_i_soln[:, namen.index('CO')],
ls='--', color='gray')
ax5 = plt.subplot2grid([2, 3], [0, 2], colspan=2)
ax5.set_ylabel('Druck / bar')
ax5.set_xlabel('Reduzierte Position, $z/L_R$')
ax5.plot(z_d_l_r, p_soln, label='p / bar')
plt.tight_layout(rect=[0, 0, 0.95, 0.75])
print('')
print('NTU= ' + '{:g}'.format(ntu))
print('\n'.join([
namen[i] + ': ' + '{:g}'.format(x) + ' kg/h'
for i, x in enumerate(m_i_soln[-1])
]))
print('T=' + str(t_soln[-1] - 273.15) + '°C')
print('P=' + str(p_soln[-1]) + 'bar')
print('V0=' + str(v_soln[0]) + 'm^3/h')
print('V=' + str(v_soln[-1]) + 'm^3/h')
print('Kühlmittel: Gesättigtes $H_2O(l)$' +
' bei ' + '{:g}'.format(p_sat) + ' bar' +
'\n' + 'Verdampfungsenthalpie: ' +
'{:g}'.format(delta_h_sat) +
'kJ/kg' + '\n' + 'Kühlmittelmassenstrom: ' +
'{:g}'.format(sum(m_km_soln * dlr)) + 'kg/h')
print('Partikeldurchmesser für DeltaP=' +
'{:g}'.format(p_soln[0] - p_soln[-1]) + ' bar: ' +
'{:g}'.format(d_p) + ' m'
)
# Temperature-dependent parameters, as given in the article
def k_t(t):
# R.L. Keiski et al./Appl. Catal. A 101 (1993) 317-338
# ICI-Fe3O4-Cr2O3
r = 8.3145 # Pa m^3/mol/K
    # Equilibrium constant
k_1 = np.exp(4577.8 / t - 4.33)
    # Fitted parameters of the kinetic model
# k0 exp(-Ea/RT)
k0 = np.exp(12.64) # mol/kgKat/s * (mol/L)^(-0.1-0.54)
ea = 8008*r # J/mol
k = k0 * np.exp(-ea / (r * t))
# mol / kgKat / s * (mol/L)^(-n-m-p-q)
return np.array([
k_1, k
])
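# --- Editor's note: illustrative sketch only, not part of the original
# script. The first element returned by k_t above is the WGS equilibrium
# constant K = exp(4577.8/T - 4.33); evaluating it at two temperatures shows
# the expected drop with temperature for the exothermic shift reaction. The
# helper name and the chosen temperatures are editorial assumptions.
def _example_k_eq_trend():
    k_low = np.exp(4577.8 / 500.0 - 4.33)   # roughly 125 at 500 K
    k_high = np.exp(4577.8 / 800.0 - 4.33)  # roughly 4 at 800 K
    return k_low, k_high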
def r_wgs_keiski(t, p_i, z_realgas_f):
# R.L. Keiski et al./Appl. Catal. A 101 (1993) 317-338
# ICI-Fe3O4-Cr2O3
p_co2 = p_i[namen.index('CO2')]
p_co = p_i[namen.index('CO')]
p_h2 = p_i[namen.index('H2')]
p_h2o = p_i[namen.index('H2O')]
p = sum(p_i)
[k_1, k] = k_t(t)
r_co = k * p_co**0.54 * p_h2o**0.10 * (
1 - 1 / k_1 * p_co2 * p_h2 / (
p_co * p_h2o
)
) * (1e5 / 1000. /
(8.3145 * t * z_realgas_f)
)**(0.54 + 0.1)
# mol / kgKat / s * (mol/L)^(-0.64) *
# (bar)^0.64 * (mol K/m^3/Pa/K)^(0.64) *
# (10^5Pa/bar * 1m^3/1000L)^0.64
# = mol / kgKat / s
return r_co
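# --- Editor's note: illustrative sketch only, not part of the original
# script. It evaluates the Keiski WGS rate expression above at one sample
# condition (600 K, equimolar CO/H2O at 1 bar total pressure, ideal-gas
# z = 1) purely to show how the function is called; the chosen state is an
# editorial assumption, not a value from the cited article.
def _example_rate_at_600k():
    p_i = np.zeros(len(namen))
    p_i[namen.index('CO')] = 0.5   # bar
    p_i[namen.index('H2O')] = 0.5  # bar
    return r_wgs_keiski(600.0, p_i, 1.0)  # mol / kg_cat / s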
def df_dt(y, _, g, r_fun):
xi = y[0]
p = y[-2]
t = y[-1]
y_i = y_i0 + nuij.dot(np.array([xi]))
mm_m = sum(y_i * mm) * 1 / 1000. # kg/mol
cp_m = sum(y_i * cp_ig_durch_r(t) * 8.3145) # J/mol/K
cp_g = cp_m / mm_m # J/kg/K
z_realgas_f = z_l_v.z_non_sat(
t, p, y_i, tc, pc, omega_af,
alpha_tr, epsilon, sigma, psi, omega)['z']
c_t = p / (8.3145 * 1e-5 * t) * 1 / z_realgas_f
# bar * mol/bar/m^3/K*K = mol/m^3
m_punkt = g * np.pi/4. * d_t**2
n_punkt = m_punkt / mm_m # kg/s / kg*mol = mol/s
v_punkt = m_punkt / mm_m / c_t # kg/s / kg*mol * m^3/mol = m^3/s
p_i = y_i * p # bar
delta_h_r_t = delta_h_r(t, nuij, delta_h_r_298)
mu_t_y = mu(t, y_i)
r_j = r_fun(t, p_i, z_realgas_f)
# mol/kg Kat/s
r_strich_j = r_j * rho_b
# mol / kg Kat/s * kgKat/m^3Katschüttung
# = mol / m^3Katschüttung / s
dp_dvkat = -1 / 10 ** 5 * g / (
c_t * mm_m * d_p * np.pi / 4 * d_t**2
) * (1 - phi) / phi ** 3 * (
150 * (1 - phi) * mu_t_y / d_p + 1.75 * g
) # kg/m^2/s * m^3/mol * mol/kg /m^3Schüttung *
# m^3Katfest/m^3Schüttung*m^6Schüttung/m^6Gas * kg/s/m^2
dxi_dvkat = 1 / n_punkt * r_strich_j
# mol/mol * s/m^3 * m^3/m^3Katschüttung / s = mol/mol/m^3Katschüttung
dt_dvkat = 1 / n_punkt * (
-delta_h_r_t / cp_m
) * r_strich_j
# s/m^3 * J/mol /J*molK * m^3 / m^3Katschüttung/s
# = K/m^3Katschüttung
result = np.empty_like(y)
result[0] = dxi_dvkat
result[-2] = dp_dvkat
result[-1] = dt_dvkat
return result
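# --- Editor's note: illustrative sketch, not part of the original script.
# The dp_dvkat term above is an Ergun-type pressure-drop expression written
# per unit catalyst volume. The helper below gives the more familiar form of
# the Ergun equation as a pressure gradient per unit bed length, dP/dz in
# Pa/m, for a quick plausibility check of the inputs (g_flux in kg/m^2/s,
# rho_g in kg/m^3, mu_g in Pa*s). Name and argument list are editorial.
def _ergun_dp_dz(g_flux, rho_g, mu_g, d_part, void_frac):
    term_visc = 150.0 * (1.0 - void_frac) * mu_g / d_part
    term_kin = 1.75 * g_flux
    return -(g_flux / (rho_g * d_part)
             * (1.0 - void_frac) / void_frac**3
             * (term_visc + term_kin))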
# S Z Saw and J Nandong 2016 IOP Conf. Ser.: Mater. Sci. Eng. 121 012022
# Reactor
n_t = 1 # Rohre
d_t = 0.16 # m Rohrdurchmesser
l_r = 1 # m Rohrlänge
# Catalyst
d_p = 2e-3 # m Feststoff
rho_c = 1945 # kg Kat/m^3 Feststoff
phi = 0.38 + 0.073 * (1 - (d_t / d_p - 2)**2 / (d_t / d_p)
** 2) # m^3 Gas/m^3 Feststoff
phi = 0.4
rho_b = rho_c*(1-phi) # kg Kat/m^3 Schüttung
# Operating conditions
t0 = 599.85 + 273.15 # K
p0 = 1.01325 # bar
# Heat transfer parameters
u = 0 # W/m^2/K
# Coolant properties (VDI Heat Atlas)
t_r = (240 - 230) / (33.467 - 27.968) * (
29 - 33.467) + 240 + 273.15 # K
p_sat = 29 # bar
h_sat_l = (1037.5 - 990.21) / (33.467 - 27.968) * (
29 - 33.467) + 1037.5 # kJ/kg
h_sat_v = (2803.1 - 2803.0) / (33.467 - 27.968) * (
29 - 33.467) + 2803.1 # kJ/kg
delta_h_sat = (h_sat_v - h_sat_l)
# Feed conditions
n_i_0 = np.array([
1 / (1 + 2.4), 0, 0,
2.4 / (1 + 2.4), 0, 0,
0, 0, 0,
0
]) * 16/60**2 *1/8.3145/873 # mol/s
y_i0 = n_i_0 / sum(n_i_0)
m_dot = sum(n_i_0 * mm / 1000.) # kg/s
# Parameter calculation
g = m_dot / (np.pi / 4 * d_t**2) / n_t # kg/m^2/s
mm_m_0 = sum(y_i0 * mm) * 1 / 1000. # kg/mol
cp_m_0 = sum(y_i0 * cp_ig_durch_r(t0) * 8.3145) # J/mol/K
cp_g_0 = cp_m_0 / mm_m_0 # J/kg/K
# Number of transfer units (NTU)
ntu = l_r * 1 / (g * cp_g_0) * 2 * u / (d_t / 2)
# m * m^2 s/kg * kg K /J * J/s/m^2/K *1/m = [dimensionslose Einheiten]
# Stoichiometric number
sn = (y_i0[namen.index('H2')] - y_i0[namen.index('CO2')]) / (
y_i0[namen.index('CO2')] + y_i0[namen.index('CO')]
)
y_0 = np.empty(3)
y_0[0] = 0
y_0[-2] = p0
y_0[-1] = t0
tot_v_kat = 16e-2**2*np.pi/4*1 # m^3
v_kat = np.linspace(0, tot_v_kat, 20)
frac_v_kat = v_kat / tot_v_kat # dimlos
d_v_kat = 1 / (len(v_kat) - 1) * tot_v_kat # m^3
soln = odeint(lambda y, z0:
df_dt(y, z0, g, r_wgs_keiski),
y_0, v_kat)
xi_soln = soln[:, 0]
p_soln = soln[:, -2]
t_soln = soln[:, -1]
y_i_soln = np.zeros([len(v_kat), nuij.shape[0]])
n_i_soln = np.zeros_like(y_i_soln)
m_i_soln = np.zeros_like(y_i_soln)
n_soln = np.zeros_like(xi_soln)
mm_m_soln = np.zeros_like(xi_soln)
m_soln = np.zeros_like(xi_soln)
v_soln = np.zeros_like(xi_soln)
m_km_soln = np.zeros_like(xi_soln)
ums_soln = np.zeros_like(xi_soln)
for i in range(len(v_kat)):
y_i_soln[i] = y_i0 + nuij.dot(np.array([xi_soln[i]]))
mm_m_soln[i] = sum(y_i_soln[i] * mm * 1 / 1000.) # kg/mol
n_soln[i] = g * n_t * 60**2 * (np.pi / 4 * d_t**2) / mm_m_soln[i]
# kg/s/m^2 * 60^2s/h * m^2 / kg*mol = mol/h
m_soln[i] = g * n_t * 60**2 * (np.pi / 4 * d_t**2) # kg/h
n_i_soln[i] = n_soln[i] * y_i_soln[i] # mol/h
m_i_soln[i] = n_soln[i] * y_i_soln[i] * (mm * 1 / 1000.)
# mol/h * g/mol * 1kg/1000g
v_soln[i] = n_soln[i] * 8.3145 * 1e-5 * t_soln[i] / p_soln[i]
ums_soln[i] = (n_i_soln[0][namen.index('CO')] -
n_i_soln[i][namen.index('CO')]
) / n_i_soln[0][namen.index('CO')]
m_km_soln[i] = u * (2 / (d_t / 2)) * (t_soln[i] - t_r) * (
np.pi / 4 * d_t**2) / delta_h_sat * n_t * 60**2 / 1000.
# J/s/K/m^2 * 1/m * K * m^2 * kg/kJ * 60^2s/h * 1kJ/(1000J) = kg/h/m
vars_1 = [
[r'\rho_b', rho_b, r'\frac{kg_{Kat}}{m^3_{Schüttung}}'],
['\phi', phi, r'\frac{m^3_{Gas}}{m^3_{Schüttung}}'],
['D_p', d_p, 'm'],
]
vars_2 = [
['D_t', d_t, 'm'],
['L_R', l_r, 'm'],
['n_T', n_t, ''],
['T_0', t0 - 273.15, '°C'],
['P_0', p0, 'bar'],
]
vars_3 = [
['U', u, r'\frac{W}{m^2\cdot K}'],
['\dot m', m_dot, 'kg/s'],
['C_{p_g}', cp_g_0 / 1000., r'\frac{kJ}{kg\cdot K}'],
['NTU', ntu, ''],
['SN', sn, ''],
]
vars_4 = [
['T_r', t_r - 273.15, '°C_{Kühlmittel}'],
['P_{Sät}', p_sat, 'bar_{Kühlmittel}'],
['\Delta h_{v}', delta_h_sat, r'\frac{kJ}{kg_{Kühlmittel}}'],
]
text_1 = '\n'.join(['$' + ' = '.join([line[0], '{:g}'.format(line[1]) +
' ' + line[2]]) + '$'
for line in vars_1])
text_2 = '\n'.join(['$' + ' = '.join([line[0], '{:g}'.format(line[1]) +
' ' + line[2]]) + '$'
for line in vars_2])
text_3 = '\n'.join(['$' + ' = '.join([line[0], '{:g}'.format(line[1]) +
' ' + line[2]]) + '$'
for line in vars_3])
text_4 = '\n'.join(['$' + ' = '.join([line[0], '{:g}'.format(line[1]) +
' ' + line[2]]) + '$'
for line in vars_4])
fig = plt.figure(3)
fig.suptitle('Adiabates System ' +
'IOP Conf. Series: Materials Science and Engineering 121(2016) 012022')
fig.text(0.05, 0.935, text_1, va='top', fontsize=8)
fig.text(0.25, 0.935, text_2, va='top', fontsize=8)
fig.text(0.50, 0.935, text_3, va='top', fontsize=8)
fig.text(0.75, 0.935, text_4, va='top', fontsize=8)
ax = plt.subplot2grid([2, 3], [0, 0])
ax.plot(frac_v_kat, v_soln, label='$\dot V$')
ax.set_ylabel(r'$\frac{\dot V}{m^3/h}$')
ax.set_xlabel(r'$V_{Kat}/V_{Kat, ges}$')
ax2 = plt.subplot2grid([2, 3], [1, 0])
ax2.plot(frac_v_kat, m_km_soln)
ax2.fill_between(frac_v_kat, 0, m_km_soln, color='orange')
ax2.text(0.3, 1 / 2. * (m_km_soln[0] + m_km_soln[-1]),
'{:g}'.format(sum(m_km_soln * d_v_kat)) + 'kg/h \n')
ax2.set_ylabel(r'$\frac{\dot m_{Kuehlmittel}}{\frac{kg}{h\cdot m}}$')
ax2.set_xlabel(r'$V_{Kat}/V_{Kat, ges}$')
ax3 = plt.subplot2grid([2, 3], [1, 1], colspan=2)
ax3.set_ylabel('Massenstrom / (kg/h)')
ax3.set_xlabel(r'$V_{Kat}/V_{Kat, ges}$')
for item in ['CO', 'H2O', 'H2', 'CO2']:
marker = np.random.choice(list(lines.lineMarkers.keys()))
index = namen.index(item)
ax3.plot(frac_v_kat, m_i_soln[:, index], label=item,
marker=marker)
ax3.legend(loc=1)
ax4 = plt.subplot2grid([2, 3], [0, 1])
ax4_1 = ax4.twinx()
ax4_1.set_ylabel('CO - Molanteil')
ax4.set_ylabel('Temperatur / °C')
ax4.set_xlabel(r'$V_{Kat}/V_{Kat, ges}$')
ax4.plot(frac_v_kat, t_soln - 273.15, label='T / °C')
ax4_1.plot(frac_v_kat, y_i_soln[:, namen.index('CO')],
ls='--', color='gray')
ax5 = plt.subplot2grid([2, 3], [0, 2], colspan=2)
ax5.set_ylabel('Druck / bar')
ax5.set_xlabel(r'$V_{Kat}/V_{Kat, ges}$')
ax5.plot(frac_v_kat, p_soln, label='p / bar')
plt.tight_layout(rect=[0, 0, 0.95, 0.75])
print('')
print('NTU= ' + '{:g}'.format(ntu))
print('\n'.join([
namen[i] + ': ' + '{:g}'.format(x) + ' kg/h'
for i, x in enumerate(m_i_soln[-1])
]))
print('T=' + str(t_soln[-1] - 273.15) + '°C')
print('P=' + str(p_soln[-1]) + 'bar')
print('V0=' + str(v_soln[0]) + 'm^3/h')
print('V=' + str(v_soln[-1]) + 'm^3/h')
print('Kühlmittel: Gesättigtes $H_2O(l)$' +
' bei ' + '{:g}'.format(p_sat) + ' bar' +
'\n' + 'Verdampfungsenthalpie: ' +
'{:g}'.format(delta_h_sat) +
'kJ/kg' + '\n' + 'Kühlmittelmassenstrom: ' +
      '{:g}'.format(sum(m_km_soln * d_v_kat)) + 'kg/h')
print('Partikeldurchmesser für DeltaP=' +
'{:g}'.format(p_soln[0] - p_soln[-1]) + ' bar: ' +
'{:g}'.format(d_p) + ' m'
)
# International Journal of Hydrogen Energy, 40 (2015), 3472-3484
# Reactor
n_t = 1
d_t = 9e-3 # m
l_r = 5.612e-2 # m
tot_v_kat = np.pi / 4 * d_t**2 * l_r # m^3
# Catalyst
phi = 0.457
rho_c = 1960 # kg Kat / m^3 Kat
rho_b = (1-phi) * rho_c # kgKat / m^3 Pellet
d_p = 6e-3 # m
# Operating conditions
t0 = 350.0 + 273.15 # K
p0 = 3 # bar
ghsv = 5000. # m^3/h / m^3_{Kat}
# Heat transfer parameters
u = 0 # W/m^2/K
# Feed conditions
sc = 1.00 # Steam to Carbon ratio S/C = y_{H_2O, 0}/y_{CO, 0}
y_i0 = np.array([
18, 12, 70,
0, 0, 0,
0, 0, 0,
0
]) /(100 + 18*sc)
y_i0[namen.index('H2O')] = sc*18 / (100 + sc*18)
z_l_v.use_pr_eos()
# Stoichiometric coefficients
nuij = np.zeros([len(namen), 1])
# WGS water-gas shift reaction (must match the forward reaction,
# for which the kinetics are available)
nuij[[
namen.index('CO2'),
namen.index('H2'),
namen.index('MeOH'),
namen.index('H2O'),
namen.index('CO'),
namen.index('N2'),
], 0] = np.array([+1, +1, 0, -1, -1, 0], dtype=float)
delta_h_r_298 = nuij.T.dot(h_298) # J/mol
z_realgas_f_ntp = z_l_v.z_non_sat(
273.15, 1.0, y_i0, tc, pc, omega_af,
alpha_tr, epsilon, sigma, psi, omega)['z'].item()
z_realgas_f_0 = z_l_v.z_non_sat(
t0, p0, y_i0, tc, pc, omega_af,
alpha_tr, epsilon, sigma, psi, omega)['z'].item()
v_punkt_0 = ghsv/60.**2 * tot_v_kat # m^3/s
n_i_0 = v_punkt_0 * p0 * 1e5 / (8.3145 * t0 * z_realgas_f_0) * y_i0 # mol/s
m_punkt_0 = sum(n_i_0 * mm / 1000.) # kg/s
# Stoichiometric coefficients
nuij = np.zeros([len(namen), 1])
# WGS water-gas shift reaction
nuij[[
namen.index('CO2'),
namen.index('H2'),
namen.index('MeOH'),
namen.index('H2O'),
namen.index('CO'),
namen.index('N2'),
], 0] = np.array([+1, +1, 0, -1, -1, 0], dtype=float)
delta_h_r_298 = nuij.T.dot(h_298) # J/mol
# Parameter calculation
g = m_punkt_0 / (np.pi / 4 * d_t**2) / n_t  # kg/m^2/s
mm_m_0 = sum(y_i0 * mm) * 1 / 1000.  # kg/mol
cp_m_0 = sum(y_i0 * cp_ig_durch_r(t0) * 8.3145)  # J/mol/K
cp_g_0 = cp_m_0 / mm_m_0  # J/kg/K
# Number of transfer units (NTU) and stoichiometric number (SN), recomputed
# for this case so that the summary shown for this reactor does not reuse the
# values from the previous example.
ntu = l_r * 1 / (g * cp_g_0) * 2 * u / (d_t / 2)
sn = (y_i0[namen.index('H2')] - y_i0[namen.index('CO2')]) / (
    y_i0[namen.index('CO2')] + y_i0[namen.index('CO')]
)
y_0 = np.empty(3)
y_0[0] = 0
y_0[-2] = p0
y_0[-1] = t0
v_kat = np.linspace(0, tot_v_kat, 20)
frac_v_kat = v_kat / tot_v_kat # dimlos
d_v_kat = 1 / (len(v_kat) - 1) * tot_v_kat # m^3
soln = odeint(lambda y, z0:
df_dt(y, z0, g, r_wgs_keiski),
y_0, v_kat)
xi_soln = soln[:, 0]
p_soln = soln[:, -2]
t_soln = soln[:, -1]
y_i = y_i0 + (nuij * xi_soln).T
u_co = xi_soln / y_i0[namen.index('CO')]
y_i_soln = np.zeros([len(v_kat), nuij.shape[0]])
n_i_soln = np.zeros_like(y_i_soln)
m_i_soln = np.zeros_like(y_i_soln)
n_soln = np.zeros_like(xi_soln)
mm_m_soln = np.zeros_like(xi_soln)
m_soln = np.zeros_like(xi_soln)
v_soln = np.zeros_like(xi_soln)
m_km_soln = np.zeros_like(xi_soln)
ums_soln = np.zeros_like(xi_soln)
for i in range(len(v_kat)):
y_i_soln[i] = y_i0 + nuij.dot(np.array([xi_soln[i]]))
mm_m_soln[i] = sum(y_i_soln[i] * mm * 1 / 1000.) # kg/mol
n_soln[i] = g * n_t * 60**2 * (np.pi / 4 * d_t**2) / mm_m_soln[i]
# kg/s/m^2 * 60^2s/h * m^2 / kg*mol = mol/h
m_soln[i] = g * n_t * 60**2 * (np.pi / 4 * d_t**2) # kg/h
n_i_soln[i] = n_soln[i] * y_i_soln[i] # mol/h
m_i_soln[i] = n_soln[i] * y_i_soln[i] * (mm * 1 / 1000.)
# mol/h * g/mol * 1kg/1000g
v_soln[i] = n_soln[i] * 8.3145 * 1e-5 * t_soln[i] / p_soln[i]
ums_soln[i] = (n_i_soln[0][namen.index('CO')] -
n_i_soln[i][namen.index('CO')]
) / n_i_soln[0][namen.index('CO')]
m_km_soln[i] = u * (2 / (d_t / 2)) * (t_soln[i] - t_r) * (
np.pi / 4 * d_t**2) / delta_h_sat * n_t * 60**2 / 1000.
# J/s/K/m^2 * 1/m * K * m^2 * kg/kJ * 60^2s/h * 1kJ/(1000J) = kg/h/m
vars_1 = [
[r'\rho_b', rho_b, r'\frac{kg_{Kat}}{m^3_{Schüttung}}'],
['\phi', phi, r'\frac{m^3_{Gas}}{m^3_{Schüttung}}'],
['D_p', d_p, 'm'],
]
vars_2 = [
['D_t', d_t, 'm'],
['L_R', l_r, 'm'],
['n_T', n_t, ''],
['T_0', t0 - 273.15, '°C'],
['P_0', p0, 'bar'],
]
vars_3 = [
['U', u, r'\frac{W}{m^2\cdot K}'],
    ['\dot m', m_punkt_0, 'kg/s'],
['C_{p_g}', cp_g_0 / 1000., r'\frac{kJ}{kg\cdot K}'],
['NTU', ntu, ''],
['SN', sn, ''],
]
vars_4 = [
['T_r', t_r - 273.15, '°C_{Kühlmittel}'],
['P_{Sät}', p_sat, 'bar_{Kühlmittel}'],
['\Delta h_{v}', delta_h_sat, r'\frac{kJ}{kg_{Kühlmittel}}'],
]
text_1 = '\n'.join(['$' + ' = '.join([line[0], '{:g}'.format(line[1]) +
' ' + line[2]]) + '$'
for line in vars_1])
text_2 = '\n'.join(['$' + ' = '.join([line[0], '{:g}'.format(line[1]) +
' ' + line[2]]) + '$'
for line in vars_2])
text_3 = '\n'.join(['$' + ' = '.join([line[0], '{:g}'.format(line[1]) +
' ' + line[2]]) + '$'
for line in vars_3])
text_4 = '\n'.join(['$' + ' = '.join([line[0], '{:g}'.format(line[1]) +
' ' + line[2]]) + '$'
for line in vars_4])
fig = plt.figure(5)
fig.suptitle('Adiabates System \n' +
'International Journal of Hydrogen Energy, 40 (2015), 3472-3484')
fig.text(0.05, 0.89, text_1, va='top', fontsize=8)
fig.text(0.25, 0.89, text_2, va='top', fontsize=8)
fig.text(0.50, 0.89, text_3, va='top', fontsize=8)
fig.text(0.75, 0.89, text_4, va='top', fontsize=8)
ax = plt.subplot2grid([2, 3], [0, 0])
ax.plot(frac_v_kat, v_soln, label='$\dot V$')
ax.set_ylabel(r'$\frac{\dot V}{m^3/h}$')
ax.set_xlabel(r'$V_{Kat}/V_{Kat, ges}$')
ax2 = plt.subplot2grid([2, 3], [1, 0])
ax2.plot(frac_v_kat, m_km_soln)
ax2.fill_between(frac_v_kat, 0, m_km_soln, color='orange')
ax2.text(0.3, 1 / 2. * (m_km_soln[0] + m_km_soln[-1]),
'{:g}'.format(sum(m_km_soln * d_v_kat)) + 'kg/h \n')
ax2.set_ylabel(r'$\frac{\dot m_{Kuehlmittel}}{\frac{kg}{h\cdot m}}$')
ax2.set_xlabel(r'$V_{Kat}/V_{Kat, ges}$')
ax3 = plt.subplot2grid([2, 3], [1, 1], colspan=2)
ax3.set_ylabel('Massenstrom / (kg/h)')
ax3.set_xlabel(r'$V_{Kat}/V_{Kat, ges}$')
for item in ['CO', 'H2O', 'H2', 'CO2']:
marker = np.random.choice(list(lines.lineMarkers.keys()))
index = namen.index(item)
ax3.plot(frac_v_kat, m_i_soln[:, index] * 60**2, label=item,
marker=marker)
ax3.legend(loc=1)
ax4 = plt.subplot2grid([2, 3], [0, 1])
ax4_1 = ax4.twinx()
ax4_1.set_ylabel('CO - Molanteil')
ax4.set_ylabel('Temperatur / °C')
ax4.set_xlabel(r'$V_{Kat}/V_{Kat, ges}$')
ax4.plot(frac_v_kat, t_soln - 273.15, label='T / °C')
ax4_1.plot(frac_v_kat, y_i_soln[:, namen.index('CO')],
ls='--', color='gray')
ax5 = plt.subplot2grid([2, 3], [0, 2], colspan=2)
ax5.set_ylabel('Druck / bar')
ax5.set_xlabel(r'$V_{Kat}/V_{Kat, ges}$')
ax5.plot(frac_v_kat, p_soln, label='p / bar')
plt.tight_layout(rect=[0, 0, 0.95, 0.75])
print('')
print('NTU= ' + '{:g}'.format(ntu))
print('\n'.join([
namen[i] + ': ' + '{:g}'.format(x) + ' kg/h'
for i, x in enumerate(m_i_soln[-1])
]))
print('T=' + str(t_soln[-1] - 273.15) + '°C')
print('P=' + str(p_soln[-1]) + 'bar')
print('V0=' + str(v_soln[0]) + 'm^3/h')
print('V=' + str(v_soln[-1]) + 'm^3/h')
print('Kühlmittel: Gesättigtes $H_2O(l)$' +
' bei ' + '{:g}'.format(p_sat) + ' bar' +
'\n' + 'Verdampfungsenthalpie: ' +
'{:g}'.format(delta_h_sat) +
'kJ/kg' + '\n' + 'Kühlmittelmassenstrom: ' +
      '{:g}'.format(sum(m_km_soln * d_v_kat)) + 'kg/h')
print('Partikeldurchmesser für DeltaP=' +
'{:g}'.format(p_soln[0] - p_soln[-1]) + ' bar: ' +
'{:g}'.format(d_p) + ' m'
)
if os.name == 'nt':
thisappid = plt.matplotlib.__package__ + plt.matplotlib.__version__
ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(thisappid)
plt.show()
| mit |
voxlol/scikit-learn | sklearn/utils/tests/test_multiclass.py | 72 | 15350 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from itertools import product
from functools import partial
from sklearn.externals.six.moves import xrange
from sklearn.externals.six import iteritems
from scipy.sparse import issparse
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.multiclass import unique_labels
from sklearn.utils.multiclass import is_label_indicator_matrix
from sklearn.utils.multiclass import is_multilabel
from sklearn.utils.multiclass import is_sequence_of_sequences
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.multiclass import class_distribution
class NotAnArray(object):
"""An object that is convertable to an array. This is useful to
simulate a Pandas timeseries."""
def __init__(self, data):
self.data = data
def __array__(self):
return self.data
EXAMPLES = {
'multilabel-indicator': [
        # valid when the data is formatted as sparse or dense, identified
# by CSR format when the testing takes place
csr_matrix(np.random.RandomState(42).randint(2, size=(10, 10))),
csr_matrix(np.array([[0, 1], [1, 0]])),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.bool)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.int8)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.uint8)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.float)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.float32)),
csr_matrix(np.array([[0, 0], [0, 0]])),
csr_matrix(np.array([[0, 1]])),
# Only valid when data is dense
np.array([[-1, 1], [1, -1]]),
np.array([[-3, 3], [3, -3]]),
NotAnArray(np.array([[-3, 3], [3, -3]])),
],
'multilabel-sequences': [
[[0, 1]],
[[0], [1]],
[[1, 2, 3]],
[[1, 2, 1]], # duplicate values, why not?
[[1], [2], [0, 1]],
[[1], [2]],
[[]],
[()],
np.array([[], [1, 2]], dtype='object'),
NotAnArray(np.array([[], [1, 2]], dtype='object')),
],
'multiclass': [
[1, 0, 2, 2, 1, 4, 2, 4, 4, 4],
np.array([1, 0, 2]),
np.array([1, 0, 2], dtype=np.int8),
np.array([1, 0, 2], dtype=np.uint8),
np.array([1, 0, 2], dtype=np.float),
np.array([1, 0, 2], dtype=np.float32),
np.array([[1], [0], [2]]),
NotAnArray(np.array([1, 0, 2])),
[0, 1, 2],
['a', 'b', 'c'],
np.array([u'a', u'b', u'c']),
np.array([u'a', u'b', u'c'], dtype=object),
np.array(['a', 'b', 'c'], dtype=object),
],
'multiclass-multioutput': [
np.array([[1, 0, 2, 2], [1, 4, 2, 4]]),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.int8),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.uint8),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.float),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.float32),
np.array([['a', 'b'], ['c', 'd']]),
np.array([[u'a', u'b'], [u'c', u'd']]),
np.array([[u'a', u'b'], [u'c', u'd']], dtype=object),
np.array([[1, 0, 2]]),
NotAnArray(np.array([[1, 0, 2]])),
],
'binary': [
[0, 1],
[1, 1],
[],
[0],
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1]),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.bool),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.int8),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.uint8),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.float),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.float32),
np.array([[0], [1]]),
NotAnArray(np.array([[0], [1]])),
[1, -1],
[3, 5],
['a'],
['a', 'b'],
['abc', 'def'],
np.array(['abc', 'def']),
[u'a', u'b'],
np.array(['abc', 'def'], dtype=object),
],
'continuous': [
[1e-5],
[0, .5],
np.array([[0], [.5]]),
np.array([[0], [.5]], dtype=np.float32),
],
'continuous-multioutput': [
np.array([[0, .5], [.5, 0]]),
np.array([[0, .5], [.5, 0]], dtype=np.float32),
np.array([[0, .5]]),
],
'unknown': [
# empty second dimension
np.array([[], []]),
# 3d
np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]]),
# not currently supported sequence of sequences
np.array([np.array([]), np.array([1, 2, 3])], dtype=object),
[np.array([]), np.array([1, 2, 3])],
[set([1, 2, 3]), set([1, 2])],
[frozenset([1, 2, 3]), frozenset([1, 2])],
# and also confusable as sequences of sequences
[{0: 'a', 1: 'b'}, {0: 'a'}],
]
}
NON_ARRAY_LIKE_EXAMPLES = [
set([1, 2, 3]),
{0: 'a', 1: 'b'},
{0: [5], 1: [5]},
'abc',
frozenset([1, 2, 3]),
None,
]
def test_unique_labels():
# Empty iterable
assert_raises(ValueError, unique_labels)
# Multiclass problem
assert_array_equal(unique_labels(xrange(10)), np.arange(10))
assert_array_equal(unique_labels(np.arange(10)), np.arange(10))
assert_array_equal(unique_labels([4, 0, 2]), np.array([0, 2, 4]))
# Multilabels
assert_array_equal(assert_warns(DeprecationWarning,
unique_labels,
[(0, 1, 2), (0,), tuple(), (2, 1)]),
np.arange(3))
assert_array_equal(assert_warns(DeprecationWarning,
unique_labels,
[[0, 1, 2], [0], list(), [2, 1]]),
np.arange(3))
assert_array_equal(unique_labels(np.array([[0, 0, 1],
[1, 0, 1],
[0, 0, 0]])),
np.arange(3))
assert_array_equal(unique_labels(np.array([[0, 0, 1],
[0, 0, 0]])),
np.arange(3))
# Several arrays passed
assert_array_equal(unique_labels([4, 0, 2], xrange(5)),
np.arange(5))
assert_array_equal(unique_labels((0, 1, 2), (0,), (2, 1)),
np.arange(3))
# Border line case with binary indicator matrix
assert_raises(ValueError, unique_labels, [4, 0, 2], np.ones((5, 5)))
assert_raises(ValueError, unique_labels, np.ones((5, 4)), np.ones((5, 5)))
assert_array_equal(unique_labels(np.ones((4, 5)), np.ones((5, 5))),
np.arange(5))
# Some tests with strings input
assert_array_equal(unique_labels(["a", "b", "c"], ["d"]),
["a", "b", "c", "d"])
assert_array_equal(assert_warns(DeprecationWarning, unique_labels,
[["a", "b"], ["c"]], [["d"]]),
["a", "b", "c", "d"])
@ignore_warnings
def test_unique_labels_non_specific():
# Test unique_labels with a variety of collected examples
# Smoke test for all supported format
for format in ["binary", "multiclass", "multilabel-sequences",
"multilabel-indicator"]:
for y in EXAMPLES[format]:
unique_labels(y)
# We don't support those format at the moment
for example in NON_ARRAY_LIKE_EXAMPLES:
assert_raises(ValueError, unique_labels, example)
for y_type in ["unknown", "continuous", 'continuous-multioutput',
'multiclass-multioutput']:
for example in EXAMPLES[y_type]:
assert_raises(ValueError, unique_labels, example)
@ignore_warnings
def test_unique_labels_mixed_types():
# Mix of multilabel-indicator and multilabel-sequences
mix_multilabel_format = product(EXAMPLES["multilabel-indicator"],
EXAMPLES["multilabel-sequences"])
for y_multilabel, y_multiclass in mix_multilabel_format:
assert_raises(ValueError, unique_labels, y_multiclass, y_multilabel)
assert_raises(ValueError, unique_labels, y_multilabel, y_multiclass)
# Mix with binary or multiclass and multilabel
mix_clf_format = product(EXAMPLES["multilabel-indicator"] +
EXAMPLES["multilabel-sequences"],
EXAMPLES["multiclass"] +
EXAMPLES["binary"])
for y_multilabel, y_multiclass in mix_clf_format:
assert_raises(ValueError, unique_labels, y_multiclass, y_multilabel)
assert_raises(ValueError, unique_labels, y_multilabel, y_multiclass)
# Mix string and number input type
assert_raises(ValueError, unique_labels, [[1, 2], [3]],
[["a", "d"]])
assert_raises(ValueError, unique_labels, ["1", 2])
assert_raises(ValueError, unique_labels, [["1", 2], [3]])
assert_raises(ValueError, unique_labels, [["1", "2"], [3]])
assert_array_equal(unique_labels([(2,), (0, 2,)], [(), ()]), [0, 2])
assert_array_equal(unique_labels([("2",), ("0", "2",)], [(), ()]),
["0", "2"])
@ignore_warnings
def test_is_multilabel():
for group, group_examples in iteritems(EXAMPLES):
if group.startswith('multilabel'):
assert_, exp = assert_true, 'True'
else:
assert_, exp = assert_false, 'False'
for example in group_examples:
assert_(is_multilabel(example),
msg='is_multilabel(%r) should be %s' % (example, exp))
def test_is_label_indicator_matrix():
for group, group_examples in iteritems(EXAMPLES):
if group in ['multilabel-indicator']:
dense_assert_, dense_exp = assert_true, 'True'
else:
dense_assert_, dense_exp = assert_false, 'False'
for example in group_examples:
# Only mark explicitly defined sparse examples as valid sparse
# multilabel-indicators
if group == 'multilabel-indicator' and issparse(example):
sparse_assert_, sparse_exp = assert_true, 'True'
else:
sparse_assert_, sparse_exp = assert_false, 'False'
if (issparse(example) or
(hasattr(example, '__array__') and
np.asarray(example).ndim == 2 and
np.asarray(example).dtype.kind in 'biuf' and
np.asarray(example).shape[1] > 0)):
examples_sparse = [sparse_matrix(example)
for sparse_matrix in [coo_matrix,
csc_matrix,
csr_matrix,
dok_matrix,
lil_matrix]]
for exmpl_sparse in examples_sparse:
sparse_assert_(is_label_indicator_matrix(exmpl_sparse),
msg=('is_label_indicator_matrix(%r)'
' should be %s')
% (exmpl_sparse, sparse_exp))
# Densify sparse examples before testing
if issparse(example):
example = example.toarray()
dense_assert_(is_label_indicator_matrix(example),
msg='is_label_indicator_matrix(%r) should be %s'
% (example, dense_exp))
def test_is_sequence_of_sequences():
for group, group_examples in iteritems(EXAMPLES):
if group == 'multilabel-sequences':
assert_, exp = assert_true, 'True'
check = partial(assert_warns, DeprecationWarning,
is_sequence_of_sequences)
else:
assert_, exp = assert_false, 'False'
check = is_sequence_of_sequences
for example in group_examples:
assert_(check(example),
msg='is_sequence_of_sequences(%r) should be %s'
% (example, exp))
@ignore_warnings
def test_type_of_target():
for group, group_examples in iteritems(EXAMPLES):
for example in group_examples:
assert_equal(type_of_target(example), group,
msg='type_of_target(%r) should be %r, got %r'
% (example, group, type_of_target(example)))
for example in NON_ARRAY_LIKE_EXAMPLES:
assert_raises(ValueError, type_of_target, example)
def test_class_distribution():
y = np.array([[1, 0, 0, 1],
[2, 2, 0, 1],
[1, 3, 0, 1],
[4, 2, 0, 1],
[2, 0, 0, 1],
[1, 3, 0, 1]])
# Define the sparse matrix with a mix of implicit and explicit zeros
data = np.array([1, 2, 1, 4, 2, 1, 0, 2, 3, 2, 3, 1, 1, 1, 1, 1, 1])
indices = np.array([0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 5, 0, 1, 2, 3, 4, 5])
indptr = np.array([0, 6, 11, 11, 17])
y_sp = sp.csc_matrix((data, indices, indptr), shape=(6, 4))
classes, n_classes, class_prior = class_distribution(y)
classes_sp, n_classes_sp, class_prior_sp = class_distribution(y_sp)
classes_expected = [[1, 2, 4],
[0, 2, 3],
[0],
[1]]
n_classes_expected = [3, 3, 1, 1]
class_prior_expected = [[3/6, 2/6, 1/6],
[1/3, 1/3, 1/3],
[1.0],
[1.0]]
for k in range(y.shape[1]):
assert_array_almost_equal(classes[k], classes_expected[k])
assert_array_almost_equal(n_classes[k], n_classes_expected[k])
assert_array_almost_equal(class_prior[k], class_prior_expected[k])
assert_array_almost_equal(classes_sp[k], classes_expected[k])
assert_array_almost_equal(n_classes_sp[k], n_classes_expected[k])
assert_array_almost_equal(class_prior_sp[k], class_prior_expected[k])
# Test again with explicit sample weights
(classes,
n_classes,
class_prior) = class_distribution(y, [1.0, 2.0, 1.0, 2.0, 1.0, 2.0])
(classes_sp,
n_classes_sp,
     class_prior_sp) = class_distribution(y_sp, [1.0, 2.0, 1.0, 2.0, 1.0, 2.0])
class_prior_expected = [[4/9, 3/9, 2/9],
[2/9, 4/9, 3/9],
[1.0],
[1.0]]
for k in range(y.shape[1]):
assert_array_almost_equal(classes[k], classes_expected[k])
assert_array_almost_equal(n_classes[k], n_classes_expected[k])
assert_array_almost_equal(class_prior[k], class_prior_expected[k])
assert_array_almost_equal(classes_sp[k], classes_expected[k])
assert_array_almost_equal(n_classes_sp[k], n_classes_expected[k])
assert_array_almost_equal(class_prior_sp[k], class_prior_expected[k])
| bsd-3-clause |
rvraghav93/scikit-learn | sklearn/linear_model/tests/test_passive_aggressive.py | 31 | 9812 | from sklearn.utils.testing import assert_true
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal, assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.base import ClassifierMixin
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import PassiveAggressiveRegressor
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
class MyPassiveAggressive(ClassifierMixin):
def __init__(self, C=1.0, epsilon=0.01, loss="hinge",
fit_intercept=True, n_iter=1, random_state=None):
self.C = C
self.epsilon = epsilon
self.loss = loss
self.fit_intercept = fit_intercept
self.n_iter = n_iter
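    # Note: fit() below applies the closed-form passive-aggressive updates of
    # Crammer et al. (2006): step = min(C, loss / ||x||^2) for PA-I (hinge /
    # epsilon-insensitive losses) and step = loss / (||x||^2 + 1/(2C)) for PA-II
    # (squared variants). It serves as a slow reference implementation for the
    # correctness tests further down.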
def fit(self, X, y):
n_samples, n_features = X.shape
self.w = np.zeros(n_features, dtype=np.float64)
self.b = 0.0
for t in range(self.n_iter):
for i in range(n_samples):
p = self.project(X[i])
if self.loss in ("hinge", "squared_hinge"):
loss = max(1 - y[i] * p, 0)
else:
loss = max(np.abs(p - y[i]) - self.epsilon, 0)
sqnorm = np.dot(X[i], X[i])
if self.loss in ("hinge", "epsilon_insensitive"):
step = min(self.C, loss / sqnorm)
elif self.loss in ("squared_hinge",
"squared_epsilon_insensitive"):
step = loss / (sqnorm + 1.0 / (2 * self.C))
if self.loss in ("hinge", "squared_hinge"):
step *= y[i]
else:
step *= np.sign(y[i] - p)
self.w += step * X[i]
if self.fit_intercept:
self.b += step
def project(self, X):
return np.dot(X, self.w) + self.b
def test_classifier_accuracy():
for data in (X, X_csr):
for fit_intercept in (True, False):
for average in (False, True):
clf = PassiveAggressiveClassifier(
C=1.0, max_iter=30, fit_intercept=fit_intercept,
random_state=0, average=average, tol=None)
clf.fit(data, y)
score = clf.score(data, y)
assert_greater(score, 0.79)
if average:
assert_true(hasattr(clf, 'average_coef_'))
assert_true(hasattr(clf, 'average_intercept_'))
assert_true(hasattr(clf, 'standard_intercept_'))
assert_true(hasattr(clf, 'standard_coef_'))
def test_classifier_partial_fit():
classes = np.unique(y)
for data in (X, X_csr):
for average in (False, True):
clf = PassiveAggressiveClassifier(
C=1.0, fit_intercept=True, random_state=0,
average=average, max_iter=5)
for t in range(30):
clf.partial_fit(data, y, classes)
score = clf.score(data, y)
assert_greater(score, 0.79)
if average:
assert_true(hasattr(clf, 'average_coef_'))
assert_true(hasattr(clf, 'average_intercept_'))
assert_true(hasattr(clf, 'standard_intercept_'))
assert_true(hasattr(clf, 'standard_coef_'))
def test_classifier_refit():
# Classifier can be retrained on different labels and features.
clf = PassiveAggressiveClassifier(max_iter=5).fit(X, y)
assert_array_equal(clf.classes_, np.unique(y))
clf.fit(X[:, :-1], iris.target_names[y])
assert_array_equal(clf.classes_, iris.target_names)
def test_classifier_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
for loss in ("hinge", "squared_hinge"):
clf1 = MyPassiveAggressive(
C=1.0, loss=loss, fit_intercept=True, n_iter=2)
clf1.fit(X, y_bin)
for data in (X, X_csr):
clf2 = PassiveAggressiveClassifier(
C=1.0, loss=loss, fit_intercept=True, max_iter=2,
shuffle=False, tol=None)
clf2.fit(data, y_bin)
assert_array_almost_equal(clf1.w, clf2.coef_.ravel(), decimal=2)
def test_classifier_undefined_methods():
clf = PassiveAggressiveClassifier(max_iter=100)
for meth in ("predict_proba", "predict_log_proba", "transform"):
assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
def test_class_weights():
# Test class weights.
X2 = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y2 = [1, 1, 1, -1, -1]
clf = PassiveAggressiveClassifier(C=0.1, max_iter=100, class_weight=None,
random_state=100)
clf.fit(X2, y2)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weights to class 1
clf = PassiveAggressiveClassifier(C=0.1, max_iter=100,
class_weight={1: 0.001},
random_state=100)
clf.fit(X2, y2)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
def test_partial_fit_weight_class_balanced():
# partial_fit with class_weight='balanced' not supported
clf = PassiveAggressiveClassifier(class_weight="balanced", max_iter=100)
assert_raises(ValueError, clf.partial_fit, X, y, classes=np.unique(y))
def test_equal_class_weight():
X2 = [[1, 0], [1, 0], [0, 1], [0, 1]]
y2 = [0, 0, 1, 1]
clf = PassiveAggressiveClassifier(
C=0.1, max_iter=1000, tol=None, class_weight=None)
clf.fit(X2, y2)
# Already balanced, so "balanced" weights should have no effect
clf_balanced = PassiveAggressiveClassifier(
C=0.1, max_iter=1000, tol=None, class_weight="balanced")
clf_balanced.fit(X2, y2)
clf_weighted = PassiveAggressiveClassifier(
C=0.1, max_iter=1000, tol=None, class_weight={0: 0.5, 1: 0.5})
clf_weighted.fit(X2, y2)
# should be similar up to some epsilon due to learning rate schedule
assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2)
assert_almost_equal(clf.coef_, clf_balanced.coef_, decimal=2)
def test_wrong_class_weight_label():
# ValueError due to wrong class_weight label.
X2 = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y2 = [1, 1, 1, -1, -1]
clf = PassiveAggressiveClassifier(class_weight={0: 0.5}, max_iter=100)
assert_raises(ValueError, clf.fit, X2, y2)
def test_wrong_class_weight_format():
# ValueError due to wrong class_weight argument type.
X2 = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y2 = [1, 1, 1, -1, -1]
clf = PassiveAggressiveClassifier(class_weight=[0.5], max_iter=100)
assert_raises(ValueError, clf.fit, X2, y2)
clf = PassiveAggressiveClassifier(class_weight="the larch", max_iter=100)
assert_raises(ValueError, clf.fit, X2, y2)
def test_regressor_mse():
y_bin = y.copy()
y_bin[y != 1] = -1
for data in (X, X_csr):
for fit_intercept in (True, False):
for average in (False, True):
reg = PassiveAggressiveRegressor(
C=1.0, fit_intercept=fit_intercept,
random_state=0, average=average, max_iter=5)
reg.fit(data, y_bin)
pred = reg.predict(data)
assert_less(np.mean((pred - y_bin) ** 2), 1.7)
if average:
assert_true(hasattr(reg, 'average_coef_'))
assert_true(hasattr(reg, 'average_intercept_'))
assert_true(hasattr(reg, 'standard_intercept_'))
assert_true(hasattr(reg, 'standard_coef_'))
def test_regressor_partial_fit():
y_bin = y.copy()
y_bin[y != 1] = -1
for data in (X, X_csr):
for average in (False, True):
reg = PassiveAggressiveRegressor(
C=1.0, fit_intercept=True, random_state=0,
average=average, max_iter=100)
for t in range(50):
reg.partial_fit(data, y_bin)
pred = reg.predict(data)
assert_less(np.mean((pred - y_bin) ** 2), 1.7)
if average:
assert_true(hasattr(reg, 'average_coef_'))
assert_true(hasattr(reg, 'average_intercept_'))
assert_true(hasattr(reg, 'standard_intercept_'))
assert_true(hasattr(reg, 'standard_coef_'))
def test_regressor_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
for loss in ("epsilon_insensitive", "squared_epsilon_insensitive"):
reg1 = MyPassiveAggressive(
C=1.0, loss=loss, fit_intercept=True, n_iter=2)
reg1.fit(X, y_bin)
for data in (X, X_csr):
reg2 = PassiveAggressiveRegressor(
C=1.0, tol=None, loss=loss, fit_intercept=True, max_iter=2,
shuffle=False)
reg2.fit(data, y_bin)
assert_array_almost_equal(reg1.w, reg2.coef_.ravel(), decimal=2)
def test_regressor_undefined_methods():
reg = PassiveAggressiveRegressor(max_iter=100)
for meth in ("transform",):
assert_raises(AttributeError, lambda x: getattr(reg, x), meth)
| bsd-3-clause |
sbcrowe/pytrackrecord | trackrecord/__init__.py | 1 | 4279 | # -*- coding: utf-8 -*-
""" This module provides tools for visualisation of track record."""
# authorship information
__author__ = 'Scott Crowe'
__email__ = '[email protected]'
__license__ = "GPL3"
# import required code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from collections import Counter
class ScopusAnalysis:
"""Performs analysis of Scopus track record export."""
def __init__(self, path):
# instantiate
self._path = path
self._data_frame = pd.read_csv(self._path)
def annual_output(self, years = None):
annual_output = Counter(self._data_frame.get('Year')).most_common()
if years is None:
return annual_output
else:
dao = dict(annual_output)
return list({year:dao[year] for year in years if year in dao}.items())
def total_output(self, start_year = 2012, number_years = 5):
years = range(start_year, start_year + number_years, 1)
tally = 0
for output in self.annual_output():
if output[0] in years:
tally += output[1]
return tally
def coauthor_count(self, limit = None):
author_lists = self._data_frame.get('Authors')
author_surnames = []
for author_list in author_lists:
author_surnames.extend(author_list.split(',')[::2])
author_surnames = [x.strip() for x in author_surnames]
if limit is None:
return Counter(author_surnames).most_common()[1:]
else:
return Counter(author_surnames).most_common(limit + 1)[1:]
def journal_count(self):
journal_names = self._data_frame.get('Source title')
return Counter(journal_names).most_common()
def h_index(self):
citations = self._data_frame.get('Cited by').values
citation_numbers = np.array(sorted([x for x in citations if not pd.isnull(x)], reverse=True))
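        # h-index: the largest h such that at least h papers have >= h citations;
        # the loop below increments h while that condition still holds for h + 1.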
h_index = 0
while h_index + 1 <= sum(citation_numbers > h_index):
h_index = h_index + 1
return h_index
def plot_differential_publication_histogram(self, moving_average = True):
years = np.sort(self._data_frame.get('Year').values)
labels, values = zip(*Counter(years).items())
plt.title('Histogram of annual publications')
plt.xlabel('Year')
plt.ylabel('# of publications')
plt.bar(labels, values)
if moving_average:
mv_years, mv_values = _moving_past_average(dict(zip(labels, values)), 5)
average, = plt.plot(mv_years, mv_values, 'r-', label='Moving average (past 5 years)')
plt.legend(handles=[average])
plt.show()
def plot_cumulative_publication_histogram(self):
years = np.sort(self._data_frame.get('Year').values)
plt.bar(years, np.arange(years.size))
plt.show()
class ePrintAnalysis:
"""Performs analysis of ePrint track record export."""
def __init__(self, path):
# instantiate
self._path = path
self._data_frame = pd.read_csv(self._path)
def annual_output(self, years = None):
annual_output = Counter(self._data_frame.get('Date Published')).most_common()
if years is None:
return annual_output
else:
dao = dict(annual_output)
return list({year:dao[year] for year in years if year in dao}.items())
def coauthor_count(self):
author_lists = self._data_frame.get('Authors/Creators')
author_surnames = []
for author_list in author_lists:
author_names = author_list.split('and')
for author_name in author_names:
author_surnames.append(author_name.split(',')[0])
author_surnames = [x.strip() for x in author_surnames]
return Counter(author_surnames).most_common()[1:]
def _moving_past_average(dictionary, window = 5):
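    """Trailing moving average over the past `window` years (current year included);
    years missing from `dictionary` contribute zero to the window sum."""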
mv_keys = list(range(min(dictionary), max(dictionary) + 1))
mv_values = []
for mv_key in mv_keys:
window_tally = 0
for sum_year in range(mv_key - window + 1, mv_key + 1):
if sum_year in dictionary:
window_tally += dictionary[sum_year]
mv_values.append(window_tally / window)
return mv_keys, mv_values | gpl-3.0 |
davebx/tools-iuc | tools/fsd/fsd_beforevsafter.py | 17 | 15642 | #!/usr/bin/env python
# Family size distribution of DCS from various steps of the Galaxy pipeline
#
# Author: Monika Heinzl & Gundula Povysil, Johannes-Kepler University Linz (Austria)
# Contact: [email protected]
#
# Takes a TXT file with tags of reads that were aligned to certain regions of the reference genome (optional),
# a TABULAR file with tags before the alignment to the SSCS, a FASTA file with reads that were part of the DCS and
# a FASTA file with tags after trimming as input (optional).
# The program produces a plot which shows the distribution of family sizes of the DCS from the input files and
# a CSV file with the data of the plot.
# USAGE: python FSD before vs after_no_refF1.3_FINAL.py --inputFile_SSCS filenameSSCS --inputName1 filenameSSCS --makeDCS filenameMakeDCS --afterTrimming filenameAfterTrimming --alignedTags DCSbamFile
# --output_tabular outputfile_name_tabular --output_pdf outputfile_name_pdf
import argparse
import sys
from collections import Counter
import matplotlib.pyplot as plt
import numpy
import pysam
from Bio import SeqIO
from matplotlib.backends.backend_pdf import PdfPages
plt.switch_backend('agg')
def readFileReferenceFree(file, delim):
with open(file, 'r') as dest_f:
data_array = numpy.genfromtxt(dest_f, skip_header=0, delimiter=delim, comments='#', dtype=str)
return data_array
def readFasta(file):
tag_consensus = []
fs_consensus = []
with open(file, "r") as consFile:
for record in SeqIO.parse(consFile, "fasta"):
tag_consensus.append(record.id)
line = record.description
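            # the description line is expected to look like "<tag> <ab FS>-<ba FS>",
            # so split off the two family sizes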
a, b = line.split(" ")
fs1, fs2 = b.split("-")
fs_consensus.extend([fs1, fs2])
fs_consensus = numpy.array(fs_consensus).astype(int)
return (tag_consensus, fs_consensus)
def make_argparser():
parser = argparse.ArgumentParser(description='Analysis of read loss in duplex sequencing data')
parser.add_argument('--inputFile_SSCS',
help='Tabular File with three columns: ab or ba, tag and family size.')
parser.add_argument('--inputName1')
parser.add_argument('--makeDCS',
help='FASTA File with information about tag and family size in the header.')
parser.add_argument('--afterTrimming', default=None,
help='FASTA File with information about tag and family size in the header.')
parser.add_argument('--bamFile',
help='BAM file with aligned reads.')
parser.add_argument('--output_pdf', default="data.pdf", type=str,
help='Name of the pdf and tabular file.')
parser.add_argument('--output_tabular', default="data.tabular", type=str,
help='Name of the pdf and tabular file.')
return parser
def compare_read_families_read_loss(argv):
parser = make_argparser()
args = parser.parse_args(argv[1:])
SSCS_file = args.inputFile_SSCS
SSCS_file_name = args.inputName1
makeConsensus = args.makeDCS
afterTrimming = args.afterTrimming
ref_genome = args.bamFile
title_file = args.output_tabular
title_file2 = args.output_pdf
sep = "\t"
with open(title_file, "w") as output_file, PdfPages(title_file2) as pdf:
# PLOT
plt.rc('figure', figsize=(11.69, 8.27)) # A4 format
plt.rcParams['axes.facecolor'] = "E0E0E0" # grey background color
plt.rcParams['xtick.labelsize'] = 14
plt.rcParams['ytick.labelsize'] = 14
plt.rcParams['patch.edgecolor'] = "black"
fig = plt.figure()
plt.subplots_adjust(bottom=0.3)
list1 = []
colors = []
labels = []
# data with tags of SSCS
data_array = readFileReferenceFree(SSCS_file, "\t")
seq = numpy.array(data_array[:, 1])
tags = numpy.array(data_array[:, 2])
quant = numpy.array(data_array[:, 0]).astype(int)
# split data with all tags of SSCS after ab and ba strands
all_ab = seq[numpy.where(tags == "ab")[0]]
all_ba = seq[numpy.where(tags == "ba")[0]]
quant_ab_sscs = quant[numpy.where(tags == "ab")[0]]
quant_ba_sscs = quant[numpy.where(tags == "ba")[0]]
seqDic_ab = dict(zip(all_ab, quant_ab_sscs))
seqDic_ba = dict(zip(all_ba, quant_ba_sscs))
# get tags of the SSCS which form a DCS
# group large family sizes
bigFamilies = numpy.where(quant > 20)[0]
quant[bigFamilies] = 22
maximumX = numpy.amax(quant)
# find all unique tags and get the indices for ALL tags (ab AND ba)
u, index_unique, c = numpy.unique(numpy.array(seq), return_counts=True, return_index=True)
d = u[c > 1]
# get family sizes, tag for the duplicates
duplTags_double = quant[numpy.in1d(seq, d)]
list1.append(duplTags_double)
colors.append("#0000FF")
labels.append("before SSCS building")
duplTags = duplTags_double[0::2] # ab of DCS
duplTagsBA = duplTags_double[1::2] # ba of DCS
d2 = d[(duplTags >= 3) & (duplTagsBA >= 3)] # ab and ba FS>=3
# all SSCSs FS>=3
seq_unique, seqUnique_index = numpy.unique(seq, return_index=True)
seq_unique_FS = quant[seqUnique_index]
seq_unique_FS3 = seq_unique_FS[seq_unique_FS >= 3]
legend1 = "\ntotal nr. of tags (unique, FS>=1):\nDCS (before SSCS building, FS>=1):\ntotal nr. of tags (unique, FS>=3):\nDCS (before SSCS building, FS>=3):"
legend2 = "total numbers * \n{:,}\n{:,}\n{:,}\n{:,}".format(len(seq_unique_FS), len(duplTags),
len(seq_unique_FS3), len(d2))
plt.text(0.55, 0.14, legend1, size=11, transform=plt.gcf().transFigure)
plt.text(0.88, 0.14, legend2, size=11, transform=plt.gcf().transFigure)
# data make DCS
tag_consensus, fs_consensus = readFasta(makeConsensus)
# group large family sizes in the plot of fasta files
bigFamilies = numpy.where(fs_consensus > 20)[0]
fs_consensus[bigFamilies] = 22
list1.append(fs_consensus)
colors.append("#298A08")
labels.append("after DCS building")
legend3 = "after DCS building:"
legend4 = "{:,}".format(len(tag_consensus))
plt.text(0.55, 0.11, legend3, size=11, transform=plt.gcf().transFigure)
plt.text(0.88, 0.11, legend4, size=11, transform=plt.gcf().transFigure)
# data after trimming
if afterTrimming is not None:
tag_trimming, fs_trimming = readFasta(afterTrimming)
bigFamilies = numpy.where(fs_trimming > 20)[0]
fs_trimming[bigFamilies] = 22
list1.append(fs_trimming)
colors.append("#DF0101")
labels.append("after trimming")
legend5 = "after trimming:"
legend6 = "{:,}".format(len(tag_trimming))
plt.text(0.55, 0.09, legend5, size=11, transform=plt.gcf().transFigure)
plt.text(0.88, 0.09, legend6, size=11, transform=plt.gcf().transFigure)
# data of tags aligned to reference genome
if ref_genome is not None:
pysam.index(ref_genome)
bam = pysam.AlignmentFile(ref_genome, "rb")
seq_mut = []
for read in bam.fetch():
if not read.is_unmapped:
if '_' in read.query_name:
tags = read.query_name.split('_')[0]
else:
tags = read.query_name
seq_mut.append(tags)
# use only unique tags that were alignment to the reference genome
seq_mut = numpy.array(seq_mut)
seq_mut, seqMut_index = numpy.unique(seq_mut, return_index=True)
# get family sizes for each tag in the BAM file
quant_ab = []
quant_ba = []
for i in seq_mut:
quant_ab.append(seqDic_ab.get(i))
quant_ba.append(seqDic_ba.get(i))
quant_ab_ref = numpy.array(quant_ab)
quant_ba_ref = numpy.array(quant_ba)
quant_all_ref = numpy.concatenate((quant_ab_ref, quant_ba_ref))
bigFamilies = numpy.where(quant_all_ref > 20)[0] # group large family sizes
quant_all_ref[bigFamilies] = 22
list1.append(quant_all_ref)
colors.append("#04cec7")
labels.append("after alignment\nto reference")
legend7 = "after alignment to reference:"
length_DCS_ref = len(quant_ba_ref) # count of duplex tags that were aligned to reference genome
legend8 = "{:,}".format(length_DCS_ref)
plt.text(0.55, 0.07, legend7, size=11, transform=plt.gcf().transFigure)
plt.text(0.88, 0.07, legend8, size=11, transform=plt.gcf().transFigure)
counts = plt.hist(list1, bins=range(-1, maximumX + 1), stacked=False, label=labels, color=colors, align="left", alpha=1, edgecolor="black", linewidth=1)
ticks = numpy.arange(0, maximumX, 1)
ticks1 = [str(_) for _ in ticks]
ticks1[len(ticks1) - 1] = ">20"
plt.xticks(numpy.array(ticks), ticks1)
if ref_genome is not None:
count = numpy.array([v for k, v in sorted(Counter(quant_ab_ref).items())]) # count all family sizes from all ab strands
legend = "max. family size:\nabsolute frequency:\nrelative frequency:\n\ntotal nr. of reads:\n(before SSCS building)"
plt.text(0.1, 0.085, legend, size=11, transform=plt.gcf().transFigure)
legend = "AB\n{}\n{}\n{:.5f}\n\n{:,}" \
.format(max(quant_ab_ref), count[len(count) - 1], float(count[len(count) - 1]) / sum(count),
sum(numpy.array(data_array[:, 0]).astype(int)))
plt.text(0.35, 0.105, legend, size=11, transform=plt.gcf().transFigure)
count2 = numpy.array(
[v for k, v in sorted(Counter(quant_ba_ref).items())]) # count all family sizes from all ba strands
legend = "BA\n{}\n{}\n{:.5f}" \
.format(max(quant_ba_ref), count2[len(count2) - 1], float(count2[len(count2) - 1]) / sum(count2))
plt.text(0.45, 0.1475, legend, size=11, transform=plt.gcf().transFigure)
legend4 = "* In the plot, the family sizes of ab and ba strands and of both duplex tags were used.\nWhereas the total numbers indicate only the single count of the formed duplex tags."
plt.text(0.1, 0.02, legend4, size=11, transform=plt.gcf().transFigure)
plt.legend(loc='upper right', fontsize=14, bbox_to_anchor=(0.9, 1), frameon=True)
plt.title("Family size distribution of tags from various steps of the Du Novo pipeline", fontsize=14)
plt.xlabel("Family size", fontsize=14)
plt.ylabel("Absolute Frequency", fontsize=14)
plt.grid(b=True, which="major", color="#424242", linestyle=":")
plt.margins(0.01, None)
pdf.savefig(fig, bbox_inch="tight")
plt.close()
# write information about plot into a csv file
output_file.write("Dataset:{}{}\n".format(sep, SSCS_file_name))
if ref_genome is not None:
output_file.write("{}AB{}BA\n".format(sep, sep))
output_file.write("max. family size:{}{}{}{}\n".format(sep, max(quant_ab_ref), sep, max(quant_ba_ref)))
output_file.write(
"absolute frequency:{}{}{}{}\n".format(sep, count[len(count) - 1], sep, count2[len(count2) - 1]))
output_file.write(
"relative frequency:{}{:.3f}{}{:.3f}\n\n".format(sep, float(count[len(count) - 1]) / sum(count), sep,
float(count2[len(count2) - 1]) / sum(count2)))
output_file.write("\ntotal nr. of reads before SSCS building{}{}\n".format(sep, sum(numpy.array(data_array[:, 0]).astype(int))))
output_file.write("\n\nValues from family size distribution\n")
if afterTrimming is None and ref_genome is None:
if afterTrimming is None:
output_file.write("{}before SSCS building{}after DCS building\n".format(sep, sep))
elif ref_genome is None:
                output_file.write("{}before SSCS building{}after DCS building\n".format(sep, sep))
for fs, sscs, dcs in zip(counts[1][2:len(counts[1])], counts[0][0][2:len(counts[0][0])], counts[0][1][2:len(counts[0][1])]):
if fs == 21:
fs = ">20"
else:
fs = "={}".format(fs)
output_file.write("FS{}{}{}{}{}\n".format(fs, sep, int(sscs), sep, int(dcs)))
output_file.write("sum{}{}{}{}\n".format(sep, int(sum(counts[0][0])), sep, int(sum(counts[0][1]))))
elif afterTrimming is None or ref_genome is None:
if afterTrimming is None:
output_file.write("{}before SSCS building{}after DCS building{}after alignment to reference\n".format(sep, sep, sep))
elif ref_genome is None:
                output_file.write("{}before SSCS building{}after DCS building{}after trimming\n".format(sep, sep, sep))
for fs, sscs, dcs, reference in zip(counts[1][2:len(counts[1])], counts[0][0][2:len(counts[0][0])], counts[0][1][2:len(counts[0][1])], counts[0][2][2:len(counts[0][2])]):
if fs == 21:
fs = ">20"
else:
fs = "={}".format(fs)
output_file.write("FS{}{}{}{}{}{}{}\n".format(fs, sep, int(sscs), sep, int(dcs), sep, int(reference)))
output_file.write("sum{}{}{}{}{}{}\n".format(sep, int(sum(counts[0][0])), sep, int(sum(counts[0][1])), sep, int(sum(counts[0][2]))))
else:
output_file.write("{}before SSCS building{}after DCS building{}after trimming{}after alignment to reference\n".format(sep, sep, sep, sep))
for fs, sscs, dcs, trim, reference in zip(counts[1][2:len(counts[1])], counts[0][0][2:len(counts[0][0])], counts[0][1][2:len(counts[0][1])], counts[0][2][2:len(counts[0][2])], counts[0][3][2:len(counts[0][3])]):
if fs == 21:
fs = ">20"
else:
fs = "={}".format(fs)
output_file.write("FS{}{}{}{}{}{}{}{}{}\n".format(fs, sep, int(sscs), sep, int(dcs), sep, int(trim), sep, int(reference)))
output_file.write("sum{}{}{}{}{}{}{}{}\n".format(sep, int(sum(counts[0][0])), sep, int(sum(counts[0][1])), sep, int(sum(counts[0][2])), sep, int(sum(counts[0][3]))))
output_file.write("\n\nIn the plot, the family sizes of ab and ba strands and of both duplex tags were used.\nWhereas the total numbers indicate only the single count of the formed duplex tags.\n")
output_file.write("total nr. of tags (unique, FS>=1){}{}\n".format(sep, len(seq_unique_FS)))
output_file.write("DCS (before SSCS building, FS>=1){}{}\n".format(sep, len(duplTags)))
output_file.write("total nr. of tags (unique, FS>=3){}{}\n".format(sep, len(seq_unique_FS3)))
output_file.write("DCS (before SSCS building, FS>=3){}{}\n".format(sep, len(d2)))
output_file.write("after DCS building{}{}\n".format(sep, len(tag_consensus)))
if afterTrimming is not None:
output_file.write("after trimming{}{}\n".format(sep, len(tag_trimming)))
if ref_genome is not None:
output_file.write("after alignment to reference{}{}\n".format(sep, length_DCS_ref))
print("Files successfully created!")
if __name__ == '__main__':
sys.exit(compare_read_families_read_loss(sys.argv))
| mit |
detrout/debian-statsmodels | statsmodels/examples/ex_feasible_gls_het_0.py | 34 | 6454 | # -*- coding: utf-8 -*-
"""Examples for linear model with heteroscedasticity estimated by feasible GLS
These are examples to check the results during developement.
The assumptions:
We have a linear model y = X*beta where the variance of an observation depends
on some explanatory variable Z (`exog_var`).
linear_model.WLS estimated the model for a given weight matrix
here we want to estimate also the weight matrix by two step or iterative WLS
Created on Wed Dec 21 12:28:17 2011
Author: Josef Perktold
"""
from __future__ import print_function
import numpy as np
from numpy.testing import assert_almost_equal
from statsmodels.regression.linear_model import OLS, WLS, GLS
from statsmodels.regression.feasible_gls import GLSHet, GLSHet2
from statsmodels.tools.tools import add_constant
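# Illustrative sketch (not part of the original example): the basic two-step feasible
# GLS recipe that the iterative GLSHet fit below generalizes. The function name and
# the variance floor are ad hoc choices for this sketch, not statsmodels API.
def _two_step_fgls_sketch(y, X, Z):
    ols_res = OLS(y, X).fit()
    # model the squared OLS residuals as a linear function of the variance regressors Z
    var_res = OLS(ols_res.resid**2, Z).fit()
    est_var = np.clip(var_res.fittedvalues, 1e-8, None)  # guard against non-positive fits
    # refit by WLS, weighting each observation by its inverse estimated variance
    return WLS(y, X, weights=1.0 / est_var).fit()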
examples = ['ex1']
if 'ex1' in examples:
nsample = 300 #different pattern last graph with 100 or 200 or 500
sig = 0.5
np.random.seed(9876789) #9876543)
X = np.random.randn(nsample, 3)
X = np.column_stack((np.ones((nsample,1)), X))
beta = [1, 0.5, -0.5, 1.]
y_true2 = np.dot(X, beta)
x1 = np.linspace(0, 1, nsample)
gamma = np.array([1, 3.])
#with slope 3 instead of two, I get negative weights, Not correct
# - was misspecified, but the negative weights are still possible with identity link
#gamma /= gamma.sum() #normalize assuming x1.max is 1
z_true = add_constant(x1)
winv = np.dot(z_true, gamma)
het_params = sig**2 * np.array([1, 3.]) # for squared
sig2_het = sig**2 * winv
weights_dgp = 1/winv
weights_dgp /= weights_dgp.max() #should be already normalized - NOT check normalization
#y2[:nsample*6/10] = y_true2[:nsample*6/10] + sig*1. * np.random.normal(size=nsample*6/10)
z0 = np.zeros(nsample)
z0[(nsample * 5)//10:] = 1 #dummy for 2 halfs of sample
z0 = add_constant(z0)
z1 = add_constant(x1)
noise = np.sqrt(sig2_het) * np.random.normal(size=nsample)
y2 = y_true2 + noise
X2 = X[:,[0,2]] #misspecified, missing regressor in main equation
    X2 = X #correctly specified
res_ols = OLS(y2, X2).fit()
print('OLS beta estimates')
print(res_ols.params)
print('OLS stddev of beta')
print(res_ols.bse)
print('\nWLS')
mod0 = GLSHet2(y2, X2, exog_var=winv)
res0 = mod0.fit()
print('new version')
mod1 = GLSHet(y2, X2, exog_var=winv)
res1 = mod1.iterative_fit(2)
print('WLS beta estimates')
print(res1.params)
print(res0.params)
print('WLS stddev of beta')
print(res1.bse)
#compare with previous version GLSHet2, refactoring check
#assert_almost_equal(res1.params, np.array([ 0.37642521, 1.51447662]))
#this fails ??? more iterations? different starting weights?
print(res1.model.weights/res1.model.weights.max())
#why is the error so small in the estimated weights ?
assert_almost_equal(res1.model.weights/res1.model.weights.max(), weights_dgp, 14)
print('residual regression params')
print(res1.results_residual_regression.params)
print('scale of model ?')
print(res1.scale)
print('unweighted residual variance, note unweighted mean is not zero')
print(res1.resid.var())
#Note weighted mean is zero:
#(res1.model.weights * res1.resid).mean()
doplots = True #False
if doplots:
import matplotlib.pyplot as plt
plt.figure()
plt.plot(x1, y2, 'o')
plt.plot(x1, y_true2, 'b-', label='true')
plt.plot(x1, res1.fittedvalues, 'r-', label='fwls')
plt.plot(x1, res_ols.fittedvalues, '--', label='ols')
plt.legend()
#the next only works if w has finite support, discrete/categorical
#z = (w[:,None] == [1,4]).astype(float) #dummy variable
#z = (w0[:,None] == np.unique(w0)).astype(float) #dummy variable
#changed z0 contains dummy and constant
mod2 = GLSHet(y2, X2, exog_var=z0)
res2 = mod2.iterative_fit(3)
print(res2.params)
import statsmodels.api as sm
#z = sm.add_constant(w, prepend=True)
z = sm.add_constant(x1/x1.max())
mod3 = GLSHet(y2, X2, exog_var=z1)#, link=sm.families.links.log())
res3 = mod3.iterative_fit(20)
error_var_3 = res3.mse_resid/res3.model.weights
print(res3.params)
print("np.array(res3.model.history['ols_params'])")
print(np.array(res3.model.history['ols_params']))
print("np.array(res3.model.history['self_params'])")
print(np.array(res3.model.history['self_params']))
#Models 2 and 3 are equivalent with different parameterization of Z
print(np.unique(res2.model.weights)) #for discrete z only, only a few uniques
print(np.unique(res3.model.weights))
print(res3.summary())
print('\n\nResults of estimation of weights')
print('--------------------------------')
print(res3.results_residual_regression.summary())
if doplots:
plt.figure()
plt.plot(x1, y2, 'o')
plt.plot(x1, y_true2, 'b-', label='true')
plt.plot(x1, res1.fittedvalues, '-', label='fwls1')
plt.plot(x1, res2.fittedvalues, '-', label='fwls2')
plt.plot(x1, res3.fittedvalues, '-', label='fwls3')
plt.plot(x1, res_ols.fittedvalues, '--', label='ols')
plt.legend()
plt.figure()
plt.ylim(0, 5)
res_e2 = OLS(noise**2, z).fit()
plt.plot(noise**2, 'bo', alpha=0.5, label='dgp error**2')
plt.plot(res_e2.fittedvalues, lw=2, label='ols for noise**2')
#plt.plot(res3.model.weights, label='GLSHet weights')
plt.plot(error_var_3, lw=2, label='GLSHet error var')
plt.plot(res3.resid**2, 'ro', alpha=0.5, label='resid squared')
#plt.plot(weights_dgp, label='DGP weights')
plt.plot(sig**2 * winv, lw=2, label='DGP error var')
plt.legend()
plt.show()
'''Note these are close but maybe biased because of skewed distribution
>>> res3.mse_resid/res3.model.weights[-10:]
array([ 1.03115871, 1.03268209, 1.03420547, 1.03572885, 1.03725223,
1.03877561, 1.04029899, 1.04182237, 1.04334575, 1.04486913])
>>> res_e2.fittedvalues[-10:]
array([ 1.0401953 , 1.04171386, 1.04323242, 1.04475098, 1.04626954,
1.0477881 , 1.04930666, 1.05082521, 1.05234377, 1.05386233])
>>> sig**2 * w[-10:]
array([ 0.98647295, 0.98797595, 0.98947896, 0.99098196, 0.99248497,
0.99398798, 0.99549098, 0.99699399, 0.99849699, 1. ])
'''
| bsd-3-clause |
barentsen/dave | trapezoidFit/smoothn.py | 1 | 7519 | import numpy as np
from scipy import interpolate as interp
from scipy.fftpack import dct
from numpy import linalg as LA
import scipy.optimize as opt
import matplotlib.pyplot as plt
def smoothn(yin, w=None, s=None, robust=True, tolZ=1.0e-5, maxIter=100):
    """Perform the penalized least-squares data smoothing of Garcia, D. (2010)
http://www.biomecardio.com/matlab/smoothn.html
The smoothing allows for iterative robust smoothing with missing data.
The smoothing parameter can be automatically determined using a
generalized cross-validation score method.
Originally implemented in MATLAB by
AUTHOR Damien Garcia
Ported to python by
AUTHOR: Christopher J Burke
***Currently limited to the 1D case***
    For missing, corrupted, or in-transit data that you don't want
    to influence the fit, it is not sufficient to set the weight
    value to 0 for the bad data points. In addition to setting
    the weight=0, you MUST also do one of the following two things.
1) Also set the bad data to NaN in the data vector (y), and this
function will use linear interpolation across the gap
to fill in approximate values OR
2) Interpolate across the gap to fill in the bad data in the data vector
before calling this function
INPUT:
yin - data vector one wants to find a smoothing function for
w - [0-1] data weights
s - smoothing parameter if not specified it is determined with GCVS
robust - Perform iterative reweighting for outliers.
tolZ - relative tolerance for change between iterations
maxIter - maximum number of iterations for convergence
OUTPUT:
z - smoothed model for data vector
w - final weighting vector
s - final smoothing parameter
exitflag - flag if solution converged before maxIter
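    EXAMPLE (illustrative only):
        z, w, s, ok = smoothn(y)            # s chosen automatically by GCV
        z, w, s, ok = smoothn(y, s=10.0)    # fixed smoothing parameter
        z, w, s, ok = smoothn(y, w=wvec, robust=True)   # weighted + robust smoothing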
"""
# Force y to be numpy double array and a copy
y = np.array(yin, dtype=np.double, copy=True)
sizy = y.size
noe = sizy
    if noe < 2: # too few elements; return the data unchanged
z = y
return z
# Check for weights
# weighted fit is performed if w vector is an argument OR
# non finite values appear in data vector
isWeighted = False
if w is None:
w = np.full_like(y, 1.0)
else:
isWeighted = True
isFinite = np.isfinite(y)
nof = isFinite.sum()
if not isFinite.all():
isWeighted = True
w = np.where(isFinite, w, 0.0)
w = w / w.max()
# autosmoothing
isAuto = False
if s is None:
isAuto = True
# Creation of the Lambda tensor
lam = np.zeros_like(y)
lam = -2.0 + 2.0 * np.cos((np.linspace(1.0,sizy,sizy)-1.0)*np.pi/sizy)
if not isAuto:
gamma = 1.0 / (1.0 + s * lam**2)
#Upper and lower bounds of smoothness parameter
hMin = 5.0e-3
hMax = 0.99
usePow = 2.0
tmp = 1.0 + np.sqrt(1.0 + 8.0 * hMax**usePow)
sMinBnd = ((tmp / 4.0 / hMax**usePow)**2 - 1.0) / 16.0
tmp = 1.0 + np.sqrt(1.0 + 8.0 * hMin**usePow)
sMaxBnd = ((tmp / 4.0 / hMin**usePow)**2 - 1.0) / 16.0
# Initialize a rough guess at the smooth function if weighting is involved
wTot = w
if isWeighted:
z = initialGuess(y, np.isfinite(y))
else:
z = np.zeros_like(y)
z0 = z
# Do linear interpolation for nans in data vector
if not isFinite.all():
fullx = np.arange(len(y))
gdIdx = np.where(isFinite)[0]
tmpx = fullx[gdIdx]
tmpy = y[gdIdx]
funcinterp = interp.interp1d(tmpx, tmpy, kind='linear')
y = funcinterp(fullx)
tol = 1.0
robustIterativeProcess = True
robustStep = 1
nit = 0
# Relaxation Factor
RF = 1.0
if isWeighted:
RF = RF + 0.75
# Main iterative Loop
while robustIterativeProcess:
# amount of weights
aow = wTot.sum() / noe
while tol > tolZ and nit < maxIter:
nit = nit + 1
dcty = dct(wTot * (y - z) + z, type=2, norm='ortho')
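            # re-estimate the smoothing parameter by GCV only when nit is a power of
            # two (1, 2, 4, ...) to limit the number of scalar minimizations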
if isAuto and np.remainder(np.log2(nit),1) == 0:
allOutput = opt.minimize_scalar(gcv, \
bounds=[np.log10(sMinBnd),np.log10(sMaxBnd)], \
args=(y, lam, dcty, wTot, isFinite, aow, noe, nof), \
method='bounded', tol=None, \
options={'xatol':1.0e-1})
p = allOutput['x']
s = 10.0**p
gamma = 1.0 / (1.0 + s * lam**2)
z = RF * dct(gamma * dcty, type=3, norm='ortho') + (1.0 - RF) * z
tol = LA.norm(z0 - z) / LA.norm(z)
if not isWeighted: # if no weighted/missing data tol=0.0 (no iter)
tol = 0.0
z0 = z # save last output
exitFlag = nit < maxIter
if robust: # robust smoothing iteratively re-weight outliers
# average leverage
h = np.sqrt(1.0 + 16.0 * s)
h = np.sqrt(1.0 + h) / np.sqrt(2.0) / h
# take robust weights into account
wTot = w * robustWeights(y-z, isFinite, h)
# reinitialize for another iteration
isWeighted = True
tol = 1.0
nit = 0
robustStep = robustStep +1
robustIterativeProcess = robustStep < 4 # Max of 3 robust steps
else:
robustIterativeProcess = False # No iterations needed
return z, w, s, exitFlag
def initialGuess(y, iFin ):
z = y
if not iFin.all():
# Do linear interpolation for missing NaN data
fullx = np.arange(len(y))
gdIdx = np.where(iFin)[0]
tmpx = fullx[gdIdx]
tmpy = y[gdIdx]
funcinterp = interp.interp1d(tmpx, tmpy, kind='linear')
z = funcinterp(fullx)
z = dct(z, type=2, norm='ortho')
    zeroIdx = int(np.ceil(len(z) / 10))  # keep only the lowest ~10% of DCT components
    z[zeroIdx:] = 0.0
z = dct(z, type=3, norm='ortho')
return z
def gcv(p, y, lam, dcty, wTot, iFin, aow, noe, nof):
s = 10.0**p
gamma = 1.0 / (1.0 + s * lam**2)
if aow > 0.9:
rss = LA.norm(dcty * (gamma - 1.0))**2
else:
yhat = dct(gamma * dcty, type=3, norm='ortho')
gdIdx = np.where(iFin)[0]
rss = LA.norm(np.sqrt(wTot[gdIdx]) * (y[gdIdx] -
yhat[gdIdx]))**2
trH = gamma.sum()
return rss / nof / (1.0 - trH/noe)**2
def robustWeights(r, iFin, h):
gdIdx = np.where(iFin)[0]
mad = np.median(abs(r[gdIdx] - np.median(r[gdIdx]))) #median abs deviation
u = np.abs(r / (1.4826 * mad) / np.sqrt(1.-h)) # studentized residuals
c = 4.685
u = u / c
u2 = u * u
w = (1.0 - u2)**2
w = np.where(u > 1.0, 0.0, w)
w = np.where(np.logical_not(iFin), 0.0, w)
w = np.where(np.logical_not(np.isfinite(w)), 0.0, w)
return w
# Run the test of the smoothn
if __name__ == "__main__":
x = np.linspace(0,100,2**8)
y = np.cos(x/10.0)+(x/50.0)**2 + np.random.randn(len(x))/10.0;
y[[69, 74, 79]] = np.array([5.5, 5, 6])
plt.plot(x,y,'.')
z = smoothn(y, robust=False) # Regular smoothing
plt.plot(x,z[0],'-r')
plt.show()
zr = smoothn(y, robust=True) # Robust smoothing
plt.plot(x,y,'.')
plt.plot(x,zr[0],'-r')
plt.show()
ynew = np.array(y, copy=True)
ynew[100:110] = np.nan
zmr = smoothn(ynew, robust=True)
plt.plot(x,ynew,'.')
plt.plot(x,zmr[0],'-r')
plt.show()
| mit |
PawarPawan/h2o-v3 | h2o-py/tests/testdir_algos/kmeans/pyunit_get_modelKmeans.py | 3 | 1090 | import sys
sys.path.insert(1, "../../../")
import h2o
import numpy as np
from sklearn.cluster import KMeans
from sklearn.preprocessing import Imputer
def get_modelKmeans(ip,port):
# Connect to a pre-existing cluster
# connect to localhost:54321
#Log.info("Importing benign.csv data...\n")
benign_h2o = h2o.import_file(path=h2o.locate("smalldata/logreg/benign.csv"))
#benign_h2o.summary()
benign_sci = np.genfromtxt(h2o.locate("smalldata/logreg/benign.csv"), delimiter=",")
# Impute missing values with column mean
imp = Imputer(missing_values='NaN', strategy='mean', axis=0)
benign_sci = imp.fit_transform(benign_sci)
for i in range(2,7):
# Log.info("H2O K-Means")
km_h2o = h2o.kmeans(x=benign_h2o, k=i)
km_h2o.show()
model = h2o.get_model(km_h2o._id)
model.show()
km_sci = KMeans(n_clusters=i, init='k-means++', n_init=1)
km_sci.fit(benign_sci)
    print "scikit centers"
print km_sci.cluster_centers_
if __name__ == "__main__":
h2o.run_test(sys.argv, get_modelKmeans)
| apache-2.0 |
jhamman/xarray | xarray/core/indexing.py | 1 | 50025 | import enum
import functools
import operator
from collections import defaultdict
from contextlib import suppress
from datetime import timedelta
from typing import Any, Callable, Sequence, Tuple, Union
import numpy as np
import pandas as pd
from . import duck_array_ops, nputils, utils
from .npcompat import DTypeLike
from .pycompat import dask_array_type, integer_types, sparse_array_type
from .utils import is_dict_like, maybe_cast_to_coords_dtype
def expanded_indexer(key, ndim):
"""Given a key for indexing an ndarray, return an equivalent key which is a
tuple with length equal to the number of dimensions.
The expansion is done by replacing all `Ellipsis` items with the right
number of full slices and then padding the key with full slices so that it
reaches the appropriate dimensionality.
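    e.g. expanded_indexer((0, Ellipsis), 3) -> (0, slice(None), slice(None))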
"""
if not isinstance(key, tuple):
# numpy treats non-tuple keys equivalent to tuples of length 1
key = (key,)
new_key = []
# handling Ellipsis right is a little tricky, see:
# http://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#advanced-indexing
found_ellipsis = False
for k in key:
if k is Ellipsis:
if not found_ellipsis:
new_key.extend((ndim + 1 - len(key)) * [slice(None)])
found_ellipsis = True
else:
new_key.append(slice(None))
else:
new_key.append(k)
if len(new_key) > ndim:
raise IndexError("too many indices")
new_key.extend((ndim - len(new_key)) * [slice(None)])
return tuple(new_key)
def _expand_slice(slice_, size):
return np.arange(*slice_.indices(size))
def _sanitize_slice_element(x):
from .variable import Variable
from .dataarray import DataArray
if isinstance(x, (Variable, DataArray)):
x = x.values
if isinstance(x, np.ndarray):
if x.ndim != 0:
raise ValueError(
f"cannot use non-scalar arrays in a slice for xarray indexing: {x}"
)
x = x[()]
if isinstance(x, np.timedelta64):
# pandas does not support indexing with np.timedelta64 yet:
# https://github.com/pandas-dev/pandas/issues/20393
x = pd.Timedelta(x)
return x
def _asarray_tuplesafe(values):
"""
Convert values into a numpy array of at most 1-dimension, while preserving
tuples.
Adapted from pandas.core.common._asarray_tuplesafe
"""
if isinstance(values, tuple):
result = utils.to_0d_object_array(values)
else:
result = np.asarray(values)
if result.ndim == 2:
result = np.empty(len(values), dtype=object)
result[:] = values
return result
def _is_nested_tuple(possible_tuple):
return isinstance(possible_tuple, tuple) and any(
isinstance(value, (tuple, list, slice)) for value in possible_tuple
)
def get_indexer_nd(index, labels, method=None, tolerance=None):
"""Wrapper around :meth:`pandas.Index.get_indexer` supporting n-dimensional
labels
"""
flat_labels = np.ravel(labels)
flat_indexer = index.get_indexer(flat_labels, method=method, tolerance=tolerance)
indexer = flat_indexer.reshape(labels.shape)
return indexer
def convert_label_indexer(index, label, index_name="", method=None, tolerance=None):
"""Given a pandas.Index and labels (e.g., from __getitem__) for one
dimension, return an indexer suitable for indexing an ndarray along that
dimension. If `index` is a pandas.MultiIndex and depending on `label`,
return a new pandas.Index or pandas.MultiIndex (otherwise return None).
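    e.g. convert_label_indexer(pd.Index([10, 20, 30]), 20) -> (1, None)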
"""
new_index = None
if isinstance(label, slice):
if method is not None or tolerance is not None:
raise NotImplementedError(
"cannot use ``method`` argument if any indexers are " "slice objects"
)
indexer = index.slice_indexer(
_sanitize_slice_element(label.start),
_sanitize_slice_element(label.stop),
_sanitize_slice_element(label.step),
)
if not isinstance(indexer, slice):
# unlike pandas, in xarray we never want to silently convert a
# slice indexer into an array indexer
raise KeyError(
"cannot represent labeled-based slice indexer for dimension "
f"{index_name!r} with a slice over integer positions; the index is "
"unsorted or non-unique"
)
elif is_dict_like(label):
is_nested_vals = _is_nested_tuple(tuple(label.values()))
if not isinstance(index, pd.MultiIndex):
raise ValueError(
"cannot use a dict-like object for selection on "
"a dimension that does not have a MultiIndex"
)
elif len(label) == index.nlevels and not is_nested_vals:
indexer = index.get_loc(tuple(label[k] for k in index.names))
else:
for k, v in label.items():
# index should be an item (i.e. Hashable) not an array-like
if isinstance(v, Sequence) and not isinstance(v, str):
raise ValueError(
"Vectorized selection is not "
"available along level variable: " + k
)
indexer, new_index = index.get_loc_level(
tuple(label.values()), level=tuple(label.keys())
)
# GH2619. Raise a KeyError if nothing is chosen
if indexer.dtype.kind == "b" and indexer.sum() == 0:
raise KeyError(f"{label} not found")
elif isinstance(label, tuple) and isinstance(index, pd.MultiIndex):
if _is_nested_tuple(label):
indexer = index.get_locs(label)
elif len(label) == index.nlevels:
indexer = index.get_loc(label)
else:
indexer, new_index = index.get_loc_level(
label, level=list(range(len(label)))
)
else:
label = (
label
if getattr(label, "ndim", 1) > 1 # vectorized-indexing
else _asarray_tuplesafe(label)
)
if label.ndim == 0:
if isinstance(index, pd.MultiIndex):
indexer, new_index = index.get_loc_level(label.item(), level=0)
else:
indexer = index.get_loc(
label.item(), method=method, tolerance=tolerance
)
elif label.dtype.kind == "b":
indexer = label
else:
if isinstance(index, pd.MultiIndex) and label.ndim > 1:
raise ValueError(
"Vectorized selection is not available along "
"MultiIndex variable: " + index_name
)
indexer = get_indexer_nd(index, label, method, tolerance)
if np.any(indexer < 0):
raise KeyError(f"not all values found in index {index_name!r}")
return indexer, new_index
def get_dim_indexers(data_obj, indexers):
"""Given a xarray data object and label based indexers, return a mapping
of label indexers with only dimension names as keys.
It groups multiple level indexers given on a multi-index dimension
into a single, dictionary indexer for that dimension (Raise a ValueError
if it is not possible).
"""
invalid = [
k
for k in indexers
if k not in data_obj.dims and k not in data_obj._level_coords
]
if invalid:
raise ValueError(f"dimensions or multi-index levels {invalid!r} do not exist")
level_indexers = defaultdict(dict)
dim_indexers = {}
for key, label in indexers.items():
dim, = data_obj[key].dims
if key != dim:
# assume here multi-index level indexer
level_indexers[dim][key] = label
else:
dim_indexers[key] = label
for dim, level_labels in level_indexers.items():
if dim_indexers.get(dim, False):
raise ValueError(
"cannot combine multi-index level indexers with an indexer for "
f"dimension {dim}"
)
dim_indexers[dim] = level_labels
return dim_indexers
def remap_label_indexers(data_obj, indexers, method=None, tolerance=None):
"""Given an xarray data object and label based indexers, return a mapping
of equivalent location based indexers. Also return a mapping of updated
pandas index objects (in case of multi-index level drop).
"""
if method is not None and not isinstance(method, str):
raise TypeError("``method`` must be a string")
pos_indexers = {}
new_indexes = {}
dim_indexers = get_dim_indexers(data_obj, indexers)
for dim, label in dim_indexers.items():
try:
index = data_obj.indexes[dim]
except KeyError:
# no index for this dimension: reuse the provided labels
if method is not None or tolerance is not None:
raise ValueError(
"cannot supply ``method`` or ``tolerance`` "
"when the indexed dimension does not have "
"an associated coordinate."
)
pos_indexers[dim] = label
else:
coords_dtype = data_obj.coords[dim].dtype
label = maybe_cast_to_coords_dtype(label, coords_dtype)
idxr, new_idx = convert_label_indexer(index, label, dim, method, tolerance)
pos_indexers[dim] = idxr
if new_idx is not None:
new_indexes[dim] = new_idx
return pos_indexers, new_indexes
def slice_slice(old_slice, applied_slice, size):
"""Given a slice and the size of the dimension to which it will be applied,
index it with another slice to return a new slice equivalent to applying
the slices sequentially
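    e.g. slice_slice(slice(2, 20, 2), slice(1, 5), size=30) -> slice(4, 11, 2)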
"""
step = (old_slice.step or 1) * (applied_slice.step or 1)
# For now, use the hack of turning old_slice into an ndarray to reconstruct
# the slice start and stop. This is not entirely ideal, but it is still
# definitely better than leaving the indexer as an array.
items = _expand_slice(old_slice, size)[applied_slice]
if len(items) > 0:
start = items[0]
stop = items[-1] + int(np.sign(step))
if stop < 0:
stop = None
else:
start = 0
stop = 0
return slice(start, stop, step)
def _index_indexer_1d(old_indexer, applied_indexer, size):
assert isinstance(applied_indexer, integer_types + (slice, np.ndarray))
if isinstance(applied_indexer, slice) and applied_indexer == slice(None):
# shortcut for the usual case
return old_indexer
if isinstance(old_indexer, slice):
if isinstance(applied_indexer, slice):
indexer = slice_slice(old_indexer, applied_indexer, size)
else:
indexer = _expand_slice(old_indexer, size)[applied_indexer]
else:
indexer = old_indexer[applied_indexer]
return indexer
class ExplicitIndexer:
"""Base class for explicit indexer objects.
ExplicitIndexer objects wrap a tuple of values given by their ``tuple``
property. These tuples should always have length equal to the number of
dimensions on the indexed array.
Do not instantiate BaseIndexer objects directly: instead, use one of the
sub-classes BasicIndexer, OuterIndexer or VectorizedIndexer.
"""
__slots__ = ("_key",)
def __init__(self, key):
if type(self) is ExplicitIndexer:
raise TypeError("cannot instantiate base ExplicitIndexer objects")
self._key = tuple(key)
@property
def tuple(self):
return self._key
def __repr__(self):
return f"{type(self).__name__}({self.tuple})"
def as_integer_or_none(value):
return None if value is None else operator.index(value)
def as_integer_slice(value):
start = as_integer_or_none(value.start)
stop = as_integer_or_none(value.stop)
step = as_integer_or_none(value.step)
return slice(start, stop, step)
class BasicIndexer(ExplicitIndexer):
"""Tuple for basic indexing.
All elements should be int or slice objects. Indexing follows NumPy's
rules for basic indexing: each axis is independently sliced and axes
indexed with an integer are dropped from the result.
"""
__slots__ = ()
def __init__(self, key):
if not isinstance(key, tuple):
raise TypeError(f"key must be a tuple: {key!r}")
new_key = []
for k in key:
if isinstance(k, integer_types):
k = int(k)
elif isinstance(k, slice):
k = as_integer_slice(k)
else:
raise TypeError(
f"unexpected indexer type for {type(self).__name__}: {k!r}"
)
new_key.append(k)
super().__init__(new_key)
class OuterIndexer(ExplicitIndexer):
"""Tuple for outer/orthogonal indexing.
All elements should be int, slice or 1-dimensional np.ndarray objects with
an integer dtype. Indexing is applied independently along each axis, and
axes indexed with an integer are dropped from the result. This type of
indexing works like MATLAB/Fortran.
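    e.g. OuterIndexer((np.array([0, 2]), slice(None))) selects rows 0 and 2 and all
    columns of a 2-D array, much like arr[np.ix_([0, 2], range(arr.shape[1]))].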
"""
__slots__ = ()
def __init__(self, key):
if not isinstance(key, tuple):
raise TypeError(f"key must be a tuple: {key!r}")
new_key = []
for k in key:
if isinstance(k, integer_types):
k = int(k)
elif isinstance(k, slice):
k = as_integer_slice(k)
elif isinstance(k, np.ndarray):
if not np.issubdtype(k.dtype, np.integer):
raise TypeError(
f"invalid indexer array, does not have integer dtype: {k!r}"
)
if k.ndim != 1:
raise TypeError(
f"invalid indexer array for {type(self).__name__}; must have "
f"exactly 1 dimension: {k!r}"
)
k = np.asarray(k, dtype=np.int64)
else:
raise TypeError(
f"unexpected indexer type for {type(self).__name__}: {k!r}"
)
new_key.append(k)
super().__init__(new_key)
class VectorizedIndexer(ExplicitIndexer):
"""Tuple for vectorized indexing.
All elements should be slice or N-dimensional np.ndarray objects with an
integer dtype and the same number of dimensions. Indexing follows proposed
rules for np.ndarray.vindex, which matches NumPy's advanced indexing rules
(including broadcasting) except sliced axes are always moved to the end:
https://github.com/numpy/numpy/pull/6256
"""
__slots__ = ()
def __init__(self, key):
if not isinstance(key, tuple):
raise TypeError(f"key must be a tuple: {key!r}")
new_key = []
ndim = None
for k in key:
if isinstance(k, slice):
k = as_integer_slice(k)
elif isinstance(k, np.ndarray):
if not np.issubdtype(k.dtype, np.integer):
raise TypeError(
f"invalid indexer array, does not have integer dtype: {k!r}"
)
if ndim is None:
ndim = k.ndim
elif ndim != k.ndim:
ndims = [k.ndim for k in key if isinstance(k, np.ndarray)]
raise ValueError(
"invalid indexer key: ndarray arguments "
f"have different numbers of dimensions: {ndims}"
)
k = np.asarray(k, dtype=np.int64)
else:
raise TypeError(
f"unexpected indexer type for {type(self).__name__}: {k!r}"
)
new_key.append(k)
super().__init__(new_key)
class ExplicitlyIndexed:
"""Mixin to mark support for Indexer subclasses in indexing.
"""
__slots__ = ()
class ExplicitlyIndexedNDArrayMixin(utils.NDArrayMixin, ExplicitlyIndexed):
__slots__ = ()
def __array__(self, dtype=None):
key = BasicIndexer((slice(None),) * self.ndim)
return np.asarray(self[key], dtype=dtype)
class ImplicitToExplicitIndexingAdapter(utils.NDArrayMixin):
"""Wrap an array, converting tuples into the indicated explicit indexer."""
__slots__ = ("array", "indexer_cls")
def __init__(self, array, indexer_cls=BasicIndexer):
self.array = as_indexable(array)
self.indexer_cls = indexer_cls
def __array__(self, dtype=None):
return np.asarray(self.array, dtype=dtype)
def __getitem__(self, key):
key = expanded_indexer(key, self.ndim)
result = self.array[self.indexer_cls(key)]
if isinstance(result, ExplicitlyIndexed):
return type(self)(result, self.indexer_cls)
else:
# Sometimes explicitly indexed arrays return NumPy arrays or
# scalars.
return result
class LazilyOuterIndexedArray(ExplicitlyIndexedNDArrayMixin):
"""Wrap an array to make basic and outer indexing lazy.
"""
__slots__ = ("array", "key")
def __init__(self, array, key=None):
"""
Parameters
----------
array : array_like
Array like object to index.
key : ExplicitIndexer, optional
Array indexer. If provided, it is assumed to already be in
canonical expanded form.
"""
if isinstance(array, type(self)) and key is None:
# unwrap
key = array.key
array = array.array
if key is None:
key = BasicIndexer((slice(None),) * array.ndim)
self.array = as_indexable(array)
self.key = key
def _updated_key(self, new_key):
iter_new_key = iter(expanded_indexer(new_key.tuple, self.ndim))
full_key = []
for size, k in zip(self.array.shape, self.key.tuple):
if isinstance(k, integer_types):
full_key.append(k)
else:
full_key.append(_index_indexer_1d(k, next(iter_new_key), size))
full_key = tuple(full_key)
if all(isinstance(k, integer_types + (slice,)) for k in full_key):
return BasicIndexer(full_key)
return OuterIndexer(full_key)
@property
def shape(self):
shape = []
for size, k in zip(self.array.shape, self.key.tuple):
if isinstance(k, slice):
shape.append(len(range(*k.indices(size))))
elif isinstance(k, np.ndarray):
shape.append(k.size)
return tuple(shape)
def __array__(self, dtype=None):
array = as_indexable(self.array)
return np.asarray(array[self.key], dtype=None)
def transpose(self, order):
return LazilyVectorizedIndexedArray(self.array, self.key).transpose(order)
def __getitem__(self, indexer):
if isinstance(indexer, VectorizedIndexer):
array = LazilyVectorizedIndexedArray(self.array, self.key)
return array[indexer]
return type(self)(self.array, self._updated_key(indexer))
def __setitem__(self, key, value):
if isinstance(key, VectorizedIndexer):
raise NotImplementedError(
"Lazy item assignment with the vectorized indexer is not yet "
"implemented. Load your data first by .load() or compute()."
)
full_key = self._updated_key(key)
self.array[full_key] = value
def __repr__(self):
return f"{type(self).__name__}(array={self.array!r}, key={self.key!r})"
class LazilyVectorizedIndexedArray(ExplicitlyIndexedNDArrayMixin):
"""Wrap an array to make vectorized indexing lazy.
"""
__slots__ = ("array", "key")
def __init__(self, array, key):
"""
Parameters
----------
array : array_like
Array like object to index.
key : VectorizedIndexer
"""
if isinstance(key, (BasicIndexer, OuterIndexer)):
self.key = _outer_to_vectorized_indexer(key, array.shape)
else:
self.key = _arrayize_vectorized_indexer(key, array.shape)
self.array = as_indexable(array)
@property
def shape(self):
return np.broadcast(*self.key.tuple).shape
def __array__(self, dtype=None):
        return np.asarray(self.array[self.key], dtype=dtype)
def _updated_key(self, new_key):
return _combine_indexers(self.key, self.shape, new_key)
def __getitem__(self, indexer):
# If the indexed array becomes a scalar, return LazilyOuterIndexedArray
if all(isinstance(ind, integer_types) for ind in indexer.tuple):
key = BasicIndexer(tuple(k[indexer.tuple] for k in self.key.tuple))
return LazilyOuterIndexedArray(self.array, key)
return type(self)(self.array, self._updated_key(indexer))
def transpose(self, order):
key = VectorizedIndexer(tuple(k.transpose(order) for k in self.key.tuple))
return type(self)(self.array, key)
def __setitem__(self, key, value):
raise NotImplementedError(
"Lazy item assignment with the vectorized indexer is not yet "
"implemented. Load your data first by .load() or compute()."
)
def __repr__(self):
return f"{type(self).__name__}(array={self.array!r}, key={self.key!r})"
def _wrap_numpy_scalars(array):
"""Wrap NumPy scalars in 0d arrays."""
if np.isscalar(array):
return np.array(array)
else:
return array
class CopyOnWriteArray(ExplicitlyIndexedNDArrayMixin):
    """Wrap an array so that the first item assignment copies the wrapped array."""
    __slots__ = ("array", "_copied")
def __init__(self, array):
self.array = as_indexable(array)
self._copied = False
def _ensure_copied(self):
if not self._copied:
self.array = as_indexable(np.array(self.array))
self._copied = True
def __array__(self, dtype=None):
return np.asarray(self.array, dtype=dtype)
def __getitem__(self, key):
return type(self)(_wrap_numpy_scalars(self.array[key]))
def transpose(self, order):
return self.array.transpose(order)
def __setitem__(self, key, value):
self._ensure_copied()
self.array[key] = value
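# --- Editor's illustrative sketch (not part of upstream xarray) -------------
# CopyOnWriteArray defers copying until the first assignment, so the wrapped
# array is never mutated in place. Hypothetical usage, for exposition only.
def _example_copy_on_write_array():
    base = np.zeros(3)
    cow = CopyOnWriteArray(base)
    cow[BasicIndexer((0,))] = 1.0   # first assignment triggers the copy
    assert base[0] == 0.0           # original data left untouched
    assert np.asarray(cow)[0] == 1.0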
class MemoryCachedArray(ExplicitlyIndexedNDArrayMixin):
    """Wrap an array so that its values are cached in memory on first materialization."""
    __slots__ = ("array",)
def __init__(self, array):
self.array = _wrap_numpy_scalars(as_indexable(array))
def _ensure_cached(self):
if not isinstance(self.array, NumpyIndexingAdapter):
self.array = NumpyIndexingAdapter(np.asarray(self.array))
def __array__(self, dtype=None):
self._ensure_cached()
return np.asarray(self.array, dtype=dtype)
def __getitem__(self, key):
return type(self)(_wrap_numpy_scalars(self.array[key]))
def transpose(self, order):
return self.array.transpose(order)
def __setitem__(self, key, value):
self.array[key] = value
def as_indexable(array):
"""
    This function always returns an ExplicitlyIndexed subclass, so that
    vectorized indexing is always possible with the returned object.
"""
if isinstance(array, ExplicitlyIndexed):
return array
if isinstance(array, np.ndarray):
return NumpyIndexingAdapter(array)
if isinstance(array, pd.Index):
return PandasIndexAdapter(array)
if isinstance(array, dask_array_type):
return DaskIndexingAdapter(array)
if hasattr(array, "__array_function__"):
return NdArrayLikeIndexingAdapter(array)
raise TypeError("Invalid array type: {}".format(type(array)))
def _outer_to_vectorized_indexer(key, shape):
"""Convert an OuterIndexer into an vectorized indexer.
Parameters
----------
key : Outer/Basic Indexer
An indexer to convert.
shape : tuple
Shape of the array subject to the indexing.
Returns
-------
VectorizedIndexer
Tuple suitable for use to index a NumPy array with vectorized indexing.
Each element is an array: broadcasting them together gives the shape
of the result.
"""
key = key.tuple
n_dim = len([k for k in key if not isinstance(k, integer_types)])
i_dim = 0
new_key = []
for k, size in zip(key, shape):
if isinstance(k, integer_types):
new_key.append(np.array(k).reshape((1,) * n_dim))
else: # np.ndarray or slice
if isinstance(k, slice):
k = np.arange(*k.indices(size))
assert k.dtype.kind in {"i", "u"}
            # reshape the 1d key so it broadcasts along its own result
            # dimension only (size 1 along every other non-integer dimension)
            new_shape = (1,) * i_dim + (k.size,) + (1,) * (n_dim - i_dim - 1)
            new_key.append(k.reshape(new_shape))
i_dim += 1
return VectorizedIndexer(tuple(new_key))
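# --- Editor's illustrative sketch (not part of upstream xarray) -------------
# How _outer_to_vectorized_indexer reshapes an outer key so that broadcasting
# the pieces together yields the shape of the indexing result.
def _example_outer_to_vectorized_indexer():
    key = OuterIndexer((np.array([0, 2]), slice(None)))
    vkey = _outer_to_vectorized_indexer(key, shape=(3, 4))
    # the 1d array becomes a column, the slice an arange row
    assert [k.shape for k in vkey.tuple] == [(2, 1), (1, 4)]
    assert np.broadcast(*vkey.tuple).shape == (2, 4)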
def _outer_to_numpy_indexer(key, shape):
"""Convert an OuterIndexer into an indexer for NumPy.
Parameters
----------
key : Basic/OuterIndexer
An indexer to convert.
shape : tuple
Shape of the array subject to the indexing.
Returns
-------
tuple
Tuple suitable for use to index a NumPy array.
"""
if len([k for k in key.tuple if not isinstance(k, slice)]) <= 1:
# If there is only one vector and all others are slice,
# it can be safely used in mixed basic/advanced indexing.
# Boolean index should already be converted to integer array.
return key.tuple
else:
return _outer_to_vectorized_indexer(key, shape).tuple
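# --- Editor's illustrative sketch (not part of upstream xarray) -------------
# _outer_to_numpy_indexer passes keys with at most one vector straight through
# (mixed basic/advanced indexing is safe there) and falls back to a vectorized
# key otherwise, so NumPy takes the outer product instead of zipping vectors.
def _example_outer_to_numpy_indexer():
    data = np.arange(12).reshape(3, 4)
    one_vector = OuterIndexer((np.array([0, 2]), slice(1, 3)))
    assert data[_outer_to_numpy_indexer(one_vector, data.shape)].shape == (2, 2)
    two_vectors = OuterIndexer((np.array([0, 2]), np.array([1, 3])))
    assert data[_outer_to_numpy_indexer(two_vectors, data.shape)].shape == (2, 2)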
def _combine_indexers(old_key, shape, new_key):
""" Combine two indexers.
Parameters
----------
old_key: ExplicitIndexer
The first indexer for the original array
shape: tuple of ints
Shape of the original array to be indexed by old_key
new_key:
The second indexer for indexing original[old_key]
"""
if not isinstance(old_key, VectorizedIndexer):
old_key = _outer_to_vectorized_indexer(old_key, shape)
if len(old_key.tuple) == 0:
return new_key
new_shape = np.broadcast(*old_key.tuple).shape
if isinstance(new_key, VectorizedIndexer):
new_key = _arrayize_vectorized_indexer(new_key, new_shape)
else:
new_key = _outer_to_vectorized_indexer(new_key, new_shape)
return VectorizedIndexer(
tuple(o[new_key.tuple] for o in np.broadcast_arrays(*old_key.tuple))
)
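# --- Editor's illustrative sketch (not part of upstream xarray) -------------
# _combine_indexers composes two selections into a single vectorized key:
# indexing the original array once with the combined key is equivalent to
# indexing twice. Values below are checked against plain NumPy indexing.
def _example_combine_indexers():
    data = np.arange(12).reshape(3, 4)
    old_key = OuterIndexer((np.array([0, 2]), slice(None)))   # rows 0 and 2
    new_key = OuterIndexer((np.array([1]), slice(None)))      # then row 1 of that
    combined = _combine_indexers(old_key, data.shape, new_key)
    np.testing.assert_array_equal(data[combined.tuple], data[[0, 2]][[1]])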
@enum.unique
class IndexingSupport(enum.Enum):
# for backends that support only basic indexer
BASIC = 0
# for backends that support basic / outer indexer
OUTER = 1
# for backends that support outer indexer including at most 1 vector.
OUTER_1VECTOR = 2
# for backends that support full vectorized indexer.
VECTORIZED = 3
def explicit_indexing_adapter(
key: ExplicitIndexer,
shape: Tuple[int, ...],
indexing_support: IndexingSupport,
raw_indexing_method: Callable,
) -> Any:
"""Support explicit indexing by delegating to a raw indexing method.
Outer and/or vectorized indexers are supported by indexing a second time
with a NumPy array.
Parameters
----------
key : ExplicitIndexer
Explicit indexing object.
shape : Tuple[int, ...]
Shape of the indexed array.
indexing_support : IndexingSupport enum
Form of indexing supported by raw_indexing_method.
raw_indexing_method: callable
Function (like ndarray.__getitem__) that when called with indexing key
in the form of a tuple returns an indexed array.
Returns
-------
Indexing result, in the form of a duck numpy-array.
"""
raw_key, numpy_indices = decompose_indexer(key, shape, indexing_support)
result = raw_indexing_method(raw_key.tuple)
if numpy_indices.tuple:
# index the loaded np.ndarray
result = NumpyIndexingAdapter(np.asarray(result))[numpy_indices]
return result
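# --- Editor's illustrative sketch (not part of upstream xarray) -------------
# explicit_indexing_adapter in action with a backend that only understands
# basic (slice) keys: the backend loads a rectangular block and the remaining
# outer selection is applied in memory. ``data.__getitem__`` stands in for a
# backend's raw indexing method here.
def _example_explicit_indexing_adapter():
    data = np.arange(12).reshape(3, 4)
    key = OuterIndexer((np.array([0, 2]), np.array([1, 3])))
    res = explicit_indexing_adapter(
        key, data.shape, IndexingSupport.BASIC, data.__getitem__
    )
    np.testing.assert_array_equal(res, data[np.ix_([0, 2], [1, 3])])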
def decompose_indexer(
indexer: ExplicitIndexer, shape: Tuple[int, ...], indexing_support: IndexingSupport
) -> Tuple[ExplicitIndexer, ExplicitIndexer]:
if isinstance(indexer, VectorizedIndexer):
return _decompose_vectorized_indexer(indexer, shape, indexing_support)
if isinstance(indexer, (BasicIndexer, OuterIndexer)):
return _decompose_outer_indexer(indexer, shape, indexing_support)
raise TypeError(f"unexpected key type: {indexer}")
def _decompose_slice(key, size):
""" convert a slice to successive two slices. The first slice always has
a positive step.
"""
start, stop, step = key.indices(size)
if step > 0:
# If key already has a positive step, use it as is in the backend
return key, slice(None)
else:
# determine stop precisely for step > 1 case
# e.g. [98:2:-2] -> [98:3:-2]
stop = start + int((stop - start - 1) / step) * step + 1
start, stop = stop + 1, start + 1
return slice(start, stop, -step), slice(None, None, -1)
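# --- Editor's illustrative sketch (not part of upstream xarray) -------------
# _decompose_slice turns a negative-step slice into a positive-step slice for
# the backend plus an in-memory reversal, e.g. data[98:2:-2] == data[4:99:2][::-1].
def _example_decompose_slice():
    bk_slice, np_slice = _decompose_slice(slice(98, 2, -2), 100)
    assert bk_slice == slice(4, 99, 2)
    assert np_slice == slice(None, None, -1)
    data = np.arange(100)
    np.testing.assert_array_equal(data[bk_slice][np_slice], data[98:2:-2])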
def _decompose_vectorized_indexer(
indexer: VectorizedIndexer,
shape: Tuple[int, ...],
indexing_support: IndexingSupport,
) -> Tuple[ExplicitIndexer, ExplicitIndexer]:
"""
    Decompose a vectorized indexer into two successive indexers, where the
first indexer will be used to index backend arrays, while the second one
is used to index loaded on-memory np.ndarray.
Parameters
----------
indexer: VectorizedIndexer
    indexing_support: one of the IndexingSupport entries
Returns
-------
backend_indexer: OuterIndexer or BasicIndexer
np_indexers: an ExplicitIndexer (VectorizedIndexer / BasicIndexer)
Notes
-----
This function is used to realize the vectorized indexing for the backend
arrays that only support basic or outer indexing.
    As an example, let us consider indexing a few elements from a backend array
    with a vectorized indexer ([0, 3, 1], [2, 3, 2]).
    Even if the backend array only supports outer indexing, it is more
    efficient to load a subslice of the array than to load the entire array:
>>> backend_indexer = OuterIndexer([0, 1, 3], [2, 3])
>>> array = array[backend_indexer] # load subslice of the array
>>> np_indexer = VectorizedIndexer([0, 2, 1], [0, 1, 0])
>>> array[np_indexer] # vectorized indexing for on-memory np.ndarray.
"""
assert isinstance(indexer, VectorizedIndexer)
if indexing_support is IndexingSupport.VECTORIZED:
return indexer, BasicIndexer(())
backend_indexer_elems = []
np_indexer_elems = []
# convert negative indices
indexer_elems = [
np.where(k < 0, k + s, k) if isinstance(k, np.ndarray) else k
for k, s in zip(indexer.tuple, shape)
]
for k, s in zip(indexer_elems, shape):
if isinstance(k, slice):
# If it is a slice, then we will slice it as-is
# (but make its step positive) in the backend,
# and then use all of it (slice(None)) for the in-memory portion.
bk_slice, np_slice = _decompose_slice(k, s)
backend_indexer_elems.append(bk_slice)
np_indexer_elems.append(np_slice)
else:
# If it is a (multidimensional) np.ndarray, just pickup the used
# keys without duplication and store them as a 1d-np.ndarray.
oind, vind = np.unique(k, return_inverse=True)
backend_indexer_elems.append(oind)
np_indexer_elems.append(vind.reshape(*k.shape))
backend_indexer = OuterIndexer(tuple(backend_indexer_elems))
np_indexer = VectorizedIndexer(tuple(np_indexer_elems))
if indexing_support is IndexingSupport.OUTER:
return backend_indexer, np_indexer
# If the backend does not support outer indexing,
# backend_indexer (OuterIndexer) is also decomposed.
backend_indexer1, np_indexer1 = _decompose_outer_indexer(
backend_indexer, shape, indexing_support
)
np_indexer = _combine_indexers(np_indexer1, shape, np_indexer)
return backend_indexer1, np_indexer
def _decompose_outer_indexer(
indexer: Union[BasicIndexer, OuterIndexer],
shape: Tuple[int, ...],
indexing_support: IndexingSupport,
) -> Tuple[ExplicitIndexer, ExplicitIndexer]:
"""
    Decompose an outer indexer into two successive indexers, where the
first indexer will be used to index backend arrays, while the second one
is used to index the loaded on-memory np.ndarray.
Parameters
----------
indexer: OuterIndexer or BasicIndexer
indexing_support: One of the entries of IndexingSupport
Returns
-------
backend_indexer: OuterIndexer or BasicIndexer
np_indexers: an ExplicitIndexer (OuterIndexer / BasicIndexer)
Notes
-----
This function is used to realize the vectorized indexing for the backend
arrays that only support basic or outer indexing.
    As an example, let us consider indexing a few elements from a backend array
    with an orthogonal indexer ([0, 3, 1], [2, 3, 2]).
    Even if the backend array only supports basic indexing, it is more
    efficient to load a subslice of the array than to load the entire array:
>>> backend_indexer = BasicIndexer(slice(0, 3), slice(2, 3))
>>> array = array[backend_indexer] # load subslice of the array
>>> np_indexer = OuterIndexer([0, 2, 1], [0, 1, 0])
>>> array[np_indexer] # outer indexing for on-memory np.ndarray.
"""
if indexing_support == IndexingSupport.VECTORIZED:
return indexer, BasicIndexer(())
assert isinstance(indexer, (OuterIndexer, BasicIndexer))
backend_indexer = []
np_indexer = []
# make indexer positive
pos_indexer = []
for k, s in zip(indexer.tuple, shape):
if isinstance(k, np.ndarray):
pos_indexer.append(np.where(k < 0, k + s, k))
elif isinstance(k, integer_types) and k < 0:
pos_indexer.append(k + s)
else:
pos_indexer.append(k)
indexer_elems = pos_indexer
if indexing_support is IndexingSupport.OUTER_1VECTOR:
        # some backends such as h5py support only 1 vector in indexers
        # We keep the vector for the axis where replacing it by a min:max
        # slice would load the most superfluous data (the largest "gain"),
        # and convert the array keys of every other axis to slices.
gains = [
(np.max(k) - np.min(k) + 1.0) / len(np.unique(k))
if isinstance(k, np.ndarray)
else 0
for k in indexer_elems
]
array_index = np.argmax(np.array(gains)) if len(gains) > 0 else None
for i, (k, s) in enumerate(zip(indexer_elems, shape)):
if isinstance(k, np.ndarray) and i != array_index:
# np.ndarray key is converted to slice that covers the entire
# entries of this key.
backend_indexer.append(slice(np.min(k), np.max(k) + 1))
np_indexer.append(k - np.min(k))
elif isinstance(k, np.ndarray):
# Remove duplicates and sort them in the increasing order
pkey, ekey = np.unique(k, return_inverse=True)
backend_indexer.append(pkey)
np_indexer.append(ekey)
elif isinstance(k, integer_types):
backend_indexer.append(k)
else: # slice: convert positive step slice for backend
bk_slice, np_slice = _decompose_slice(k, s)
backend_indexer.append(bk_slice)
np_indexer.append(np_slice)
return (OuterIndexer(tuple(backend_indexer)), OuterIndexer(tuple(np_indexer)))
if indexing_support == IndexingSupport.OUTER:
for k, s in zip(indexer_elems, shape):
if isinstance(k, slice):
# slice: convert positive step slice for backend
bk_slice, np_slice = _decompose_slice(k, s)
backend_indexer.append(bk_slice)
np_indexer.append(np_slice)
elif isinstance(k, integer_types):
backend_indexer.append(k)
elif isinstance(k, np.ndarray) and (np.diff(k) >= 0).all():
backend_indexer.append(k)
np_indexer.append(slice(None))
else:
# Remove duplicates and sort them in the increasing order
oind, vind = np.unique(k, return_inverse=True)
backend_indexer.append(oind)
np_indexer.append(vind.reshape(*k.shape))
return (OuterIndexer(tuple(backend_indexer)), OuterIndexer(tuple(np_indexer)))
# basic indexer
assert indexing_support == IndexingSupport.BASIC
for k, s in zip(indexer_elems, shape):
if isinstance(k, np.ndarray):
# np.ndarray key is converted to slice that covers the entire
# entries of this key.
backend_indexer.append(slice(np.min(k), np.max(k) + 1))
np_indexer.append(k - np.min(k))
elif isinstance(k, integer_types):
backend_indexer.append(k)
else: # slice: convert positive step slice for backend
bk_slice, np_slice = _decompose_slice(k, s)
backend_indexer.append(bk_slice)
np_indexer.append(np_slice)
return (BasicIndexer(tuple(backend_indexer)), OuterIndexer(tuple(np_indexer)))
def _arrayize_vectorized_indexer(indexer, shape):
""" Return an identical vindex but slices are replaced by arrays """
slices = [v for v in indexer.tuple if isinstance(v, slice)]
if len(slices) == 0:
return indexer
arrays = [v for v in indexer.tuple if isinstance(v, np.ndarray)]
n_dim = arrays[0].ndim if len(arrays) > 0 else 0
i_dim = 0
new_key = []
for v, size in zip(indexer.tuple, shape):
if isinstance(v, np.ndarray):
new_key.append(np.reshape(v, v.shape + (1,) * len(slices)))
else: # slice
shape = (1,) * (n_dim + i_dim) + (-1,) + (1,) * (len(slices) - i_dim - 1)
new_key.append(np.arange(*v.indices(size)).reshape(shape))
i_dim += 1
return VectorizedIndexer(tuple(new_key))
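# --- Editor's illustrative sketch (not part of upstream xarray) -------------
# _arrayize_vectorized_indexer replaces the slices of a vectorized key by
# aranges shaped so that they broadcast against the array keys.
def _example_arrayize_vectorized_indexer():
    vkey = VectorizedIndexer((np.array([0, 2]), slice(None)))
    arrayized = _arrayize_vectorized_indexer(vkey, shape=(3, 4))
    assert [k.shape for k in arrayized.tuple] == [(2, 1), (1, 4)]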
def _dask_array_with_chunks_hint(array, chunks):
"""Create a dask array using the chunks hint for dimensions of size > 1."""
import dask.array as da
if len(chunks) < array.ndim:
raise ValueError("not enough chunks in hint")
new_chunks = []
for chunk, size in zip(chunks, array.shape):
new_chunks.append(chunk if size > 1 else (1,))
return da.from_array(array, new_chunks)
def _logical_any(args):
return functools.reduce(operator.or_, args)
def _masked_result_drop_slice(key, data=None):
key = (k for k in key if not isinstance(k, slice))
chunks_hint = getattr(data, "chunks", None)
new_keys = []
for k in key:
if isinstance(k, np.ndarray):
if isinstance(data, dask_array_type):
new_keys.append(_dask_array_with_chunks_hint(k, chunks_hint))
elif isinstance(data, sparse_array_type):
import sparse
new_keys.append(sparse.COO.from_numpy(k))
else:
new_keys.append(k)
else:
new_keys.append(k)
mask = _logical_any(k == -1 for k in new_keys)
return mask
def create_mask(indexer, shape, data=None):
"""Create a mask for indexing with a fill-value.
Parameters
----------
indexer : ExplicitIndexer
Indexer with -1 in integer or ndarray value to indicate locations in
the result that should be masked.
shape : tuple
Shape of the array being indexed.
data : optional
        Data for which mask is being created. If data is a dask array, its chunks
are used as a hint for chunks on the resulting mask. If data is a sparse
array, the returned mask is also a sparse array.
Returns
-------
mask : bool, np.ndarray, SparseArray or dask.array.Array with dtype=bool
Same type as data. Has the same shape as the indexing result.
"""
if isinstance(indexer, OuterIndexer):
key = _outer_to_vectorized_indexer(indexer, shape).tuple
assert not any(isinstance(k, slice) for k in key)
mask = _masked_result_drop_slice(key, data)
elif isinstance(indexer, VectorizedIndexer):
key = indexer.tuple
base_mask = _masked_result_drop_slice(key, data)
slice_shape = tuple(
np.arange(*k.indices(size)).size
for k, size in zip(key, shape)
if isinstance(k, slice)
)
expanded_mask = base_mask[(Ellipsis,) + (np.newaxis,) * len(slice_shape)]
mask = duck_array_ops.broadcast_to(expanded_mask, base_mask.shape + slice_shape)
elif isinstance(indexer, BasicIndexer):
mask = any(k == -1 for k in indexer.tuple)
else:
raise TypeError("unexpected key type: {}".format(type(indexer)))
return mask
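# --- Editor's illustrative sketch (not part of upstream xarray) -------------
# create_mask flags the positions selected with the fill marker -1.
def _example_create_mask():
    key = OuterIndexer((np.array([0, -1, 2]),))
    np.testing.assert_array_equal(create_mask(key, (5,)),
                                  np.array([False, True, False]))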
def _posify_mask_subindexer(index):
"""Convert masked indices in a flat array to the nearest unmasked index.
Parameters
----------
index : np.ndarray
One dimensional ndarray with dtype=int.
Returns
-------
np.ndarray
One dimensional ndarray with all values equal to -1 replaced by an
adjacent non-masked element.
"""
masked = index == -1
unmasked_locs = np.flatnonzero(~masked)
if not unmasked_locs.size:
# indexing unmasked_locs is invalid
return np.zeros_like(index)
masked_locs = np.flatnonzero(masked)
prev_value = np.maximum(0, np.searchsorted(unmasked_locs, masked_locs) - 1)
new_index = index.copy()
new_index[masked_locs] = index[unmasked_locs[prev_value]]
return new_index
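# --- Editor's illustrative sketch (not part of upstream xarray) -------------
# _posify_mask_subindexer replaces every -1 by the nearest preceding unmasked
# value (falling back to the first unmasked value when there is none), which
# keeps dask reads pointing at nearby, already-needed blocks.
def _example_posify_mask_subindexer():
    index = np.array([-1, 0, -1, 2])
    np.testing.assert_array_equal(_posify_mask_subindexer(index),
                                  np.array([0, 0, 0, 2]))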
def posify_mask_indexer(indexer):
"""Convert masked values (-1) in an indexer to nearest unmasked values.
This routine is useful for dask, where it can be much faster to index
adjacent points than arbitrary points from the end of an array.
Parameters
----------
indexer : ExplicitIndexer
Input indexer.
Returns
-------
ExplicitIndexer
Same type of input, with all values in ndarray keys equal to -1
replaced by an adjacent non-masked element.
"""
key = tuple(
_posify_mask_subindexer(k.ravel()).reshape(k.shape)
if isinstance(k, np.ndarray)
else k
for k in indexer.tuple
)
return type(indexer)(key)
class NumpyIndexingAdapter(ExplicitlyIndexedNDArrayMixin):
"""Wrap a NumPy array to use explicit indexing."""
__slots__ = ("array",)
def __init__(self, array):
# In NumpyIndexingAdapter we only allow to store bare np.ndarray
if not isinstance(array, np.ndarray):
raise TypeError(
"NumpyIndexingAdapter only wraps np.ndarray. "
"Trying to wrap {}".format(type(array))
)
self.array = array
def _indexing_array_and_key(self, key):
if isinstance(key, OuterIndexer):
array = self.array
key = _outer_to_numpy_indexer(key, self.array.shape)
elif isinstance(key, VectorizedIndexer):
array = nputils.NumpyVIndexAdapter(self.array)
key = key.tuple
elif isinstance(key, BasicIndexer):
array = self.array
# We want 0d slices rather than scalars. This is achieved by
# appending an ellipsis (see
# https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#detailed-notes).
key = key.tuple + (Ellipsis,)
else:
raise TypeError("unexpected key type: {}".format(type(key)))
return array, key
def transpose(self, order):
return self.array.transpose(order)
def __getitem__(self, key):
array, key = self._indexing_array_and_key(key)
return array[key]
def __setitem__(self, key, value):
array, key = self._indexing_array_and_key(key)
try:
array[key] = value
except ValueError:
# More informative exception if read-only view
if not array.flags.writeable and not array.flags.owndata:
raise ValueError(
"Assignment destination is a view. "
"Do you want to .copy() array first?"
)
else:
raise
class NdArrayLikeIndexingAdapter(NumpyIndexingAdapter):
    """Wrap an object implementing the __array_function__ protocol to use explicit indexing."""
    __slots__ = ("array",)
def __init__(self, array):
if not hasattr(array, "__array_function__"):
raise TypeError(
"NdArrayLikeIndexingAdapter must wrap an object that "
"implements the __array_function__ protocol"
)
self.array = array
class DaskIndexingAdapter(ExplicitlyIndexedNDArrayMixin):
"""Wrap a dask array to support explicit indexing."""
__slots__ = ("array",)
def __init__(self, array):
""" This adapter is created in Variable.__getitem__ in
Variable._broadcast_indexes.
"""
self.array = array
def __getitem__(self, key):
if isinstance(key, BasicIndexer):
return self.array[key.tuple]
elif isinstance(key, VectorizedIndexer):
return self.array.vindex[key.tuple]
else:
assert isinstance(key, OuterIndexer)
key = key.tuple
try:
return self.array[key]
except NotImplementedError:
# manual orthogonal indexing.
# TODO: port this upstream into dask in a saner way.
value = self.array
for axis, subkey in reversed(list(enumerate(key))):
value = value[(slice(None),) * axis + (subkey,)]
return value
def __setitem__(self, key, value):
raise TypeError(
"this variable's data is stored in a dask array, "
"which does not support item assignment. To "
"assign to this variable, you must first load it "
"into memory explicitly using the .load() "
"method or accessing its .values attribute."
)
def transpose(self, order):
return self.array.transpose(order)
class PandasIndexAdapter(ExplicitlyIndexedNDArrayMixin):
"""Wrap a pandas.Index to preserve dtypes and handle explicit indexing.
"""
__slots__ = ("array", "_dtype")
def __init__(self, array: Any, dtype: DTypeLike = None):
self.array = utils.safe_cast_to_index(array)
if dtype is None:
if isinstance(array, pd.PeriodIndex):
dtype = np.dtype("O")
elif hasattr(array, "categories"):
# category isn't a real numpy dtype
dtype = array.categories.dtype
elif not utils.is_valid_numpy_dtype(array.dtype):
dtype = np.dtype("O")
else:
dtype = array.dtype
else:
dtype = np.dtype(dtype)
self._dtype = dtype
@property
def dtype(self) -> np.dtype:
return self._dtype
def __array__(self, dtype: DTypeLike = None) -> np.ndarray:
if dtype is None:
dtype = self.dtype
array = self.array
if isinstance(array, pd.PeriodIndex):
with suppress(AttributeError):
# this might not be public API
array = array.astype("object")
return np.asarray(array.values, dtype=dtype)
@property
def shape(self) -> Tuple[int]:
return (len(self.array),)
def __getitem__(
self, indexer
) -> Union[NumpyIndexingAdapter, np.ndarray, np.datetime64, np.timedelta64]:
key = indexer.tuple
if isinstance(key, tuple) and len(key) == 1:
# unpack key so it can index a pandas.Index object (pandas.Index
# objects don't like tuples)
key, = key
if getattr(key, "ndim", 0) > 1: # Return np-array if multidimensional
return NumpyIndexingAdapter(self.array.values)[indexer]
result = self.array[key]
if isinstance(result, pd.Index):
result = PandasIndexAdapter(result, dtype=self.dtype)
else:
# result is a scalar
if result is pd.NaT:
# work around the impossibility of casting NaT with asarray
# note: it probably would be better in general to return
                # pd.Timestamp rather than np.datetime64 but this is easier
# (for now)
result = np.datetime64("NaT", "ns")
elif isinstance(result, timedelta):
result = np.timedelta64(getattr(result, "value", result), "ns")
elif isinstance(result, pd.Timestamp):
# Work around for GH: pydata/xarray#1932 and numpy/numpy#10668
# numpy fails to convert pd.Timestamp to np.datetime64[ns]
result = np.asarray(result.to_datetime64())
elif self.dtype != object:
result = np.asarray(result, dtype=self.dtype)
# as for numpy.ndarray indexing, we always want the result to be
# a NumPy array.
result = utils.to_0d_array(result)
return result
def transpose(self, order) -> pd.Index:
return self.array # self.array should be always one-dimensional
def __repr__(self) -> str:
return "{}(array={!r}, dtype={!r})".format(
type(self).__name__, self.array, self.dtype
)
def copy(self, deep: bool = True) -> "PandasIndexAdapter":
# Not the same as just writing `self.array.copy(deep=deep)`, as
# shallow copies of the underlying numpy.ndarrays become deep ones
# upon pickling
# >>> len(pickle.dumps((self.array, self.array)))
# 4000281
# >>> len(pickle.dumps((self.array, self.array.copy(deep=False))))
# 8000341
array = self.array.copy(deep=True) if deep else self.array
return PandasIndexAdapter(array, self._dtype)
| apache-2.0 |
jreback/pandas | pandas/tests/series/indexing/test_xs.py | 2 | 2293 | import numpy as np
from pandas import MultiIndex, Series, date_range
import pandas._testing as tm
def test_xs_datetimelike_wrapping():
# GH#31630 a case where we shouldn't wrap datetime64 in Timestamp
arr = date_range("2016-01-01", periods=3)._data._data
ser = Series(arr, dtype=object)
for i in range(len(ser)):
ser.iloc[i] = arr[i]
assert ser.dtype == object
assert isinstance(ser[0], np.datetime64)
result = ser.xs(0)
assert isinstance(result, np.datetime64)
class TestXSWithMultiIndex:
def test_xs_level_series(self, multiindex_dataframe_random_data):
df = multiindex_dataframe_random_data
ser = df["A"]
expected = ser[:, "two"]
result = df.xs("two", level=1)["A"]
tm.assert_series_equal(result, expected)
def test_series_getitem_multiindex_xs_by_label(self):
# GH#5684
idx = MultiIndex.from_tuples(
[("a", "one"), ("a", "two"), ("b", "one"), ("b", "two")]
)
ser = Series([1, 2, 3, 4], index=idx)
return_value = ser.index.set_names(["L1", "L2"], inplace=True)
assert return_value is None
expected = Series([1, 3], index=["a", "b"])
return_value = expected.index.set_names(["L1"], inplace=True)
assert return_value is None
result = ser.xs("one", level="L2")
tm.assert_series_equal(result, expected)
    def test_series_getitem_multiindex_xs(self):
# GH#6258
dt = list(date_range("20130903", periods=3))
idx = MultiIndex.from_product([list("AB"), dt])
ser = Series([1, 3, 4, 1, 3, 4], index=idx)
expected = Series([1, 1], index=list("AB"))
result = ser.xs("20130903", level=1)
tm.assert_series_equal(result, expected)
def test_series_xs_droplevel_false(self):
# GH: 19056
mi = MultiIndex.from_tuples(
[("a", "x"), ("a", "y"), ("b", "x")], names=["level1", "level2"]
)
ser = Series([1, 1, 1], index=mi)
result = ser.xs("a", axis=0, drop_level=False)
expected = Series(
[1, 1],
index=MultiIndex.from_tuples(
[("a", "x"), ("a", "y")], names=["level1", "level2"]
),
)
tm.assert_series_equal(result, expected)
| bsd-3-clause |
liam2/larray | larray/tests/test_array.py | 2 | 174871 | # -*- coding: utf8 -*-
from __future__ import absolute_import, division, print_function
import os
import re
import sys
import pytest
import numpy as np
import pandas as pd
from collections import OrderedDict
from larray.tests.common import (inputpath, tmp_path, meta,
assert_array_equal, assert_array_nan_equal, assert_larray_equiv, assert_larray_equal,
needs_xlwings, needs_pytables, needs_xlsxwriter, needs_xlrd,
needs_python35, needs_python36, needs_python37)
from larray import (Array, LArray, Axis, LGroup, union, zeros, zeros_like, ndtest, empty, ones, eye, diag, stack,
clip, exp, where, X, mean, isnan, round, read_hdf, read_csv, read_eurostat, read_excel,
from_lists, from_string, open_excel, from_frame, sequence, nan, IGroup)
from larray.inout.pandas import from_series
from larray.core.axis import _to_ticks, _to_key
from larray.util.misc import LHDFStore
from larray.util.compat import StringIO
from larray.core.metadata import Metadata
# ================== #
# Test Value Strings #
# ================== #
def test_value_string_split():
assert_array_equal(_to_ticks('M,F'), np.asarray(['M', 'F']))
assert_array_equal(_to_ticks('M, F'), np.asarray(['M', 'F']))
def test_value_string_union():
assert union('A11,A22', 'A12,A22') == ['A11', 'A22', 'A12']
def test_value_string_range():
assert_array_equal(_to_ticks('0..115'), np.asarray(range(116)))
assert_array_equal(_to_ticks('..115'), np.asarray(range(116)))
with pytest.raises(ValueError):
_to_ticks('10..')
with pytest.raises(ValueError):
_to_ticks('..')
# ================ #
# Test Key Strings #
# ================ #
def test_key_string_nonstring():
assert _to_key(('M', 'F')) == ['M', 'F']
assert _to_key(['M', 'F']) == ['M', 'F']
def test_key_string_split():
assert _to_key('M,F') == ['M', 'F']
assert _to_key('M, F') == ['M', 'F']
assert _to_key('M,') == ['M']
assert _to_key('M') == 'M'
def test_key_string_slice_strings():
# these two examples have different results and this is fine because numeric axes do not necessarily start at 0
assert _to_key('0:115') == slice(0, 115)
assert _to_key(':115') == slice(115)
assert _to_key('10:') == slice(10, None)
assert _to_key(':') == slice(None)
# =================== #
# Test Metadata #
# =================== #
def test_read_set_update_delete_metadata(meta, tmpdir):
# __eq__
meta2 = meta.copy()
assert meta2 == meta
# set/get metadata to/from an array
arr = ndtest((3, 3))
arr.meta = meta
assert arr.meta == meta
# access item
assert arr.meta.date == meta.date
# add new item
arr.meta.city = 'London'
assert arr.meta.city == 'London'
# update item
arr.meta.city = 'Berlin'
assert arr.meta.city == 'Berlin'
# __contains__
assert 'city' in arr.meta
# delete item
del arr.meta.city
assert arr.meta == meta
# __reduce__ and __reduce_ex__
import pickle
fname = os.path.join(tmpdir.strpath, 'test_metadata.pkl')
with open(fname, 'wb') as f:
pickle.dump(meta, f)
with open(fname, 'rb') as f:
meta2 = Metadata(pickle.load(f))
assert meta2 == meta
@needs_pytables
def test_metadata_hdf(meta, tmpdir):
key = 'meta'
fname = os.path.join(tmpdir.strpath, 'test_metadata.hdf')
with LHDFStore(fname) as store:
ndtest(3).to_hdf(store, key)
meta.to_hdf(store, key)
meta2 = Metadata.from_hdf(store, key)
assert meta2 == meta
def test_meta_arg_array_creation(array):
meta_list = [('title', 'test array'), ('description', 'Array used for testing'),
('author', 'John Cleese')]
meta = Metadata(meta_list)
# meta as list
arr = Array(array.data, array.axes, meta=meta_list)
assert arr.meta == meta
# meta as OrderedDict
arr = Array(array.data, array.axes, meta=OrderedDict(meta_list))
assert arr.meta == meta
# ================ #
# Test Array #
# ================ #
# AXES
lipro = Axis(['P%02d' % i for i in range(1, 16)], 'lipro')
age = Axis('age=0..115')
sex = Axis('sex=M,F')
vla = 'A11,A12,A13,A23,A24,A31,A32,A33,A34,A35,A36,A37,A38,A41,A42,A43,A44,A45,A46,A71,A72,A73'
wal = 'A25,A51,A52,A53,A54,A55,A56,A57,A61,A62,A63,A64,A65,A81,A82,A83,A84,A85,A91,A92,A93'
bru = 'A21'
vla_str = vla
wal_str = wal
bru_str = bru
belgium = union(vla, wal, bru)
geo = Axis(belgium, 'geo')
# ARRAYS
@pytest.fixture()
def array():
data = np.arange(116 * 44 * 2 * 15).reshape(116, 44, 2, 15).astype(float)
return Array(data, axes=(age, geo, sex, lipro))
@pytest.fixture()
def small_array():
small_data = np.arange(30).reshape(2, 15)
return Array(small_data, axes=(sex, lipro))
io_1d = ndtest(3)
io_2d = ndtest("a=1..3; b=b0,b1")
io_3d = ndtest("a=1..3; b=b0,b1; c=c0..c2")
io_int_labels = ndtest("a=0..2; b=0..2; c=0..2")
io_unsorted = ndtest("a=3..1; b=b1,b0; c=c2..c0")
io_missing_values = ndtest("a=1..3; b=b0,b1; c=c0..c2", dtype=float)
io_missing_values[2, 'b0'] = nan
io_missing_values[3, 'b1'] = nan
io_narrow_missing_values = io_missing_values.copy()
io_narrow_missing_values[2, 'b1', 'c1'] = nan
def test_larray_renamed_as_array():
with pytest.warns(FutureWarning) as caught_warnings:
arr = LArray([0, 1, 2, 3], 'a=a0..a3')
assert len(caught_warnings) == 1
assert caught_warnings[0].message.args[0] == "LArray has been renamed as Array."
assert caught_warnings[0].filename == __file__
def test_ndtest():
arr = ndtest('a=a0..a2')
assert arr.shape == (3,)
assert arr.axes.names == ['a']
assert_array_equal(arr.data, np.arange(3))
# using an explicit Axis object
a = Axis('a=a0..a2')
arr = ndtest(a)
assert arr.shape == (3,)
assert arr.axes.names == ['a']
assert_array_equal(arr.data, np.arange(3))
# using a group as an axis
arr = ndtest(a[:'a1'])
assert arr.shape == (2,)
assert arr.axes.names == ['a']
assert_array_equal(arr.data, np.arange(2))
def test_getattr(array):
assert type(array.geo) == Axis
assert array.geo is geo
with pytest.raises(AttributeError):
array.geom
def test_zeros():
la = zeros((geo, age))
assert la.shape == (44, 116)
assert_array_equal(la, np.zeros((44, 116)))
def test_zeros_like(array):
la = zeros_like(array)
assert la.shape == (116, 44, 2, 15)
assert_array_equal(la, np.zeros((116, 44, 2, 15)))
def test_bool():
a = ones([2])
# ValueError: The truth value of an array with more than one element
# is ambiguous. Use a.any() or a.all()
with pytest.raises(ValueError):
bool(a)
a = ones([1])
assert bool(a)
a = zeros([1])
assert not bool(a)
a = Array(np.array(2), [])
assert bool(a)
a = Array(np.array(0), [])
assert not bool(a)
def test_iter(small_array):
    res = list(small_array)
    assert_array_equal(res[0], small_array['M'])
    assert_array_equal(res[1], small_array['F'])
def test_keys():
arr = ndtest((2, 2))
a, b = arr.axes
keys = arr.keys()
assert list(keys) == [(a.i[0], b.i[0]), (a.i[0], b.i[1]), (a.i[1], b.i[0]), (a.i[1], b.i[1])]
assert keys[0] == (a.i[0], b.i[0])
assert keys[-1] == (a.i[1], b.i[1])
keys = arr.keys(ascending=False)
assert list(keys) == [(a.i[1], b.i[1]), (a.i[1], b.i[0]), (a.i[0], b.i[1]), (a.i[0], b.i[0])]
assert keys[0] == (a.i[1], b.i[1])
assert keys[-1] == (a.i[0], b.i[0])
keys = arr.keys(('b', 'a'))
assert list(keys) == [(b.i[0], a.i[0]), (b.i[0], a.i[1]), (b.i[1], a.i[0]), (b.i[1], a.i[1])]
assert keys[1] == (b.i[0], a.i[1])
assert keys[2] == (b.i[1], a.i[0])
keys = arr.keys(('b', 'a'), ascending=False)
assert list(keys) == [(b.i[1], a.i[1]), (b.i[1], a.i[0]), (b.i[0], a.i[1]), (b.i[0], a.i[0])]
assert keys[1] == (b.i[1], a.i[0])
assert keys[2] == (b.i[0], a.i[1])
keys = arr.keys('b')
assert list(keys) == [(b.i[0],), (b.i[1],)]
assert keys[0] == (b.i[0],)
assert keys[-1] == (b.i[1],)
keys = arr.keys('b', ascending=False)
assert list(keys) == [(b.i[1],), (b.i[0],)]
assert keys[0] == (b.i[1],)
assert keys[-1] == (b.i[0],)
def test_values():
arr = ndtest((2, 2))
a, b = arr.axes
values = arr.values()
assert list(values) == [0, 1, 2, 3]
assert values[0] == 0
assert values[-1] == 3
values = arr.values(ascending=False)
assert list(values) == [3, 2, 1, 0]
assert values[0] == 3
assert values[-1] == 0
values = arr.values(('b', 'a'))
assert list(values) == [0, 2, 1, 3]
assert values[1] == 2
assert values[2] == 1
values = arr.values(('b', 'a'), ascending=False)
assert list(values) == [3, 1, 2, 0]
assert values[1] == 1
assert values[2] == 2
values = arr.values('b')
res = list(values)
assert_larray_equal(res[0], arr['b0'])
assert_larray_equal(res[1], arr['b1'])
assert_larray_equal(values[0], arr['b0'])
assert_larray_equal(values[-1], arr['b1'])
values = arr.values('b', ascending=False)
res = list(values)
assert_larray_equal(res[0], arr['b1'])
assert_larray_equal(res[1], arr['b0'])
assert_larray_equal(values[0], arr['b1'])
assert_larray_equal(values[-1], arr['b0'])
def test_items():
arr = ndtest((2, 2))
a, b = arr.axes
items = arr.items()
assert list(items) == [((a.i[0], b.i[0]), 0), ((a.i[0], b.i[1]), 1), ((a.i[1], b.i[0]), 2), ((a.i[1], b.i[1]), 3)]
assert items[0] == ((a.i[0], b.i[0]), 0)
assert items[-1] == ((a.i[1], b.i[1]), 3)
items = arr.items(ascending=False)
assert list(items) == [((a.i[1], b.i[1]), 3), ((a.i[1], b.i[0]), 2), ((a.i[0], b.i[1]), 1), ((a.i[0], b.i[0]), 0)]
assert items[0] == ((a.i[1], b.i[1]), 3)
assert items[-1] == ((a.i[0], b.i[0]), 0)
items = arr.items(('b', 'a'))
assert list(items) == [((b.i[0], a.i[0]), 0), ((b.i[0], a.i[1]), 2), ((b.i[1], a.i[0]), 1), ((b.i[1], a.i[1]), 3)]
assert items[1] == ((b.i[0], a.i[1]), 2)
assert items[2] == ((b.i[1], a.i[0]), 1)
items = arr.items(('b', 'a'), ascending=False)
assert list(items) == [((b.i[1], a.i[1]), 3), ((b.i[1], a.i[0]), 1), ((b.i[0], a.i[1]), 2), ((b.i[0], a.i[0]), 0)]
assert items[1] == ((b.i[1], a.i[0]), 1)
assert items[2] == ((b.i[0], a.i[1]), 2)
items = arr.items('b')
items_list = list(items)
key, value = items[0]
assert key == (b.i[0],)
assert_larray_equal(value, arr['b0'])
key, value = items_list[0]
assert key == (b.i[0],)
assert_larray_equal(value, arr['b0'])
key, value = items[-1]
assert key == (b.i[1],)
assert_larray_equal(value, arr['b1'])
key, value = items_list[-1]
assert key == (b.i[1],)
assert_larray_equal(value, arr['b1'])
items = arr.items('b', ascending=False)
items_list = list(items)
key, value = items[0]
assert key == (b.i[1],)
assert_larray_equal(value, arr['b1'])
key, value = items_list[0]
assert key == (b.i[1],)
assert_larray_equal(value, arr['b1'])
key, value = items[-1]
assert key == (b.i[0],)
assert_larray_equal(value, arr['b0'])
key, value = items_list[-1]
assert key == (b.i[0],)
assert_larray_equal(value, arr['b0'])
def test_rename(array):
new_array = array.rename('sex', 'gender')
# old array axes names not modified
assert array.axes.names == ['age', 'geo', 'sex', 'lipro']
assert new_array.axes.names == ['age', 'geo', 'gender', 'lipro']
new_array = array.rename(sex, 'gender')
# old array axes names not modified
assert array.axes.names == ['age', 'geo', 'sex', 'lipro']
assert new_array.axes.names == ['age', 'geo', 'gender', 'lipro']
def test_info(array, meta):
array.meta = meta
expected = """\
title: test array
description: Array used for testing
author: John Cleese
location: Ministry of Silly Walks
office_number: 42
score: 9.7
date: 1970-03-21 00:00:00
116 x 44 x 2 x 15
age [116]: 0 1 2 ... 113 114 115
geo [44]: 'A11' 'A12' 'A13' ... 'A92' 'A93' 'A21'
sex [2]: 'M' 'F'
lipro [15]: 'P01' 'P02' 'P03' ... 'P13' 'P14' 'P15'
dtype: float64
memory used: 1.17 Mb"""
assert array.info == expected
def test_str(small_array, array):
lipro3 = lipro['P01:P03']
# zero dimension / scalar
assert str(small_array[lipro['P01'], sex['F']]) == "15"
# empty / len 0 first dimension
assert str(small_array[sex[[]]]) == "Array([])"
# one dimension
assert str(small_array[lipro3, sex['M']]) == """\
lipro P01 P02 P03
0 1 2"""
# two dimensions
assert str(small_array.filter(lipro=lipro3)) == """\
sex\\lipro P01 P02 P03
M 0 1 2
F 15 16 17"""
# four dimensions (too many rows)
assert str(array.filter(lipro=lipro3)) == """\
age geo sex\\lipro P01 P02 P03
0 A11 M 0.0 1.0 2.0
0 A11 F 15.0 16.0 17.0
0 A12 M 30.0 31.0 32.0
0 A12 F 45.0 46.0 47.0
0 A13 M 60.0 61.0 62.0
... ... ... ... ... ...
115 A92 F 153045.0 153046.0 153047.0
115 A93 M 153060.0 153061.0 153062.0
115 A93 F 153075.0 153076.0 153077.0
115 A21 M 153090.0 153091.0 153092.0
115 A21 F 153105.0 153106.0 153107.0"""
# too many columns
assert str(array['P01', 'A11', 'M']) == """\
age 0 1 2 ... 112 113 114 115
0.0 1320.0 2640.0 ... 147840.0 149160.0 150480.0 151800.0"""
arr = Array([0, ''], Axis(['a0', ''], 'a'))
assert str(arr) == "a a0 \n 0 "
def test_getitem(array):
raw = array.data
age, geo, sex, lipro = array.axes
age159 = age[[1, 5, 9]]
lipro159 = lipro['P01,P05,P09']
# LGroup at "correct" place
subset = array[age159]
assert subset.axes[1:] == (geo, sex, lipro)
assert subset.axes[0].equals(Axis([1, 5, 9], 'age'))
assert_array_equal(subset, raw[[1, 5, 9]])
# LGroup at "incorrect" place
assert_array_equal(array[lipro159], raw[..., [0, 4, 8]])
# multiple LGroup key (in "incorrect" order)
res = array[lipro159, age159]
assert res.axes.names == ['age', 'geo', 'sex', 'lipro']
assert_array_equal(res, raw[[1, 5, 9]][..., [0, 4, 8]])
# LGroup key and scalar
res = array[lipro159, 5]
assert res.axes.names == ['geo', 'sex', 'lipro']
assert_array_equal(res, raw[..., [0, 4, 8]][5])
# mixed LGroup/positional key
assert_array_equal(array[[1, 5, 9], lipro159],
raw[[1, 5, 9]][..., [0, 4, 8]])
# single None slice
assert_array_equal(array[:], raw)
# only Ellipsis
assert_array_equal(array[...], raw)
# Ellipsis and LGroup
assert_array_equal(array[..., lipro159], raw[..., [0, 4, 8]])
# string 'int..int'
assert_array_equal(array['10..13'], array['10,11,12,13'])
assert_array_equal(array['8, 10..13, 15'], array['8,10,11,12,13,15'])
# ambiguous label
arr = ndtest("a=l0,l1;b=l1,l2")
res = arr[arr.b['l1']]
assert_array_equal(res, arr.data[:, 0])
# scalar group on another axis
arr = ndtest((3, 2))
alt_a = Axis("alt_a=a1..a2")
lgroup = alt_a['a1']
assert_array_equal(arr[lgroup], arr['a1'])
pgroup = alt_a.i[0]
assert_array_equal(arr[pgroup], arr['a1'])
# key with duplicate axes
with pytest.raises(ValueError):
array[age[1, 2], age[3, 4]]
# key with lgroup from another axis leading to duplicate axis
bad = Axis(3, 'bad')
with pytest.raises(ValueError):
array[bad[1, 2], age[3, 4]]
def test_getitem_abstract_axes(array):
raw = array.data
age, geo, sex, lipro = array.axes
age159 = X.age[1, 5, 9]
lipro159 = X.lipro['P01,P05,P09']
# LGroup at "correct" place
subset = array[age159]
assert subset.axes[1:] == (geo, sex, lipro)
assert subset.axes[0].equals(Axis([1, 5, 9], 'age'))
assert_array_equal(subset, raw[[1, 5, 9]])
# LGroup at "incorrect" place
assert_array_equal(array[lipro159], raw[..., [0, 4, 8]])
# multiple LGroup key (in "incorrect" order)
assert_array_equal(array[lipro159, age159], raw[[1, 5, 9]][..., [0, 4, 8]])
# mixed LGroup/positional key
assert_array_equal(array[[1, 5, 9], lipro159], raw[[1, 5, 9]][..., [0, 4, 8]])
# single None slice
assert_array_equal(array[:], raw)
# only Ellipsis
assert_array_equal(array[...], raw)
# Ellipsis and LGroup
assert_array_equal(array[..., lipro159], raw[..., [0, 4, 8]])
# key with duplicate axes
with pytest.raises(ValueError):
array[X.age[1, 2], X.age[3]]
# key with invalid axis
with pytest.raises(ValueError):
array[X.bad[1, 2], X.age[3, 4]]
def test_getitem_anonymous_axes():
arr = ndtest([Axis(3), Axis(4)])
raw = arr.data
assert_array_equal(arr[X[0][1:]], raw[1:])
assert_array_equal(arr[X[1][2:]], raw[:, 2:])
assert_array_equal(arr[X[0][2:], X[1][1:]], raw[2:, 1:])
assert_array_equal(arr.i[2:, 1:], raw[2:, 1:])
def test_getitem_guess_axis(array):
raw = array.data
age, geo, sex, lipro = array.axes
# key at "correct" place
assert_array_equal(array[[1, 5, 9]], raw[[1, 5, 9]])
subset = array[[1, 5, 9]]
assert subset.axes[1:] == (geo, sex, lipro)
assert subset.axes[0].equals(Axis([1, 5, 9], 'age'))
assert_array_equal(subset, raw[[1, 5, 9]])
# key at "incorrect" place
assert_array_equal(array['P01,P05,P09'], raw[..., [0, 4, 8]])
assert_array_equal(array[['P01', 'P05', 'P09']], raw[..., [0, 4, 8]])
# multiple keys (in "incorrect" order)
assert_array_equal(array['P01,P05,P09', [1, 5, 9]],
raw[[1, 5, 9]][..., [0, 4, 8]])
# mixed LGroup/key
assert_array_equal(array[lipro['P01,P05,P09'], [1, 5, 9]],
raw[[1, 5, 9]][..., [0, 4, 8]])
# single None slice
assert_array_equal(array[:], raw)
# only Ellipsis
assert_array_equal(array[...], raw)
# Ellipsis and LGroup
assert_array_equal(array[..., 'P01,P05,P09'], raw[..., [0, 4, 8]])
assert_array_equal(array[..., ['P01', 'P05', 'P09']], raw[..., [0, 4, 8]])
# LGroup without axis (which also needs to be guessed)
g = LGroup(['P01', 'P05', 'P09'])
assert_array_equal(array[g], raw[..., [0, 4, 8]])
# key with duplicate axes
with pytest.raises(ValueError, match="key has several values for axis: age"):
array[[1, 2], [3, 4]]
# key with invalid label (ie label not found on any axis)
with pytest.raises(ValueError, match="999 is not a valid label for any axis"):
array[[1, 2], 999]
# key with invalid label list (ie list of labels not found on any axis)
with pytest.raises(ValueError, match=r"\[998, 999\] is not a valid label for any axis"):
array[[1, 2], [998, 999]]
# key with partial invalid list (ie list containing a label not found
# on any axis)
# FIXME: the message should be the same as for 999, 4 (ie it should NOT mention age).
with pytest.raises(ValueError, match=r"age\[3, 999\] is not a valid label for any axis"):
array[[1, 2], [3, 999]]
with pytest.raises(ValueError, match=r"\[999, 4\] is not a valid label for any axis"):
array[[1, 2], [999, 4]]
# ambiguous key
arr = ndtest("a=l0,l1;b=l1,l2")
with pytest.raises(ValueError, match=r"l1 is ambiguous \(valid in a, b\)"):
arr['l1']
# ambiguous key disambiguated via string
res = arr['b[l1]']
assert_array_equal(res, arr.data[:, 0])
def test_getitem_positional_group(array):
raw = array.data
age, geo, sex, lipro = array.axes
age159 = age.i[1, 5, 9]
lipro159 = lipro.i[0, 4, 8]
# LGroup at "correct" place
subset = array[age159]
assert subset.axes[1:] == (geo, sex, lipro)
assert subset.axes[0].equals(Axis([1, 5, 9], 'age'))
assert_array_equal(subset, raw[[1, 5, 9]])
# LGroup at "incorrect" place
assert_array_equal(array[lipro159], raw[..., [0, 4, 8]])
# multiple LGroup key (in "incorrect" order)
assert_array_equal(array[lipro159, age159],
raw[[1, 5, 9]][..., [0, 4, 8]])
# mixed LGroup/positional key
assert_array_equal(array[[1, 5, 9], lipro159],
raw[[1, 5, 9]][..., [0, 4, 8]])
# single None slice
assert_array_equal(array[:], raw)
# only Ellipsis
assert_array_equal(array[...], raw)
# Ellipsis and LGroup
assert_array_equal(array[..., lipro159], raw[..., [0, 4, 8]])
# key with duplicate axes
with pytest.raises(ValueError, match="key has several values for axis: age"):
array[age.i[1, 2], age.i[3, 4]]
def test_getitem_str_positional_group():
arr = ndtest('a=l0..l2;b=l0..l2')
a, b = arr.axes
res = arr['b.i[1]']
expected = Array([1, 4, 7], 'a=l0..l2')
assert_array_equal(res, expected)
def test_getitem_abstract_positional(array):
raw = array.data
age, geo, sex, lipro = array.axes
age159 = X.age.i[1, 5, 9]
lipro159 = X.lipro.i[0, 4, 8]
# LGroup at "correct" place
subset = array[age159]
assert subset.axes[1:] == (geo, sex, lipro)
assert subset.axes[0].equals(Axis([1, 5, 9], 'age'))
assert_array_equal(subset, raw[[1, 5, 9]])
# LGroup at "incorrect" place
assert_array_equal(array[lipro159], raw[..., [0, 4, 8]])
# multiple LGroup key (in "incorrect" order)
assert_array_equal(array[lipro159, age159],
raw[[1, 5, 9]][..., [0, 4, 8]])
# mixed LGroup/positional key
assert_array_equal(array[[1, 5, 9], lipro159],
raw[[1, 5, 9]][..., [0, 4, 8]])
# single None slice
assert_array_equal(array[:], raw)
# only Ellipsis
assert_array_equal(array[...], raw)
# Ellipsis and LGroup
assert_array_equal(array[..., lipro159], raw[..., [0, 4, 8]])
# key with duplicate axes
with pytest.raises(ValueError, match="key has several values for axis: age"):
array[X.age.i[2, 3], X.age.i[1, 5]]
def test_getitem_bool_larray_key_arr_whout_bool_axis():
arr = ndtest((3, 2, 4))
raw = arr.data
# all dimensions
res = arr[arr < 5]
assert isinstance(res, Array)
assert res.ndim == 1
assert_array_equal(res, raw[raw < 5])
# missing dimension
filter_ = arr['b1'] % 5 == 0
res = arr[filter_]
assert isinstance(res, Array)
assert res.ndim == 2
assert res.shape == (3, 2)
raw_key = raw[:, 1, :] % 5 == 0
raw_d1, raw_d3 = raw_key.nonzero()
assert_array_equal(res, raw[raw_d1, :, raw_d3])
# using an Axis object
arr = ndtest('a=a0,a1;b=0..3')
raw = arr.data
res = arr[arr.b < 2]
assert_array_equal(res, raw[:, :2])
# using an AxisReference (ExprNode)
res = arr[X.b < 2]
assert_array_equal(res, raw[:, :2])
def test_getitem_bool_larray_key_arr_wh_bool_axis():
gender = Axis([False, True], 'gender')
arr = Array([0.1, 0.2], gender)
id_axis = Axis('id=0..3')
key = Array([True, False, True, True], id_axis)
expected = Array([0.2, 0.1, 0.2, 0.2], id_axis)
# LGroup using the real axis
assert_larray_equal(arr[gender[key]], expected)
# LGroup using an AxisReference
assert_larray_equal(arr[X.gender[key]], expected)
# this test checks that the current behavior does not change unintentionally...
# ... but I am unsure the current behavior is what we actually want
msg = re.escape("boolean subset key contains more axes ({id}) than array ({gender})")
with pytest.raises(ValueError, match=msg):
arr[key]
def test_getitem_bool_larray_and_group_key():
arr = ndtest((3, 6, 4)).set_labels('b', '0..5')
# using axis
res = arr['a0,a2', arr.b < 3, 'c0:c3']
assert isinstance(res, Array)
assert res.ndim == 3
expected = arr['a0,a2', '0:2', 'c0:c3']
assert_array_equal(res, expected)
# using axis reference
res = arr['a0,a2', X.b < 3, 'c0:c3']
assert isinstance(res, Array)
assert res.ndim == 3
assert_array_equal(res, expected)
def test_getitem_bool_ndarray_key_arr_whout_bool_axis(array):
raw = array.data
res = array[raw < 5]
assert isinstance(res, Array)
assert res.ndim == 1
assert_array_equal(res, raw[raw < 5])
def test_getitem_bool_ndarray_key_arr_wh_bool_axis():
gender = Axis([False, True], 'gender')
arr = Array([0.1, 0.2], gender)
key = np.array([True, False, True, True])
expected = arr.i[[1, 0, 1, 1]]
# LGroup using the real axis
assert_larray_equal(arr[gender[key]], expected)
# LGroup using an AxisReference
assert_larray_equal(arr[X.gender[key]], expected)
# raw key => ???
# this test checks that the current behavior does not change unintentionally...
# ... but I am unsure the current behavior is what we actually want
# L? is to account for Python2 where shape can be 'long' integers
msg = r"boolean key with a different shape \(\(4L?,\)\) than array \(\(2,\)\)"
with pytest.raises(ValueError, match=msg):
arr[key]
def test_getitem_bool_anonymous_axes():
a = ndtest([Axis(2), Axis(3), Axis(4), Axis(5)])
mask = ones(a.axes[1, 3], dtype=bool)
res = a[mask]
assert res.ndim == 3
assert res.shape == (15, 2, 4)
# XXX: we might want to transpose the result to always move combined axes to the front
a = ndtest([Axis(2), Axis(3), Axis(4), Axis(5)])
mask = ones(a.axes[1, 2], dtype=bool)
res = a[mask]
assert res.ndim == 3
assert res.shape == (2, 12, 5)
def test_getitem_igroup_on_int_axis():
a = Axis('a=1..3')
arr = ndtest(a)
assert arr[a.i[1]] == 1
def test_getitem_integer_string_axes():
arr = ndtest((5, 5))
a, b = arr.axes
assert_array_equal(arr['0[a0, a2]'], arr[a['a0', 'a2']])
assert_array_equal(arr['0[a0:a2]'], arr[a['a0:a2']])
with pytest.raises(ValueError):
arr['1[a0, a2]']
assert_array_equal(arr['0.i[0, 2]'], arr[a.i[0, 2]])
assert_array_equal(arr['0.i[0:2]'], arr[a.i[0:2]])
with pytest.raises(ValueError):
arr['3.i[0, 2]']
def test_getitem_int_larray_lgroup_key():
# e axis go from 0 to 3
arr = ndtest("c=0,1; d=0,1; e=0..3")
# key values go from 0 to 3
key = ndtest("a=0,1; b=0,1")
# this replaces 'e' axis by 'a' and 'b' axes
res = arr[X.e[key]]
assert res.shape == (2, 2, 2, 2)
assert res.axes.names == ['c', 'd', 'a', 'b']
def test_getitem_structured_key_with_groups():
arr = ndtest((3, 2))
expected = arr['a1':]
a, b = arr.axes
alt_a = Axis('a=a1..a3')
# a) slice with lgroup
# a.1) LGroup.axis from array.axes
assert_array_equal(arr[a['a1']:a['a2']], expected)
# a.2) LGroup.axis not from array.axes
assert_array_equal((arr[alt_a['a1']:alt_a['a2']]), expected)
# b) slice with igroup
# b.1) IGroup.axis from array.axes
assert_array_equal((arr[a.i[1]:a.i[2]]), expected)
# b.2) IGroup.axis not from array.axes
assert_array_equal((arr[alt_a.i[0]:alt_a.i[1]]), expected)
# c) list with LGroup
# c.1) LGroup.axis from array.axes
assert_array_equal((arr[[a['a1'], a['a2']]]), expected)
# c.2) LGroup.axis not from array.axes
assert_array_equal((arr[[alt_a['a1'], alt_a['a2']]]), expected)
# d) list with IGroup
# d.1) IGroup.axis from array.axes
assert_array_equal((arr[[a.i[1], a.i[2]]]), expected)
# d.2) IGroup.axis not from array.axes
assert_array_equal((arr[[alt_a.i[0], alt_a.i[1]]]), expected)
def test_getitem_single_larray_key_guess():
# TODO: we really need another way to get test axes, e.g. testaxes(2, 3, 4) or testaxes((2, 3, 4))
a, b, c = ndtest((2, 3, 4)).axes
arr = ndtest((a, b))
# >>> arr
# a\b b0 b1 b2
# a0 0 1 2
# a1 3 4 5
# 1) key with extra axis
key = Array(['a0', 'a1', 'a1', 'a0'], c)
# replace the target axis by the extra axis
expected = from_string(r"""
c\b b0 b1 b2
c0 0 1 2
c1 3 4 5
c2 3 4 5
c3 0 1 2""")
assert_array_equal(arr[key], expected)
# 2) key with the target axis (the one being replaced)
key = Array(['b1', 'b0', 'b2'], b)
# axis stays the same but data should be flipped/shuffled
expected = from_string(r"""
a\b b0 b1 b2
a0 1 0 2
a1 4 3 5""")
assert_array_equal(arr[key], expected)
# 2bis) key with part of the target axis (the one being replaced)
key = Array(['b2', 'b1'], 'b=b0,b1')
expected = from_string(r"""
a\b b0 b1
a0 2 1
a1 5 4""")
assert_array_equal(arr[key], expected)
# 3) key with another existing axis (not the target axis)
key = Array(['a0', 'a1', 'a0'], b)
expected = from_string("""
b b0 b1 b2
\t 0 4 2""")
assert_array_equal(arr[key], expected)
# TODO: this does not work yet but should be much easier to implement with "align" in make_np_broadcastable
# 3bis) key with *part* of another existing axis (not the target axis)
# key = Array(['a1', 'a0'], 'b=b0,b1')
# expected = from_string("""
# b b0 b1
# \t 3 1""")
# assert_array_equal(arr[key], expected)
# 4) key has both the target axis and another existing axis
# TODO: maybe we should make this work without requiring astype!
key = from_string(r"""
a\b b0 b1 b2
a0 a0 a1 a0
a1 a1 a0 a1""").astype(str)
expected = from_string(r"""
a\b b0 b1 b2
a0 0 4 2
a1 3 1 5""")
assert_array_equal(arr[key], expected)
# 5) key has both the target axis and an extra axis
key = from_string(r"""
a\c c0 c1 c2 c3
a0 a0 a1 a1 a0
a1 a1 a0 a0 a1""").astype(str)
expected = from_string(r"""
a c\b b0 b1 b2
a0 c0 0 1 2
a0 c1 3 4 5
a0 c2 3 4 5
a0 c3 0 1 2
a1 c0 3 4 5
a1 c1 0 1 2
a1 c2 0 1 2
a1 c3 3 4 5""")
assert_array_equal(arr[key], expected)
# 6) key has both another existing axis (not target) and an extra axis
key = from_string(r"""
a\c c0 c1 c2 c3
a0 b0 b1 b0 b1
a1 b2 b1 b2 b1""").astype(str)
expected = from_string(r"""
a\c c0 c1 c2 c3
a0 0 1 0 1
a1 5 4 5 4""")
assert_array_equal(arr[key], expected)
# 7) key has the target axis, another existing axis and an extra axis
key = from_string(r"""
a b\c c0 c1 c2 c3
a0 b0 a0 a1 a0 a1
a0 b1 a1 a0 a1 a0
a0 b2 a0 a1 a0 a1
a1 b0 a0 a1 a1 a0
a1 b1 a1 a1 a1 a1
a1 b2 a0 a1 a1 a0""").astype(str)
expected = from_string(r"""
a b\c c0 c1 c2 c3
a0 b0 0 3 0 3
a0 b1 4 1 4 1
a0 b2 2 5 2 5
a1 b0 0 3 3 0
a1 b1 4 4 4 4
a1 b2 2 5 5 2""")
assert_array_equal(arr[key], expected)
def test_getitem_multiple_larray_key_guess():
a, b, c, d, e = ndtest((2, 3, 2, 3, 2)).axes
arr = ndtest((a, b))
# >>> arr
# a\b b0 b1 b2
# a0 0 1 2
# a1 3 4 5
# 1) keys with each a different existing axis
k1 = from_string(""" a a1 a0
\t b2 b0""")
k2 = from_string(""" b b1 b2 b3
\t a0 a1 a0""")
expected = from_string(r"""b\a a1 a0
b1 2 0
b2 5 3
b3 2 0""")
assert_array_equal(arr[k1, k2], expected)
# 2) keys with a common existing axis
k1 = from_string(""" b b0 b1 b2
\t a1 a0 a1""")
k2 = from_string(""" b b0 b1 b2
\t b1 b2 b0""")
expected = from_string(""" b b0 b1 b2
\t 4 2 3""")
assert_array_equal(arr[k1, k2], expected)
# 3) keys with each a different extra axis
k1 = from_string(""" c c0 c1
\t a1 a0""")
k2 = from_string(""" d d0 d1 d2
\t b1 b2 b0""")
expected = from_string(r"""c\d d0 d1 d2
c0 4 5 3
c1 1 2 0""")
assert_array_equal(arr[k1, k2], expected)
# 4) keys with a common extra axis
k1 = from_string(r"""c\d d0 d1 d2
c0 a1 a0 a1
c1 a0 a1 a0""").astype(str)
k2 = from_string(r"""c\e e0 e1
c0 b1 b2
c1 b0 b1""").astype(str)
expected = from_string(r""" c d\e e0 e1
c0 d0 4 5
c0 d1 1 2
c0 d2 4 5
c1 d0 0 1
c1 d1 3 4
c1 d2 0 1""")
assert_array_equal(arr[k1, k2], expected)
def test_getitem_ndarray_key_guess(array):
raw = array.data
keys = ['P04', 'P01', 'P03', 'P02']
key = np.array(keys)
res = array[key]
assert isinstance(res, Array)
assert res.axes == array.axes.replace(X.lipro, Axis(keys, 'lipro'))
assert_array_equal(res, raw[:, :, :, [3, 0, 2, 1]])
def test_getitem_int_larray_key_guess():
a = Axis([0, 1], 'a')
b = Axis([2, 3], 'b')
c = Axis([4, 5], 'c')
d = Axis([6, 7], 'd')
e = Axis([8, 9, 10, 11], 'e')
arr = ndtest([c, d, e])
key = Array([[8, 9], [10, 11]], [a, b])
assert arr[key].axes == [c, d, a, b]
def test_getitem_int_ndarray_key_guess():
c = Axis([4, 5], 'c')
d = Axis([6, 7], 'd')
e = Axis([8, 9, 10, 11], 'e')
arr = ndtest([c, d, e])
# ND keys do not work yet
# key = nparray([[8, 11], [10, 9]])
key = np.array([8, 11, 10])
res = arr[key]
assert res.axes == [c, d, Axis([8, 11, 10], 'e')]
def test_getitem_axis_object():
arr = ndtest((2, 3))
a, b = arr.axes
assert_array_equal(arr[a], arr)
assert_array_equal(arr[b], arr)
b2 = Axis('b=b0,b2')
assert_array_equal(arr[b2], from_string("""a\\b b0 b2
a0 0 2
a1 3 5"""))
def test_getitem_empty_tuple():
# an empty tuple should return a view on the original array
arr = ndtest((2, 3))
res = arr[()]
assert_array_equal(res, arr)
assert res is not arr
z = Array(0)
res = z[()]
assert res == z
assert res is not z
def test_positional_indexer_getitem(array):
raw = array.data
for key in [0, (0, 5, 1, 2), (slice(None), 5, 1), (0, 5), [1, 0], ([1, 0], 5)]:
assert_array_equal(array.i[key], raw[key])
assert_array_equal(array.i[[1, 0], [5, 4]], raw[np.ix_([1, 0], [5, 4])])
with pytest.raises(IndexError):
array.i[0, 0, 0, 0, 0]
def test_positional_indexer_setitem(array):
for key in [0, (0, 2, 1, 2), (slice(None), 2, 1), (0, 2), [1, 0], ([1, 0], 2)]:
arr = array.copy()
raw = array.data.copy()
arr.i[key] = 42
raw[key] = 42
assert_array_equal(arr, raw)
raw = array.data
array.i[[1, 0], [5, 4]] = 42
raw[np.ix_([1, 0], [5, 4])] = 42
assert_array_equal(array, raw)
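# .points selects element-wise (zipped) combinations of labels, similar to numpy advanced indexing, instead of the
# cross product done by plain []; as tested below, keys may be given in any order and may omit dimensions.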
def test_points_indexer_getitem():
arr = ndtest((2, 3, 3))
raw = arr.data
keys = [
('a0',
0),
(('a0', 'c2'),
(0, slice(None), 2)),
(('a0', 'b1', 'c2'),
(0, 1, 2)),
# key in the "correct" order
((['a1', 'a0', 'a1', 'a0'], 'b1', ['c1', 'c0', 'c1', 'c0']),
([1, 0, 1, 0], 1, [1, 0, 1, 0])),
# key in the "wrong" order
((['a1', 'a0', 'a1', 'a0'], ['c1', 'c0', 'c1', 'c0'], 'b1'),
([1, 0, 1, 0], 1, [1, 0, 1, 0])),
# advanced key with a missing dimension
((['a1', 'a0', 'a1', 'a0'], ['c1', 'c0', 'c1', 'c0']),
([1, 0, 1, 0], slice(None), [1, 0, 1, 0])),
]
for label_key, index_key in keys:
assert_array_equal(arr.points[label_key], raw[index_key])
# XXX: we might want to raise KeyError or IndexError instead?
with pytest.raises(ValueError):
arr.points['a0', 'b1', 'c2', 'd0']
def test_points_indexer_setitem():
keys = [
('a0',
0),
(('a0', 'c2'),
(0, slice(None), 2)),
(('a0', 'b1', 'c2'),
(0, 1, 2)),
# key in the "correct" order
((['a1', 'a0', 'a1', 'a0'], 'b1', ['c1', 'c0', 'c1', 'c0']),
([1, 0, 1, 0], 1, [1, 0, 1, 0])),
# key in the "wrong" order
((['a1', 'a0', 'a1', 'a0'], ['c1', 'c0', 'c1', 'c0'], 'b1'),
([1, 0, 1, 0], 1, [1, 0, 1, 0])),
# advanced key with a missing dimension
((['a1', 'a0', 'a1', 'a0'], ['c1', 'c0', 'c1', 'c0']),
([1, 0, 1, 0], slice(None), [1, 0, 1, 0])),
]
for label_key, index_key in keys:
arr = ndtest((2, 3, 3))
raw = arr.data.copy()
arr.points[label_key] = 42
raw[index_key] = 42
assert_array_equal(arr, raw)
arr = ndtest(2)
# XXX: we might want to raise KeyError or IndexError instead?
with pytest.raises(ValueError):
arr.points['a0', 'b1'] = 42
# test when broadcasting is involved
arr = ndtest((2, 3, 4))
raw = arr.data.copy()
raw_value = raw[:, 0, 0].reshape(2, 1)
raw[:, [0, 1, 2], [0, 1, 2]] = raw_value
arr.points['b0,b1,b2', 'c0,c1,c2'] = arr['b0', 'c0']
assert_array_equal(arr, raw)
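# When assigning an Array value, it is aligned on axis names/labels: the value may have fewer axes than the
# targeted subset (missing axes are broadcast), but not extra incompatible ones (see the ValueError cases below).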
def test_setitem_larray(array, small_array):
"""
tests Array.__setitem__(key, value) where value is an Array
"""
age, geo, sex, lipro = array.axes
# 1) using a LGroup key
ages1_5_9 = age[[1, 5, 9]]
# a) value has exactly the same shape as the target slice
arr = array.copy()
raw = array.data.copy()
arr[ages1_5_9] = arr[ages1_5_9] + 25.0
raw[[1, 5, 9]] = raw[[1, 5, 9]] + 25.0
assert_array_equal(arr, raw)
# b) value has exactly the same shape but LGroup at a "wrong" position
arr = array.copy()
arr[geo[:], ages1_5_9] = arr[ages1_5_9] + 25.0
# same raw as previous test
assert_array_equal(arr, raw)
# c) value has an extra length-1 axis
arr = array.copy()
raw = array.data.copy()
raw_value = raw[[1, 5, 9], np.newaxis] + 26.0
fake_axis = Axis(['label'], 'fake')
age_axis = arr[ages1_5_9].axes.age
value = Array(raw_value, axes=(age_axis, fake_axis, geo, sex, lipro))
arr[ages1_5_9] = value
raw[[1, 5, 9]] = raw[[1, 5, 9]] + 26.0
assert_array_equal(arr, raw)
# d) value has the same axes as the target but one has length 1
# arr = array.copy()
# raw = array.data.copy()
# raw[[1, 5, 9]] = np.sum(raw[[1, 5, 9]], axis=1, keepdims=True)
# arr[ages1_5_9] = arr[ages1_5_9].sum(geo=(geo.all(),))
# assert_array_equal(arr, raw)
# e) value has a missing dimension
arr = array.copy()
raw = array.data.copy()
arr[ages1_5_9] = arr[ages1_5_9].sum(geo)
raw[[1, 5, 9]] = np.sum(raw[[1, 5, 9]], axis=1, keepdims=True)
assert_array_equal(arr, raw)
# 2) using a LGroup and scalar key (triggers advanced indexing/cross)
# a) value has exactly the same shape as the target slice
arr = array.copy()
raw = array.data.copy()
# using 1, 5, 8 and not 9 so that the list is not collapsed to slice
value = arr[age[1, 5, 8], sex['M']] + 25.0
arr[age[1, 5, 8], sex['M']] = value
raw[[1, 5, 8], :, 0] = raw[[1, 5, 8], :, 0] + 25.0
assert_array_equal(arr, raw)
# 3) using a string key
arr = array.copy()
raw = array.data.copy()
arr['1, 5, 9'] = arr['1, 5, 9'] + 27.0
raw[[1, 5, 9]] = raw[[1, 5, 9]] + 27.0
assert_array_equal(arr, raw)
# 4) using ellipsis keys
# only Ellipsis
arr = array.copy()
arr[...] = 0
assert_array_equal(arr, np.zeros_like(raw))
# Ellipsis and LGroup
arr = array.copy()
raw = array.data.copy()
arr[..., lipro['P01,P05,P09']] = 0
raw[..., [0, 4, 8]] = 0
assert_array_equal(arr, raw)
# 5) using a single slice(None) key
arr = array.copy()
arr[:] = 0
assert_array_equal(arr, np.zeros_like(raw))
# 6) incompatible axes
arr = small_array.copy()
la2 = small_array.copy()
with pytest.raises(ValueError, match="Value {!s} axis is not present in target subset {!s}. "
"A value can only have the same axes or fewer axes than the subset "
"being targeted".format(la2.axes - arr['P01'].axes, arr['P01'].axes)):
arr['P01'] = la2
la2 = arr.rename('sex', 'gender')
with pytest.raises(ValueError, match="Value {!s} axis is not present in target subset {!s}. "
"A value can only have the same axes or fewer axes than the subset "
"being targeted".format(la2['P01'].axes - arr['P01'].axes, arr['P01'].axes)):
arr['P01'] = la2['P01']
# 7) incompatible labels
sex2 = Axis('sex=F,M')
la2 = Array(small_array.data, axes=(sex2, lipro))
with pytest.raises(ValueError, match="incompatible axes:"):
arr[:] = la2
# key has multiple Arrays (this is used within .points indexing)
# ==============================================================
# first some setup
a = Axis(['a0', 'a1'], None)
b = Axis(['b0', 'b1', 'b2'], None)
expected = ndtest((a, b))
value = expected.combine_axes()
# a) with anonymous axes
combined_axis = value.axes[0]
a_key = Array([0, 0, 0, 1, 1, 1], combined_axis)
b_key = Array([0, 1, 2, 0, 1, 2], combined_axis)
key = (a.i[a_key], b.i[b_key])
array = empty((a, b))
array[key] = value
assert_array_equal(array, expected)
# b) with wildcard combined_axis
wild_combined_axis = combined_axis.ignore_labels()
wild_a_key = Array([0, 0, 0, 1, 1, 1], wild_combined_axis)
wild_b_key = Array([0, 1, 2, 0, 1, 2], wild_combined_axis)
wild_key = (a.i[wild_a_key], b.i[wild_b_key])
array = empty((a, b))
array[wild_key] = value
assert_array_equal(array, expected)
# c) with a wildcard value
wild_value = value.ignore_labels()
array = empty((a, b))
array[key] = wild_value
assert_array_equal(array, expected)
# d) with a wildcard combined axis and wildcard value
array = empty((a, b))
array[wild_key] = wild_value
assert_array_equal(array, expected)
def test_setitem_ndarray(array):
"""
tests Array.__setitem__(key, value) where value is a raw ndarray.
In that case, value.shape is more restricted as we rely on numpy broadcasting.
"""
# a) value has exactly the same shape as the target slice
arr = array.copy()
raw = array.data.copy()
value = raw[[1, 5, 9]] + 25.0
arr[[1, 5, 9]] = value
raw[[1, 5, 9]] = value
assert_array_equal(arr, raw)
# b) value has the same axes as the target but one has length 1
arr = array.copy()
raw = array.data.copy()
value = np.sum(raw[[1, 5, 9]], axis=1, keepdims=True)
arr[[1, 5, 9]] = value
raw[[1, 5, 9]] = value
assert_array_equal(arr, raw)
def test_setitem_scalar(array):
"""
tests Array.__setitem__(key, value) where value is a scalar
"""
# a) list key (one dimension)
arr = array.copy()
raw = array.data.copy()
arr[[1, 5, 9]] = 42
raw[[1, 5, 9]] = 42
assert_array_equal(arr, raw)
# b) full scalar key (ie set one cell)
arr = array.copy()
raw = array.data.copy()
arr[0, 'P02', 'A12', 'M'] = 42
raw[0, 1, 0, 1] = 42
assert_array_equal(arr, raw)
def test_setitem_bool_array_key(array):
# XXX: this test is awfully slow (more than 1s)
age, geo, sex, lipro = array.axes
# Array key
# a1) same shape, same order
arr = array.copy()
raw = array.data.copy()
arr[arr < 5] = 0
raw[raw < 5] = 0
assert_array_equal(arr, raw)
# a2) same shape, different order
arr = array.copy()
raw = array.data.copy()
key = (arr < 5).T
arr[key] = 0
raw[raw < 5] = 0
assert_array_equal(arr, raw)
# b) numpy-broadcastable shape
# arr = array.copy()
# raw = array.data.copy()
# key = arr[sex['F,']] < 5
# self.assertEqual(key.ndim, 4)
# arr[key] = 0
# raw[raw[:, :, [1]] < 5] = 0
# assert_array_equal(arr, raw)
# c) Array-broadcastable shape (missing axis)
arr = array.copy()
raw = array.data.copy()
key = arr[sex['M']] < 5
assert key.ndim == 3
arr[key] = 0
raw_key = raw[:, :, 0, :] < 5
raw_d1, raw_d2, raw_d4 = raw_key.nonzero()
raw[raw_d1, raw_d2, :, raw_d4] = 0
assert_array_equal(arr, raw)
# ndarray key
arr = array.copy()
raw = array.data.copy()
arr[raw < 5] = 0
raw[raw < 5] = 0
assert_array_equal(arr, raw)
# d) Array with extra axes
arr = array.copy()
key = (arr < 5).expand([Axis(2, 'extra')])
assert key.ndim == 5
# TODO: make this work
with pytest.raises(ValueError):
arr[key] = 0
def test_set(array):
age, geo, sex, lipro = array.axes
# 1) using a LGroup key
ages1_5_9 = age[[1, 5, 9]]
# a) value has exactly the same shape as the target slice
arr = array.copy()
raw = array.data.copy()
arr.set(arr[ages1_5_9] + 25.0, age=ages1_5_9)
raw[[1, 5, 9]] = raw[[1, 5, 9]] + 25.0
assert_array_equal(arr, raw)
# b) same size but a different shape (extra length-1 axis)
arr = array.copy()
raw = array.data.copy()
raw_value = raw[[1, 5, 9], np.newaxis] + 26.0
fake_axis = Axis(['label'], 'fake')
age_axis = arr[ages1_5_9].axes.age
value = Array(raw_value, axes=(age_axis, fake_axis, geo, sex, lipro))
arr.set(value, age=ages1_5_9)
raw[[1, 5, 9]] = raw[[1, 5, 9]] + 26.0
assert_array_equal(arr, raw)
# dimension of length 1
# arr = array.copy()
# raw = array.data.copy()
# raw[[1, 5, 9]] = np.sum(raw[[1, 5, 9]], axis=1, keepdims=True)
# arr.set(arr[ages1_5_9].sum(geo=(geo.all(),)), age=ages1_5_9)
# assert_array_equal(arr, raw)
# c) missing dimension
arr = array.copy()
raw = array.data.copy()
arr.set(arr[ages1_5_9].sum(geo), age=ages1_5_9)
raw[[1, 5, 9]] = np.sum(raw[[1, 5, 9]], axis=1, keepdims=True)
assert_array_equal(arr, raw)
# 2) using a raw key
arr = array.copy()
raw = array.data.copy()
arr.set(arr[[1, 5, 9]] + 27.0, age=[1, 5, 9])
raw[[1, 5, 9]] = raw[[1, 5, 9]] + 27.0
assert_array_equal(arr, raw)
def test_filter(array):
age, geo, sex, lipro = array.axes
ages1_5_9 = age[(1, 5, 9)]
ages11 = age[11]
# with LGroup
assert array.filter(age=ages1_5_9).shape == (3, 44, 2, 15)
# FIXME: this should raise a comprehensible error!
# self.assertEqual(array.filter(age=[ages1_5_9]).shape, (3, 44, 2, 15))
# LGroup with 1 value => collapse
assert array.filter(age=ages11).shape == (44, 2, 15)
# LGroup with a list of 1 value => do not collapse
assert array.filter(age=age[[11]]).shape == (1, 44, 2, 15)
# LGroup with a list of 1 value defined as a string => do not collapse
assert array.filter(lipro=lipro['P01,']).shape == (116, 44, 2, 1)
# LGroup with 1 value
# XXX: this does not work. Do we want to make this work?
# filtered = array.filter(age=(ages11,))
# self.assertEqual(filtered.shape, (1, 44, 2, 15))
# list
assert array.filter(age=[1, 5, 9]).shape == (3, 44, 2, 15)
# string
assert array.filter(lipro='P01,P02').shape == (116, 44, 2, 2)
# multiple axes at once
assert array.filter(age=[1, 5, 9], lipro='P01,P02').shape == (3, 44, 2, 2)
# multiple axes one after the other
assert array.filter(age=[1, 5, 9]).filter(lipro='P01,P02').shape == (3, 44, 2, 2)
# a single value for one dimension => collapse the dimension
assert array.filter(sex='M').shape == (116, 44, 15)
# but a list with a single value for one dimension => do not collapse
assert array.filter(sex=['M']).shape == (116, 44, 1, 15)
assert array.filter(sex='M,').shape == (116, 44, 1, 15)
# with duplicate keys
# XXX: do we want to support this? I don't see any value in that but I might be short-sighted.
# filtered = array.filter(lipro='P01,P02,P01')
# XXX: we could abuse python to allow naming groups via Axis.__getitem__
# (but I doubt it is a good idea).
# child = age[':17', 'child']
# slices
# ------
# LGroup slice
assert array.filter(age=age[:17]).shape == (18, 44, 2, 15)
# string slice
assert array.filter(lipro=':P03').shape == (116, 44, 2, 3)
# raw slice
assert array.filter(age=slice(17)).shape == (18, 44, 2, 15)
# filter chain with a slice
assert array.filter(age=slice(17)).filter(geo='A12,A13').shape == (18, 2, 2, 15)
def test_filter_multiple_axes(array):
# multiple values in each group
assert array.filter(age=[1, 5, 9], lipro='P01,P02').shape == (3, 44, 2, 2)
# with a group of one value
assert array.filter(age=[1, 5, 9], sex='M,').shape == (3, 44, 1, 15)
# with a discarded axis (there is a scalar in the key)
assert array.filter(age=[1, 5, 9], sex='M').shape == (3, 44, 15)
# with a discarded axis that is not adjacent to the ix_array axis, i.e. with a sliced axis between the scalar axis
# and the ix_array axis. Since our array has axes age, geo, sex and lipro, any of the following should be tested:
# age+sex / age+lipro / geo+lipro
# additionally, if the ix_array axis was first (i.e. ix_array on age), it worked even before the issue was fixed,
# since the "indexing" subspace is tacked on to the beginning (as the first dimension)
assert array.filter(age=57, sex='M,F').shape == (44, 2, 15)
assert array.filter(age=57, lipro='P01,P05').shape == (44, 2, 2)
assert array.filter(geo='A57', lipro='P01,P05').shape == (116, 2, 2)
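# nonzero() on a boolean array returns one IGroup (positional group) per axis, defined on a combined axis; those
# groups can be used together with [] or .points indexing to retrieve the corresponding values.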
def test_nonzero():
arr = ndtest((2, 3))
a, b = arr.axes
cond = arr > 1
assert_array_equal(cond, from_string(r"""a\b b0 b1 b2
a0 False False True
a1 True True True"""))
a_group, b_group = cond.nonzero()
assert isinstance(a_group, IGroup)
assert a_group.axis is a
assert a_group.key.equals(from_string("""a_b a0_b2 a1_b0 a1_b1 a1_b2
\t 0 1 1 1"""))
assert isinstance(b_group, IGroup)
assert b_group.axis is b
assert b_group.key.equals(from_string("""a_b a0_b2 a1_b0 a1_b1 a1_b2
\t 2 0 1 2"""))
expected = from_string("""a_b a0_b2 a1_b0 a1_b1 a1_b2
\t 2 3 4 5""")
assert_array_equal(arr[a_group, b_group], expected)
assert_array_equal(arr.points[a_group, b_group], expected)
assert_array_equal(arr[cond], expected)
def test_contains():
arr = ndtest('a=0..2;b=b0..b2;c=2..4')
# string label
assert 'b1' in arr
assert 'b4' not in arr
# int label
assert 1 in arr
assert 5 not in arr
# duplicate label
assert 2 in arr
# slice
assert not slice('b0', 'b2') in arr
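# Axis aggregates accept axis numbers (axis=...), Axis objects or axis names, can be chained, and aggregating over
# all axes returns a scalar.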
def test_sum_full_axes(array):
age, geo, sex, lipro = array.axes
# everything
assert array.sum() == np.asarray(array).sum()
# using axes numbers
assert array.sum(axis=2).shape == (116, 44, 15)
assert array.sum(axis=(0, 2)).shape == (44, 15)
# using Axis objects
assert array.sum(age).shape == (44, 2, 15)
assert array.sum(age, sex).shape == (44, 15)
# using axes names
assert array.sum('age', 'sex').shape == (44, 15)
# chained sum
assert array.sum(age, sex).sum(geo).shape == (15,)
assert array.sum(age, sex).sum(lipro, geo) == array.sum()
# getitem on aggregated
aggregated = array.sum(age, sex)
assert aggregated[vla_str].shape == (22, 15)
# filter on aggregated
assert aggregated.filter(geo=vla_str).shape == (22, 15)
def test_sum_full_axes_with_nan(array):
array['M', 'P02', 'A12', 0] = nan
raw = array.data
# everything
assert array.sum() == np.nansum(raw)
assert isnan(array.sum(skipna=False))
# using Axis objects
assert_array_nan_equal(array.sum(X.age), np.nansum(raw, 0))
assert_array_nan_equal(array.sum(X.age, skipna=False), raw.sum(0))
assert_array_nan_equal(array.sum(X.age, X.sex), np.nansum(raw, (0, 2)))
assert_array_nan_equal(array.sum(X.age, X.sex, skipna=False), raw.sum((0, 2)))
def test_sum_full_axes_keep_axes(array):
agg = array.sum(keepaxes=True)
assert agg.shape == (1, 1, 1, 1)
for axis in agg.axes:
assert axis.labels == ['sum']
agg = array.sum(keepaxes='total')
assert agg.shape == (1, 1, 1, 1)
for axis in agg.axes:
assert axis.labels == ['total']
def test_mean_full_axes(array):
raw = array.data
assert array.mean() == np.mean(raw)
assert_array_nan_equal(array.mean(X.age), np.mean(raw, 0))
assert_array_nan_equal(array.mean(X.age, X.sex), np.mean(raw, (0, 2)))
def test_mean_groups(array):
# using int type to test that we get a float in return
arr = array.astype(int)
raw = array.data
res = arr.mean(X.geo['A11', 'A13', 'A24', 'A31'])
assert_array_nan_equal(res, np.mean(raw[:, [0, 2, 4, 5]], 1))
def test_median_full_axes(array):
raw = array.data
assert array.median() == np.median(raw)
assert_array_nan_equal(array.median(X.age), np.median(raw, 0))
assert_array_nan_equal(array.median(X.age, X.sex), np.median(raw, (0, 2)))
def test_median_groups(array):
raw = array.data
res = array.median(X.geo['A11', 'A13', 'A24'])
assert res.shape == (116, 2, 15)
assert_array_nan_equal(res, np.median(raw[:, [0, 2, 4]], 1))
def test_percentile_full_axes():
arr = ndtest((2, 3, 4))
raw = arr.data
assert arr.percentile(10) == np.percentile(raw, 10)
assert_array_nan_equal(arr.percentile(10, X.a), np.percentile(raw, 10, 0))
assert_array_nan_equal(arr.percentile(10, X.c, X.a), np.percentile(raw, 10, (2, 0)))
def test_percentile_groups():
arr = ndtest((2, 5, 3))
raw = arr.data
res = arr.percentile(10, X.b['b0', 'b2', 'b4'])
assert_array_nan_equal(res, np.percentile(raw[:, [0, 2, 4]], 10, 1))
def test_cumsum(array):
raw = array.data
# using Axis objects
assert_array_equal(array.cumsum(X.age), raw.cumsum(0))
assert_array_equal(array.cumsum(X.lipro), raw.cumsum(3))
# using axes numbers
assert_array_equal(array.cumsum(1), raw.cumsum(1))
# using axes names
assert_array_equal(array.cumsum('sex'), raw.cumsum(2))
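# Group aggregates (e.g. sum(geo='A11,A21,A25')) replace the axis by one label per group; a single group collapses
# the dimension entirely unless it is wrapped in a tuple.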
def test_group_agg_kwargs(array):
age, geo, sex, lipro = array.axes
vla, wal, bru = vla_str, wal_str, bru_str
# a) group aggregate on a fresh array
# a.1) one group => collapse dimension
assert array.sum(sex='M').shape == (116, 44, 15)
assert array.sum(sex='M,F').shape == (116, 44, 15)
assert array.sum(sex=sex['M']).shape == (116, 44, 15)
assert array.sum(geo='A11,A21,A25').shape == (116, 2, 15)
assert array.sum(geo=['A11', 'A21', 'A25']).shape == (116, 2, 15)
assert array.sum(geo=geo['A11,A21,A25']).shape == (116, 2, 15)
assert array.sum(geo=':').shape == (116, 2, 15)
assert array.sum(geo=geo[:]).shape == (116, 2, 15)
assert array.sum(geo=geo[':']).shape == (116, 2, 15)
# Include everything between two labels. Since A11 is the first label
# and A21 is the last one, this should be equivalent to the previous
# tests.
assert array.sum(geo='A11:A21').shape == (116, 2, 15)
assert_array_equal(array.sum(geo='A11:A21'), array.sum(geo=':'))
assert_array_equal(array.sum(geo=geo['A11:A21']), array.sum(geo=':'))
# a.2) a tuple of one group => do not collapse dimension
assert array.sum(geo=(geo[:],)).shape == (116, 1, 2, 15)
# a.3) several groups
# string groups
assert array.sum(geo=(vla, wal, bru)).shape == (116, 3, 2, 15)
# with one label in several groups
assert array.sum(sex=(['M'], ['M', 'F'])).shape == (116, 44, 2, 15)
assert array.sum(sex=('M', 'M,F')).shape == (116, 44, 2, 15)
assert array.sum(sex='M;M,F').shape == (116, 44, 2, 15)
res = array.sum(geo=(vla, wal, bru, belgium))
assert res.shape == (116, 4, 2, 15)
# a.4) several dimensions at the same time
res = array.sum(lipro='P01,P03;P02,P05;:', geo=(vla, wal, bru, belgium))
assert res.shape == (116, 4, 2, 3)
# b) both axis aggregate and group aggregate at the same time
# Note that you must list "full axes" aggregates first (Python does not allow non-keyword arguments after keyword arguments).
res = array.sum(age, sex, geo=(vla, wal, bru, belgium))
assert res.shape == (4, 15)
# c) chain group aggregate after axis aggregate
res = array.sum(age, sex).sum(geo=(vla, wal, bru, belgium))
assert res.shape == (4, 15)
def test_group_agg_guess_axis(array):
raw = array.data.copy()
age, geo, sex, lipro = array.axes
vla, wal, bru = vla_str, wal_str, bru_str
# a) group aggregate on a fresh array
# a.1) one group => collapse dimension
# not sure I should support groups with a single item in an aggregate
assert array.sum('M').shape == (116, 44, 15)
assert array.sum('M,').shape == (116, 44, 15)
assert array.sum('M,F').shape == (116, 44, 15)
assert array.sum('A11,A21,A25').shape == (116, 2, 15)
# with a name
assert array.sum('A11,A21,A25 >> g1').shape == (116, 2, 15)
assert array.sum(['A11', 'A21', 'A25']).shape == (116, 2, 15)
# Include everything between two labels. Since A11 is the first label
# and A21 is the last one, this should be equivalent to taking the
# full axis.
assert array.sum('A11:A21').shape == (116, 2, 15)
assert_array_equal(array.sum('A11:A21'), array.sum(geo=':'))
assert_array_equal(array.sum('A11:A21'), array.sum(geo))
# a.2) a tuple of one group => do not collapse dimension
assert array.sum((geo[:],)).shape == (116, 1, 2, 15)
# a.3) several groups
# string groups
assert array.sum((vla, wal, bru)).shape == (116, 3, 2, 15)
# XXX: do we also want to support this? I do not really like it because it gets tricky when other axes come
# into play. For now the error message is unclear because it first aggregates on "vla", then tries to
# aggregate on "wal", but there is no "geo" dimension anymore.
# self.assertEqual(array.sum(vla, wal, bru).shape, (116, 3, 2, 15))
# with one label in several groups
assert array.sum((['M'], ['M', 'F'])).shape == (116, 44, 2, 15)
assert array.sum(('M', 'M,F')).shape == (116, 44, 2, 15)
assert array.sum('M;M,F').shape == (116, 44, 2, 15)
# with group names
res = array.sum('M >> men;M,F >> all')
assert res.shape == (116, 44, 2, 15)
assert 'sex' in res.axes
men = sex['M'].named('men')
all_ = sex['M,F'].named('all')
assert_array_equal(res.axes.sex.labels, ['men', 'all'])
assert_array_equal(res['men'], raw[:, :, 0, :])
assert_array_equal(res['all'], raw.sum(2))
res = array.sum(('M >> men', 'M,F >> all'))
assert res.shape == (116, 44, 2, 15)
assert 'sex' in res.axes
assert_array_equal(res.axes.sex.labels, ['men', 'all'])
assert_array_equal(res['men'], raw[:, :, 0, :])
assert_array_equal(res['all'], raw.sum(2))
res = array.sum((vla, wal, bru, belgium))
assert res.shape == (116, 4, 2, 15)
# a.4) several dimensions at the same time
res = array.sum('P01,P03;P02,P05;P01:', (vla, wal, bru, belgium))
assert res.shape == (116, 4, 2, 3)
# b) both axis aggregate and group aggregate at the same time
res = array.sum(age, sex, (vla, wal, bru, belgium))
assert res.shape == (4, 15)
# c) chain group aggregate after axis aggregate
res = array.sum(age, sex).sum((vla, wal, bru, belgium))
assert res.shape == (4, 15)
def test_group_agg_label_group(array):
age, geo, sex, lipro = array.axes
vla, wal, bru = geo[vla_str], geo[wal_str], geo[bru_str]
lg_belgium = geo[belgium]
# a) group aggregate on a fresh array
# a.1) one group => collapse dimension
# not sure I should support groups with a single item in an aggregate
men = sex.i[[0]]
assert array.sum(men).shape == (116, 44, 15)
assert array.sum(sex['M']).shape == (116, 44, 15)
assert array.sum(sex['M,']).shape == (116, 44, 15)
assert array.sum(sex['M,F']).shape == (116, 44, 15)
assert array.sum(geo['A11,A21,A25']).shape == (116, 2, 15)
assert array.sum(geo[['A11', 'A21', 'A25']]).shape == (116, 2, 15)
assert array.sum(geo['A11', 'A21', 'A25']).shape == (116, 2, 15)
assert array.sum(geo['A11,A21,A25']).shape == (116, 2, 15)
assert array.sum(geo[:]).shape == (116, 2, 15)
assert array.sum(geo[':']).shape == (116, 2, 15)
assert array.sum(geo[:]).shape == (116, 2, 15)
# Include everything between two labels. Since A11 is the first label and A21 is the last one, this should be
# equivalent to the previous tests.
assert array.sum(geo['A11:A21']).shape == (116, 2, 15)
assert_array_equal(array.sum(geo['A11:A21']), array.sum(geo))
assert_array_equal(array.sum(geo['A11':'A21']), array.sum(geo))
# a.2) a tuple of one group => do not collapse dimension
assert array.sum((geo[:],)).shape == (116, 1, 2, 15)
# a.3) several groups
# string groups
assert array.sum((vla, wal, bru)).shape == (116, 3, 2, 15)
# XXX: do we also want to support this? I do not really like it because it gets tricky when other axes come
# into play. For now the error message is unclear because it first aggregates on "vla", then tries to
# aggregate on "wal", but there is no "geo" dimension anymore.
# self.assertEqual(array.sum(vla, wal, bru).shape, (116, 3, 2, 15))
# with one label in several groups
assert array.sum((sex['M'], sex[['M', 'F']])).shape == (116, 44, 2, 15)
assert array.sum((sex['M'], sex['M', 'F'])).shape == (116, 44, 2, 15)
assert array.sum((sex['M'], sex['M,F'])).shape == (116, 44, 2, 15)
# XXX: do we want to support this?
# self.assertEqual(array.sum(sex['M;M,F']).shape, (116, 44, 2, 15))
res = array.sum((vla, wal, bru, lg_belgium))
assert res.shape == (116, 4, 2, 15)
# a.4) several dimensions at the same time
# self.assertEqual(array.sum(lipro['P01,P03;P02,P05;P01:'], (vla, wal, bru, lg_belgium)).shape,
# (116, 4, 2, 3))
res = array.sum((lipro['P01,P03'], lipro['P02,P05'], lipro[:]), (vla, wal, bru, lg_belgium))
assert res.shape == (116, 4, 2, 3)
# b) both axis aggregate and group aggregate at the same time
res = array.sum(age, sex, (vla, wal, bru, lg_belgium))
assert res.shape == (4, 15)
# c) chain group aggregate after axis aggregate
res = array.sum(age, sex).sum((vla, wal, bru, lg_belgium))
assert res.shape == (4, 15)
def test_group_agg_label_group_no_axis(array):
age, geo, sex, lipro = array.axes
vla, wal, bru = LGroup(vla_str), LGroup(wal_str), LGroup(bru_str)
lg_belgium = LGroup(belgium)
# a) group aggregate on a fresh array
# a.1) one group => collapse dimension
# not sure I should support groups with a single item in an aggregate
assert array.sum(LGroup('M')).shape == (116, 44, 15)
assert array.sum(LGroup('M,')).shape == (116, 44, 15)
assert array.sum(LGroup('M,F')).shape == (116, 44, 15)
assert array.sum(LGroup('A11,A21,A25')).shape == (116, 2, 15)
assert array.sum(LGroup(['A11', 'A21', 'A25'])).shape == (116, 2, 15)
# Include everything between two labels. Since A11 is the first label
# and A21 is the last one, this should be equivalent to the full axis.
assert array.sum(LGroup('A11:A21')).shape == (116, 2, 15)
assert_array_equal(array.sum(LGroup('A11:A21')), array.sum(geo))
assert_array_equal(array.sum(LGroup(slice('A11', 'A21'))), array.sum(geo))
# a.3) several groups
# string groups
assert array.sum((vla, wal, bru)).shape == (116, 3, 2, 15)
# XXX: do we also want to support this? I do not really like it because it gets tricky when other axes come
# into play. For now the error message is unclear because it first aggregates on "vla", then tries to
# aggregate on "wal", but there is no "geo" dimension anymore.
# self.assertEqual(array.sum(vla, wal, bru).shape, (116, 3, 2, 15))
# with one label in several groups
assert array.sum((LGroup('M'), LGroup(['M', 'F']))).shape == (116, 44, 2, 15)
assert array.sum((LGroup('M'), LGroup('M,F'))).shape == (116, 44, 2, 15)
# XXX: do we want to support this?
# self.assertEqual(array.sum(sex['M;M,F']).shape, (116, 44, 2, 15))
res = array.sum((vla, wal, bru, lg_belgium))
assert res.shape == (116, 4, 2, 15)
# a.4) several dimensions at the same time
# self.assertEqual(array.sum(lipro['P01,P03;P02,P05;P01:'], (vla, wal, bru, lg_belgium)).shape,
# (116, 4, 2, 3))
res = array.sum((LGroup('P01,P03'), LGroup('P02,P05')), (vla, wal, bru, lg_belgium))
assert res.shape == (116, 4, 2, 2)
# b) both axis aggregate and group aggregate at the same time
res = array.sum(age, sex, (vla, wal, bru, lg_belgium))
assert res.shape == (4, 15)
# c) chain group aggregate after axis aggregate
res = array.sum(age, sex).sum((vla, wal, bru, lg_belgium))
assert res.shape == (4, 15)
def test_group_agg_axis_ref_label_group(array):
age, geo, sex, lipro = X.age, X.geo, X.sex, X.lipro
vla, wal, bru = geo[vla_str], geo[wal_str], geo[bru_str]
lg_belgium = geo[belgium]
# a) group aggregate on a fresh array
# a.1) one group => collapse dimension
# not sure I should support groups with a single item in an aggregate
men = sex.i[[0]]
assert array.sum(men).shape == (116, 44, 15)
assert array.sum(sex['M']).shape == (116, 44, 15)
assert array.sum(sex['M,']).shape == (116, 44, 15)
assert array.sum(sex['M,F']).shape == (116, 44, 15)
assert array.sum(geo['A11,A21,A25']).shape == (116, 2, 15)
assert array.sum(geo[['A11', 'A21', 'A25']]).shape == (116, 2, 15)
assert array.sum(geo['A11', 'A21', 'A25']).shape == (116, 2, 15)
assert array.sum(geo['A11,A21,A25']).shape == (116, 2, 15)
assert array.sum(geo[:]).shape == (116, 2, 15)
assert array.sum(geo[':']).shape == (116, 2, 15)
assert array.sum(geo[:]).shape == (116, 2, 15)
# Include everything between two labels. Since A11 is the first label
# and A21 is the last one, this should be equivalent to the previous
# tests.
assert array.sum(geo['A11:A21']).shape == (116, 2, 15)
assert_array_equal(array.sum(geo['A11:A21']), array.sum(geo))
assert_array_equal(array.sum(geo['A11':'A21']), array.sum(geo))
# a.2) a tuple of one group => do not collapse dimension
assert array.sum((geo[:],)).shape == (116, 1, 2, 15)
# a.3) several groups
# string groups
assert array.sum((vla, wal, bru)).shape == (116, 3, 2, 15)
# XXX: do we also want to support this? I do not really like it because
# it gets tricky when other axes come into play. For now the
# error message is unclear because it first aggregates on "vla", then
# tries to aggregate on "wal", but there is no "geo" dimension anymore.
# self.assertEqual(array.sum(vla, wal, bru).shape, (116, 3, 2, 15))
# with one label in several groups
assert array.sum((sex['M'], sex[['M', 'F']])).shape == (116, 44, 2, 15)
assert array.sum((sex['M'], sex['M', 'F'])).shape == (116, 44, 2, 15)
assert array.sum((sex['M'], sex['M,F'])).shape == (116, 44, 2, 15)
# XXX: do we want to support this?
# self.assertEqual(array.sum(sex['M;M,F']).shape, (116, 44, 2, 15))
res = array.sum((vla, wal, bru, lg_belgium))
assert res.shape == (116, 4, 2, 15)
# a.4) several dimensions at the same time
# self.assertEqual(array.sum(lipro['P01,P03;P02,P05;P01:'],
# (vla, wal, bru, belgium)).shape,
# (116, 4, 2, 3))
res = array.sum((lipro['P01,P03'], lipro['P02,P05'], lipro[:]), (vla, wal, bru, lg_belgium))
assert res.shape == (116, 4, 2, 3)
# b) both axis aggregate and group aggregate at the same time
res = array.sum(age, sex, (vla, wal, bru, lg_belgium))
assert res.shape == (4, 15)
# c) chain group aggregate after axis aggregate
res = array.sum(age, sex).sum((vla, wal, bru, lg_belgium))
assert res.shape == (4, 15)
def test_group_agg_one_axis():
a = Axis(range(3), 'a')
la = ndtest(a)
raw = np.asarray(la)
assert_array_equal(la.sum(a[0, 2]), raw[[0, 2]].sum())
def test_group_agg_anonymous_axis():
la = ndtest([Axis(2), Axis(3)])
a1, a2 = la.axes
raw = np.asarray(la)
assert_array_equal(la.sum(a2[0, 2]), raw[:, [0, 2]].sum(1))
def test_group_agg_zero_padded_label():
arr = ndtest("a=01,02,03,10,11; b=b0..b2")
expected = Array([36, 30, 39], "a=01_03,10,11")
assert_array_equal(arr.sum("01,02,03 >> 01_03; 10; 11", "b"), expected)
def test_group_agg_on_int_array():
# issue 193
arr = ndtest('year=2014..2018')
group = arr.year[:2016]
assert arr.mean(group) == 1.0
assert arr.median(group) == 1.0
assert arr.percentile(90, group) == 1.8
assert arr.std(group) == 1.0
assert arr.var(group) == 1.0
def test_group_agg_on_bool_array():
# issue 194
a = ndtest((2, 3))
b = a > 1
expected = from_string("""a,a0,a1
, 1, 2""", sep=',')
assert_array_equal(b.sum('b1:'), expected)
# TODO: fix this (and add other tests for references (X.) to anonymous axes)
# def test_group_agg_anonymous_axis_ref():
# la = ndtest([Axis(2), Axis(3)])
# raw = np.asarray(la)
# # this does not work because x[1] refers to an axis with name 1,
# # which does not exist. We might want to change this.
# assert_array_equal(la.sum(x[1][0, 2]), raw[:, [0, 2]].sum(1))
# group aggregates on a group-aggregated array
def test_group_agg_on_group_agg(array):
age, geo, sex, lipro = array.axes
vla, wal, bru = vla_str, wal_str, bru_str
reg = array.sum(age, sex).sum(geo=(vla, wal, bru, belgium))
# 1) one group => collapse dimension
assert reg.sum(lipro='P01,P02').shape == (4,)
# 2) a tuple of one group => do not collapse dimension
assert reg.sum(lipro=('P01,P02',)).shape == (4, 1)
# 3) several groups
assert reg.sum(lipro='P01;P02;:').shape == (4, 3)
# this is INVALID
# TODO: raise a nice exception
# regsum = reg.sum(lipro='P01,P02,:')
# this is currently allowed even though it can be confusing:
# P01 and P02 are both groups with one element each.
assert reg.sum(lipro=('P01', 'P02', ':')).shape == (4, 3)
assert reg.sum(lipro=('P01', 'P02', lipro[:])).shape == (4, 3)
# explicit groups are better
assert reg.sum(lipro=('P01,', 'P02,', ':')).shape == (4, 3)
assert reg.sum(lipro=(['P01'], ['P02'], ':')).shape == (4, 3)
# 4) groups on the aggregated dimension
# self.assertEqual(reg.sum(geo=([vla, bru], [wal, bru])).shape, (2, 3))
# vla, wal, bru
# group aggregates on a group-aggregated array
def test_group_agg_on_group_agg_nokw(array):
age, geo, sex, lipro = array.axes
vla, wal, bru = vla_str, wal_str, bru_str
reg = array.sum(age, sex).sum((vla, wal, bru, belgium))
# XXX: should this be supported too? (it currently fails)
# reg = array.sum(age, sex).sum(vla, wal, bru, belgium)
# 1) one group => collapse dimension
assert reg.sum('P01,P02').shape == (4,)
# 2) a tuple of one group => do not collapse dimension
assert reg.sum(('P01,P02',)).shape == (4, 1)
# 3) several groups
# : is ambiguous
# self.assertEqual(reg.sum('P01;P02;:').shape, (4, 3))
assert reg.sum('P01;P02;P01:').shape == (4, 3)
# this is INVALID
# TODO: raise a nice exception
# regsum = reg.sum(lipro='P01,P02,:')
# this is currently allowed even though it can be confusing:
# P01 and P02 are both groups with one element each.
assert reg.sum(('P01', 'P02', 'P01:')).shape == (4, 3)
assert reg.sum(('P01', 'P02', lipro[:])).shape == (4, 3)
# explicit groups are better
assert reg.sum(('P01,', 'P02,', 'P01:')).shape == (4, 3)
assert reg.sum((['P01'], ['P02'], 'P01:')).shape == (4, 3)
# 4) groups on the aggregated dimension
# self.assertEqual(reg.sum(geo=([vla, bru], [wal, bru])).shape, (2, 3))
# vla, wal, bru
def test_getitem_on_group_agg(array):
age, geo, sex, lipro = array.axes
vla, wal, bru = vla_str, wal_str, bru_str
# using a string
vla = vla_str
reg = array.sum(age, sex).sum(geo=(vla, wal, bru, belgium))
# the following are all equivalent
assert reg[vla].shape == (15,)
assert reg[(vla,)].shape == (15,)
assert reg[(vla, slice(None))].shape == (15,)
assert reg[vla, slice(None)].shape == (15,)
assert reg[vla, :].shape == (15,)
# one more level...
assert reg[vla]['P03'] == 389049848.0
# using an anonymous LGroup
vla = geo[vla_str]
reg = array.sum(age, sex).sum(geo=(vla, wal, bru, belgium))
# the following are all equivalent
assert reg[vla].shape == (15,)
assert reg[(vla,)].shape == (15,)
assert reg[(vla, slice(None))].shape == (15,)
assert reg[vla, slice(None)].shape == (15,)
assert reg[vla, :].shape == (15,)
# using a named LGroup
vla = geo[vla_str] >> 'Vlaanderen'
reg = array.sum(age, sex).sum(geo=(vla, wal, bru, belgium))
# the following are all equivalent
assert reg[vla].shape == (15,)
assert reg[(vla,)].shape == (15,)
assert reg[(vla, slice(None))].shape == (15,)
assert reg[vla, slice(None)].shape == (15,)
assert reg[vla, :].shape == (15,)
def test_getitem_on_group_agg_nokw(array):
age, geo, sex, lipro = array.axes
vla, wal, bru = vla_str, wal_str, bru_str
# using a string
vla = vla_str
reg = array.sum(age, sex).sum((vla, wal, bru, belgium))
# the following are all equivalent
assert reg[vla].shape == (15,)
assert reg[(vla,)].shape == (15,)
assert reg[(vla, slice(None))].shape == (15,)
assert reg[vla, slice(None)].shape == (15,)
assert reg[vla, :].shape == (15,)
# one more level...
assert reg[vla]['P03'] == 389049848.0
# using an anonymous LGroup
vla = geo[vla_str]
reg = array.sum(age, sex).sum((vla, wal, bru, belgium))
# the following are all equivalent
assert reg[vla].shape == (15,)
assert reg[(vla,)].shape == (15,)
assert reg[(vla, slice(None))].shape == (15,)
assert reg[vla, slice(None)].shape == (15,)
assert reg[vla, :].shape == (15,)
# using a named LGroup
vla = geo[vla_str] >> 'Vlaanderen'
reg = array.sum(age, sex).sum((vla, wal, bru, belgium))
# the following are all equivalent
assert reg[vla].shape == (15,)
assert reg[(vla,)].shape == (15,)
assert reg[(vla, slice(None))].shape == (15,)
assert reg[vla, slice(None)].shape == (15,)
assert reg[vla, :].shape == (15,)
def test_filter_on_group_agg(array):
age, geo, sex, lipro = array.axes
vla, wal, bru = vla_str, wal_str, bru_str
# using a string
# vla = vla_str
# reg = array.sum(age, sex).sum(geo=(vla, wal, bru, belgium))
# assert reg.filter(geo=vla).shape == (15,)
# using a named LGroup
vla = geo[vla_str] >> 'Vlaanderen'
reg = array.sum(age, sex).sum(geo=(vla, wal, bru, belgium))
assert reg.filter(geo=vla).shape == (15,)
# Note that reg.filter(geo=(vla,)) does NOT work. It might be a
# little confusing for users because reg[(vla,)] works, but it is
# expected since reg.filter(geo=(vla,)) is equivalent to:
# reg[((vla,),)] or reg[(vla,), :]
# mixed LGroup/string slices
child = age[:17]
child_named = age[:17] >> 'child'
working = age[18:64]
retired = age[65:]
byage = array.sum(age=(child, 5, working, retired))
assert byage.shape == (4, 44, 2, 15)
byage = array.sum(age=(child, slice(5, 10), working, retired))
assert byage.shape == (4, 44, 2, 15)
# filter on an aggregated larray created with mixed groups
# assert byage.filter(age=':17').shape == (44, 2, 15)
byage = array.sum(age=(child_named, 5, working, retired))
assert byage.filter(age=child_named).shape == (44, 2, 15)
def test_sum_several_lg_groups(array):
# 1) aggregated array created using LGroups
# -----------------------------------------
fla = geo[vla_str] >> 'Flanders'
wal = geo[wal_str] >> 'Wallonia'
bru = geo[bru_str] >> 'Brussels'
reg = array.sum(geo=(fla, wal, bru))
assert reg.shape == (116, 3, 2, 15)
# the result is indexable
# 1.a) by LGroup
assert reg.filter(geo=fla).shape == (116, 2, 15)
assert reg.filter(geo=(fla, wal)).shape == (116, 2, 2, 15)
# 1.b) by string (name of groups)
assert reg.filter(geo='Flanders').shape == (116, 2, 15)
assert reg.filter(geo='Flanders,Wallonia').shape == (116, 2, 2, 15)
# 2) aggregated array created using string groups
# -----------------------------------------------
reg = array.sum(geo=(vla_str, wal_str, bru_str))
assert reg.shape == (116, 3, 2, 15)
# the result is indexable
# 2.a) by string (def)
# assert reg.filter(geo=vla_str).shape == (116, 2, 15)
assert reg.filter(geo=(vla_str, wal_str)).shape == (116, 2, 2, 15)
# 2.b) by LGroup
# assert reg.filter(geo=fla).shape == (116, 2, 15)
# assert reg.filter(geo=(fla, wal)).shape == (116, 2, 2, 15)
def test_sum_with_groups_from_other_axis(small_array):
# use a group from another *compatible* axis
lipro2 = Axis('lipro=P01..P15')
assert small_array.sum(lipro=lipro2['P01,P03']).shape == (2,)
# use (compatible) group from another *incompatible* axis
# XXX: I am unsure whether or not this should be allowed. Maybe we
# should simply check that the group is valid in the axis, but that
# will trigger a pretty meaningful error anyway
lipro3 = Axis('lipro=P01,P03,P05')
assert small_array.sum(lipro3['P01,P03']).shape == (2,)
# use a group (from another axis) which is incompatible with the axis of
# the same name in the array
lipro4 = Axis('lipro=P01,P03,P16')
with pytest.raises(ValueError, match=r"lipro\['P01', 'P16'\] is not a valid label for any axis"):
small_array.sum(lipro4['P01,P16'])
def test_agg_kwargs(array):
raw = array.data
# dtype
assert array.sum(dtype=int) == raw.sum(dtype=int)
# ddof
assert array.std(ddof=0) == raw.std(ddof=0)
# out
res = array.std(X.sex)
out = zeros_like(res)
array.std(X.sex, out=out)
assert_array_equal(res, out)
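# The *_by variants aggregate over all axes *except* the axes/groups given, e.g. sum_by(geo, sex) is equivalent to
# sum(age, lipro) for this array.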
def test_agg_by(array):
age, geo, sex, lipro = array.axes
vla, wal, bru = vla_str, wal_str, bru_str
# no group or axis
assert array.sum_by().shape == ()
assert array.sum_by() == array.sum()
# a) group aggregate on a fresh array
# a.1) one group
res = array.sum_by(geo='A11,A21,A25')
assert res.shape == ()
assert res == array.sum(geo='A11,A21,A25').sum()
# a.2) a tuple of one group
res = array.sum_by(geo=(geo[:],))
assert res.shape == (1,)
assert_array_equal(res, array.sum(age, sex, lipro, geo=(geo[:],)))
# a.3) several groups
# string groups
res = array.sum_by(geo=(vla, wal, bru))
assert res.shape == (3,)
assert_array_equal(res, array.sum(age, sex, lipro, geo=(vla, wal, bru)))
# with one label in several groups
assert array.sum_by(sex=(['M'], ['M', 'F'])).shape == (2,)
assert array.sum_by(sex=('M', 'M,F')).shape == (2,)
res = array.sum_by(sex='M;M,F')
assert res.shape == (2,)
assert_array_equal(res, array.sum(age, geo, lipro, sex='M;M,F'))
# a.4) several dimensions at the same time
res = array.sum_by(geo=(vla, wal, bru, belgium), lipro='P01,P03;P02,P05;:')
assert res.shape == (4, 3)
assert_array_equal(res, array.sum(age, sex, geo=(vla, wal, bru, belgium), lipro='P01,P03;P02,P05;:'))
# b) both axis aggregate and group aggregate at the same time
# Note that you must list "full axes" aggregates first (Python does not allow non-keyword arguments after keyword arguments).
res = array.sum_by(sex, geo=(vla, wal, bru, belgium))
assert res.shape == (4, 2)
assert_array_equal(res, array.sum(age, lipro, geo=(vla, wal, bru, belgium)))
# c) chain group aggregate after axis aggregate
res = array.sum_by(geo, sex)
assert res.shape == (44, 2)
assert_array_equal(res, array.sum(age, lipro))
res2 = res.sum_by(geo=(vla, wal, bru, belgium))
assert res2.shape == (4,)
assert_array_equal(res2, res.sum(sex, geo=(vla, wal, bru, belgium)))
def test_agg_igroup():
arr = ndtest(3)
res = arr.sum((X.a.i[:2], X.a.i[1:]))
assert_array_equal(res.a.labels, [':a1', 'a1:'])
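# ratio() divides the array by its sum over the given axes (all axes by default), so the result sums to 1 over
# those axes.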
def test_ratio(array):
age, geo, sex, lipro = array.axes
regions = (vla_str, wal_str, bru_str, belgium)
reg = array.sum(age, sex, regions)
assert reg.shape == (4, 15)
fla = geo[vla_str] >> 'Flanders'
wal = geo[wal_str] >> 'Wallonia'
bru = geo[bru_str] >> 'Brussels'
regions = (fla, wal, bru)
reg = array.sum(age, sex, regions)
ratio = reg.ratio()
assert_array_equal(ratio, reg / reg.sum(geo, lipro))
assert ratio.shape == (3, 15)
ratio = reg.ratio(geo)
assert_array_equal(ratio, reg / reg.sum(geo))
assert ratio.shape == (3, 15)
ratio = reg.ratio(geo, lipro)
assert_array_equal(ratio, reg / reg.sum(geo, lipro))
assert ratio.shape == (3, 15)
assert ratio.sum() == 1.0
def test_percent(array):
age, geo, sex, lipro = array.axes
regions = (vla_str, wal_str, bru_str, belgium)
reg = array.sum(age, sex, regions)
assert reg.shape == (4, 15)
fla = geo[vla_str] >> 'Flanders'
wal = geo[wal_str] >> 'Wallonia'
bru = geo[bru_str] >> 'Brussels'
regions = (fla, wal, bru)
reg = array.sum(age, sex, regions)
percent = reg.percent()
assert_array_equal(percent, (reg * 100.0 / reg.sum(geo, lipro)))
assert percent.shape == (3, 15)
percent = reg.percent(geo)
assert_array_equal(percent, (reg * 100.0 / reg.sum(geo)))
assert percent.shape == (3, 15)
percent = reg.percent(geo, lipro)
assert_array_equal(percent, (reg * 100.0 / reg.sum(geo, lipro)))
assert percent.shape == (3, 15)
assert round(abs(percent.sum() - 100.0), 7) == 0
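# with_total() appends one aggregated label per listed axis or group, growing each corresponding axis by one label;
# op= selects the aggregation function (mean in some of the examples below).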
def test_total(array):
age, geo, sex, lipro = array.axes
# array = small_array
# sex, lipro = array.axes
assert array.with_total().shape == (117, 45, 3, 16)
assert array.with_total(sex).shape == (116, 44, 3, 15)
assert array.with_total(lipro).shape == (116, 44, 2, 16)
assert array.with_total(sex, lipro).shape == (116, 44, 3, 16)
fla = geo[vla_str] >> 'Flanders'
wal = geo[wal_str] >> 'Wallonia'
bru = geo[bru_str] >> 'Brussels'
bel = geo[:] >> 'Belgium'
assert array.with_total(geo=(fla, wal, bru), op=mean).shape == (116, 47, 2, 15)
assert array.with_total((fla, wal, bru), op=mean).shape == (116, 47, 2, 15)
# works but "wrong" for X.geo (double what is expected because it includes fla wal & bru)
# TODO: we probably want to display a warning (or even an error?) in that case.
# If we really want that behavior, we can still split the operation:
# .with_total((fla, wal, bru)).with_total(X.geo)
# OR we might want to only sum the axis as it was before the op (but that does not play well when working with
# multiple axes).
a1 = array.with_total(X.sex, (fla, wal, bru), X.geo, X.lipro)
assert a1.shape == (116, 48, 3, 16)
# correct total but the order is not very nice
a2 = array.with_total(X.sex, X.geo, (fla, wal, bru), X.lipro)
assert a2.shape == (116, 48, 3, 16)
# the correct way to do it
a3 = array.with_total(X.sex, (fla, wal, bru, bel), X.lipro)
assert a3.shape == (116, 48, 3, 16)
# a4 = array.with_total((lipro[':P05'], lipro['P05:']), op=mean)
a4 = array.with_total((':P05', 'P05:'), op=mean)
assert a4.shape == (116, 44, 2, 17)
def test_transpose():
arr = ndtest((2, 3, 4))
a, b, c = arr.axes
res = arr.transpose()
assert res.axes == [c, b, a]
res = arr.transpose('b', 'c', 'a')
assert res.axes == [b, c, a]
res = arr.transpose('b')
assert res.axes == [b, a, c]
# using Ellipsis instead of ... to avoid a syntax error on Python 2 (where ... is only available within [])
res = arr.transpose(Ellipsis, 'a')
assert res.axes == [b, c, a]
res = arr.transpose('c', Ellipsis, 'a')
assert res.axes == [c, b, a]
def test_transpose_anonymous():
a = ndtest([Axis(2), Axis(3), Axis(4)])
# reordered = a.transpose(0, 2, 1)
# self.assertEqual(reordered.shape, (2, 4, 3))
# axes = [1, 2]
# => union(axes, )
# => axes.extend([[0]])
# => breaks because [0] not compatible with axes[0]
# => breaks because [0] not compatible with [1]
# a real union should not care and should return
# [1, 2, 0] but will this break other stuff? My gut feeling is yes
# when doing a binop between anonymous axes, we use union too (that might be the problem) and we need *that*
# union to match axes by position
reordered = a.transpose(1, 2)
assert reordered.shape == (3, 4, 2)
reordered = a.transpose(2, 0)
assert reordered.shape == (4, 2, 3)
reordered = a.transpose()
assert reordered.shape == (4, 3, 2)
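# Binary operations between Arrays (or between an Array and a scalar/ndarray) behave like the corresponding numpy
# operations on .data, with axes aligned by name, and emit larray-specific RuntimeWarnings for 0 / 0 and division
# by zero (checked below).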
def test_binary_ops(small_array):
raw = small_array.data
assert_array_equal(small_array + small_array, raw + raw)
assert_array_equal(small_array + 1, raw + 1)
assert_array_equal(1 + small_array, 1 + raw)
assert_array_equal(small_array - small_array, raw - raw)
assert_array_equal(small_array - 1, raw - 1)
assert_array_equal(1 - small_array, 1 - raw)
assert_array_equal(small_array * small_array, raw * raw)
assert_array_equal(small_array * 2, raw * 2)
assert_array_equal(2 * small_array, 2 * raw)
with np.errstate(invalid='ignore'):
raw_res = raw / raw
with pytest.warns(RuntimeWarning) as caught_warnings:
res = small_array / small_array
assert_array_nan_equal(res, raw_res)
assert len(caught_warnings) == 1
warn_msg = "invalid value (NaN) encountered during operation (this is typically caused by a 0 / 0)"
assert caught_warnings[0].message.args[0] == warn_msg
assert caught_warnings[0].filename == __file__
assert_array_equal(small_array / 2, raw / 2)
with np.errstate(divide='ignore'):
raw_res = 30 / raw
with pytest.warns(RuntimeWarning) as caught_warnings:
res = 30 / small_array
assert_array_equal(res, raw_res)
assert len(caught_warnings) == 1
assert caught_warnings[0].message.args[0] == "divide by zero encountered during operation"
assert caught_warnings[0].filename == __file__
assert_array_equal(30 / (small_array + 1), 30 / (raw + 1))
raw_int = raw.astype(int)
la_int = Array(raw_int, axes=(sex, lipro))
assert_array_equal(la_int / 2, raw_int / 2)
assert_array_equal(la_int // 2, raw_int // 2)
# test adding two larrays with different axes order
assert_array_equal(small_array + small_array.transpose(), raw * 2)
# mixed operations
raw2 = raw / 2
la_raw2 = small_array - raw2
assert la_raw2.axes == small_array.axes
assert_array_equal(la_raw2, raw - raw2)
raw2_la = raw2 - small_array
assert raw2_la.axes == small_array.axes
assert_array_equal(raw2_la, raw2 - raw)
la_ge_raw2 = small_array >= raw2
assert la_ge_raw2.axes == small_array.axes
assert_array_equal(la_ge_raw2, raw >= raw2)
raw2_ge_la = raw2 >= small_array
assert raw2_ge_la.axes == small_array.axes
assert_array_equal(raw2_ge_la, raw2 >= raw)
def test_binary_ops_no_name_axes(small_array):
raw = small_array.data
raw2 = small_array.data + 1
la = ndtest([Axis(l) for l in small_array.shape])
la2 = ndtest([Axis(l) for l in small_array.shape]) + 1
assert_array_equal(la + la2, raw + raw2)
assert_array_equal(la + 1, raw + 1)
assert_array_equal(1 + la, 1 + raw)
assert_array_equal(la - la2, raw - raw2)
assert_array_equal(la - 1, raw - 1)
assert_array_equal(1 - la, 1 - raw)
assert_array_equal(la * la2, raw * raw2)
assert_array_equal(la * 2, raw * 2)
assert_array_equal(2 * la, 2 * raw)
assert_array_nan_equal(la / la2, raw / raw2)
assert_array_equal(la / 2, raw / 2)
with np.errstate(divide='ignore'):
raw_res = 30 / raw
with pytest.warns(RuntimeWarning) as caught_warnings:
res = 30 / la
assert_array_equal(res, raw_res)
assert len(caught_warnings) == 1
assert caught_warnings[0].message.args[0] == "divide by zero encountered during operation"
assert caught_warnings[0].filename == __file__
assert_array_equal(30 / (la + 1), 30 / (raw + 1))
raw_int = raw.astype(int)
la_int = Array(raw_int)
assert_array_equal(la_int / 2, raw_int / 2)
assert_array_equal(la_int // 2, raw_int // 2)
# adding two larrays with different axes order cannot work with unnamed axes
# assert_array_equal(la + la.transpose(), raw * 2)
# mixed operations
raw2 = raw / 2
la_raw2 = la - raw2
assert la_raw2.axes == la.axes
assert_array_equal(la_raw2, raw - raw2)
raw2_la = raw2 - la
assert raw2_la.axes == la.axes
assert_array_equal(raw2_la, raw2 - raw)
la_ge_raw2 = la >= raw2
assert la_ge_raw2.axes == la.axes
assert_array_equal(la_ge_raw2, raw >= raw2)
raw2_ge_la = raw2 >= la
assert raw2_ge_la.axes == la.axes
assert_array_equal(raw2_ge_la, raw2 >= raw)
def test_broadcasting_no_name():
a = ndtest([Axis(2), Axis(3)])
b = ndtest(Axis(3))
c = ndtest(Axis(2))
with pytest.raises(ValueError):
# ValueError: incompatible axes:
# Axis(None, [0, 1, 2])
# vs
# Axis(None, [0, 1])
a * b
d = a * c
assert d.shape == (2, 3)
# {0}*\{1}* 0 1 2
# 0 0 0 0
# 1 3 4 5
assert np.array_equal(d, [[0, 0, 0],
[3, 4, 5]])
# it is unfortunate that the behavior is different from numpy (even though I find our behavior more intuitive)
d = np.asarray(a) * np.asarray(b)
assert d.shape == (2, 3)
assert np.array_equal(d, [[0, 1, 4],
[0, 4, 10]])
with pytest.raises(ValueError):
# ValueError: operands could not be broadcast together with shapes (2,3) (2,)
np.asarray(a) * np.asarray(c)
def test_binary_ops_with_scalar_group():
time = Axis('time=2015..2019')
arr = ndtest(3)
expected = arr + 2015
assert_larray_equal(time.i[0] + arr, expected)
assert_larray_equal(arr + time.i[0], expected)
def test_unary_ops(small_array):
raw = small_array.data
# using numpy functions
assert_array_equal(np.abs(small_array - 10), np.abs(raw - 10))
assert_array_equal(np.negative(small_array), np.negative(raw))
assert_array_equal(np.invert(small_array), np.invert(raw))
# using python builtin ops
assert_array_equal(abs(small_array - 10), abs(raw - 10))
assert_array_equal(-small_array, -raw)
assert_array_equal(+small_array, +raw)
assert_array_equal(~small_array, ~raw)
def test_mean(small_array):
raw = small_array.data
sex, lipro = small_array.axes
assert_array_equal(small_array.mean(lipro), raw.mean(1))
def test_sequence():
res = sequence('b=b0..b2', ndtest(3) * 3, 1.0)
assert_array_equal(ndtest((3, 3), dtype=float), res)
def test_sort_values():
# 1D arrays
arr = Array([0, 1, 6, 3, -1], "a=a0..a4")
res = arr.sort_values()
expected = Array([-1, 0, 1, 3, 6], "a=a4,a0,a1,a3,a2")
assert_array_equal(res, expected)
# ascending arg
res = arr.sort_values(ascending=False)
expected = Array([6, 3, 1, 0, -1], "a=a2,a3,a1,a0,a4")
assert_array_equal(res, expected)
# 3D arrays
arr = Array([[[10, 2, 4], [3, 7, 1]], [[5, 1, 6], [2, 8, 9]]],
'a=a0,a1; b=b0,b1; c=c0..c2')
res = arr.sort_values(axis='c')
expected = Array([[[2, 4, 10], [1, 3, 7]], [[1, 5, 6], [2, 8, 9]]],
[Axis('a=a0,a1'), Axis('b=b0,b1'), Axis(3, 'c')])
assert_array_equal(res, expected)
def test_set_labels(small_array):
small_array.set_labels(X.sex, ['Man', 'Woman'], inplace=True)
expected = small_array.set_labels(X.sex, ['Man', 'Woman'])
assert_array_equal(small_array, expected)
def test_set_axes(small_array):
lipro2 = Axis([l.replace('P', 'Q') for l in lipro.labels], 'lipro2')
sex2 = Axis(['Man', 'Woman'], 'sex2')
la = Array(small_array.data, axes=(sex, lipro2))
# replace one axis
la2 = small_array.set_axes(X.lipro, lipro2)
assert_array_equal(la, la2)
la = Array(small_array.data, axes=(sex2, lipro2))
# all at once
la2 = small_array.set_axes([sex2, lipro2])
assert_array_equal(la, la2)
# using keyword args
la2 = small_array.set_axes(sex=sex2, lipro=lipro2)
assert_array_equal(la, la2)
# using dict
la2 = small_array.set_axes({X.sex: sex2, X.lipro: lipro2})
assert_array_equal(la, la2)
# using list of pairs (axis_to_replace, new_axis)
la2 = small_array.set_axes([(X.sex, sex2), (X.lipro, lipro2)])
assert_array_equal(la, la2)
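# reindex() reorders labels of an axis and/or introduces new ones; positions that do not exist in the original
# array are filled with fill_value, which can be a scalar or an Array.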
def test_reindex():
arr = ndtest((2, 2))
res = arr.reindex(X.b, ['b1', 'b2', 'b0'], fill_value=-1)
assert_array_equal(res, from_string("""a\\b b1 b2 b0
a0 1 -1 0
a1 3 -1 2"""))
arr2 = ndtest((2, 2))
arr2.reindex(X.b, ['b1', 'b2', 'b0'], fill_value=-1, inplace=True)
assert_array_equal(arr2, from_string("""a\\b b1 b2 b0
a0 1 -1 0
a1 3 -1 2"""))
# Array fill value
filler = ndtest(arr.a)
res = arr.reindex(X.b, ['b1', 'b2', 'b0'], fill_value=filler)
assert_array_equal(res, from_string("""a\\b b1 b2 b0
a0 1 0 0
a1 3 1 2"""))
# using labels from another array
arr = ndtest('a=v0..v2;b=v0,v2,v1,v3')
res = arr.reindex('a', arr.b.labels, fill_value=-1)
assert_array_equal(res, from_string("""a\\b v0 v2 v1 v3
v0 0 1 2 3
v2 8 9 10 11
v1 4 5 6 7
v3 -1 -1 -1 -1"""))
res = arr.reindex('a', arr.b, fill_value=-1)
assert_array_equal(res, from_string("""a\\b v0 v2 v1 v3
v0 0 1 2 3
v2 8 9 10 11
v1 4 5 6 7
v3 -1 -1 -1 -1"""))
# passing a list of Axis
arr = ndtest((2, 2))
res = arr.reindex([Axis("a=a0,a1"), Axis("c=c0"), Axis("b=b1,b2")], fill_value=-1)
assert_array_equal(res, from_string(""" a b\\c c0
a0 b1 1
a0 b2 -1
a1 b1 3
a1 b2 -1"""))
def test_expand():
country = Axis("country=BE,FR,DE")
arr = ndtest(country)
out1 = empty((sex, country))
arr.expand(out=out1)
out2 = empty((sex, country))
out2[:] = arr
assert_array_equal(out1, out2)
def test_append(small_array):
sex, lipro = small_array.axes
small_array = small_array.append(lipro, small_array.sum(lipro), label='sum')
assert small_array.shape == (2, 16)
small_array = small_array.append(sex, small_array.sum(sex), label='sum')
assert small_array.shape == (3, 16)
# note that the sex axis is now different! we don't have this problem with
# the kwargs syntax below
# small_array = small_array.append(small_array.mean(sex), axis=sex, label='mean')
# self.assertEqual(small_array.shape, (4, 16))
# another syntax (which implies we could not have an axis named "label")
# small_array = small_array.append(lipro=small_array.sum(lipro), label='sum')
# self.assertEqual(small_array.shape, (117, 44, 2, 15))
def test_insert():
# simple tests are in the docstring
arr1 = ndtest((2, 3))
# insert at multiple places at once
# we cannot use from_string in these tests because it deduplicates ambiguous (column) labels automatically
res = arr1.insert([42, 43], before='b1', label='new')
assert_array_equal(res, from_lists([
['a\\b', 'b0', 'new', 'new', 'b1', 'b2'],
['a0', 0, 42, 43, 1, 2],
['a1', 3, 42, 43, 4, 5]]))
res = arr1.insert(42, before=['b1', 'b2'], label='new')
assert_array_equal(res, from_lists([
['a\\b', 'b0', 'new', 'b1', 'new', 'b2'],
['a0', 0, 42, 1, 42, 2],
['a1', 3, 42, 4, 42, 5]]))
res = arr1.insert(42, before='b1', label=['b0.1', 'b0.2'])
assert_array_equal(res, from_string(r"""
a\b b0 b0.1 b0.2 b1 b2
a0 0 42 42 1 2
a1 3 42 42 4 5"""))
res = arr1.insert(42, before=['b1', 'b2'], label=['b0.5', 'b1.5'])
assert_array_equal(res, from_string(r"""
a\b b0 b0.5 b1 b1.5 b2
a0 0 42 1 42 2
a1 3 42 4 42 5"""))
res = arr1.insert([42, 43], before='b1', label=['b0.1', 'b0.2'])
assert_array_equal(res, from_string(r"""
a\b b0 b0.1 b0.2 b1 b2
a0 0 42 43 1 2
a1 3 42 43 4 5"""))
res = arr1.insert([42, 43], before=['b1', 'b2'], label='new')
assert_array_equal(res, from_lists([
['a\\b', 'b0', 'new', 'b1', 'new', 'b2'],
[ 'a0', 0, 42, 1, 43, 2],
[ 'a1', 3, 42, 4, 43, 5]]))
res = arr1.insert([42, 43], before=['b1', 'b2'], label=['b0.5', 'b1.5'])
assert_array_equal(res, from_string(r"""
a\b b0 b0.5 b1 b1.5 b2
a0 0 42 1 43 2
a1 3 42 4 43 5"""))
res = arr1.insert([42, 43], before='b1,b2', label=['b0.5', 'b1.5'])
assert_array_equal(res, from_string(r"""
a\b b0 b0.5 b1 b1.5 b2
a0 0 42 1 43 2
a1 3 42 4 43 5"""))
arr2 = ndtest(2)
res = arr1.insert([arr2 + 42, arr2 + 43], before=['b1', 'b2'], label=['b0.5', 'b1.5'])
assert_array_equal(res, from_string(r"""
a\b b0 b0.5 b1 b1.5 b2
a0 0 42 1 43 2
a1 3 43 4 44 5"""))
arr3 = ndtest('a=a0,a1;b=b0.1,b0.2') + 42
res = arr1.insert(arr3, before='b1,b2')
assert_array_equal(res, from_string(r"""
a\b b0 b0.1 b1 b0.2 b2
a0 0 42 1 43 2
a1 3 44 4 45 5"""))
# with ambiguous labels
arr4 = ndtest('a=v0,v1;b=v0,v1')
expected = from_string(r"""
a\b v0 v0.5 v1
v0 0 42 1
v1 2 42 3""")
res = arr4.insert(42, before='b[v1]', label='v0.5')
assert_array_equal(res, expected)
res = arr4.insert(42, before=X.b['v1'], label='v0.5')
assert_array_equal(res, expected)
res = arr4.insert(42, before=arr4.b['v1'], label='v0.5')
assert_array_equal(res, expected)
def test_drop():
arr1 = ndtest(3)
expected = Array([0, 2], 'a=a0,a2')
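# the same drop can be expressed via indices, labels, an axis reference or a positional axis;
# all variants must give the same result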
# indices
res = arr1.drop('a.i[1]')
assert_array_equal(res, expected)
res = arr1.drop(X.a.i[1])
assert_array_equal(res, expected)
# labels
res = arr1.drop(X.a['a1'])
assert_array_equal(res, expected)
res = arr1.drop('a[a1]')
assert_array_equal(res, expected)
# 2D array
arr2 = ndtest((2, 4))
expected = from_string(r"""
a\b b0 b2
a0 0 2
a1 4 6""")
res = arr2.drop(['b1', 'b3'])
assert_array_equal(res, expected)
res = arr2.drop(X.b['b1', 'b3'])
assert_array_equal(res, expected)
res = arr2.drop('b.i[1, 3]')
assert_array_equal(res, expected)
res = arr2.drop(X.b.i[1, 3])
assert_array_equal(res, expected)
a = Axis('a=label0..label2')
b = Axis('b=label0..label2')
arr3 = ndtest((a, b))
res = arr3.drop('a[label1]')
assert_array_equal(res, from_string(r"""
a\b label0 label1 label2
label0 0 1 2
label2 6 7 8"""))
# XXX: implement the following (#671)?
# res = arr3.drop('0[label1]')
res = arr3.drop(X[0]['label1'])
assert_array_equal(res, from_string(r"""
a\b label0 label1 label2
label0 0 1 2
label2 6 7 8"""))
res = arr3.drop(a['label1'])
assert_array_equal(res, from_string(r"""
a\b label0 label1 label2
label0 0 1 2
label2 6 7 8"""))
# the aim of this test is to drop the last value of an axis but, instead of
# dropping the last axis tick/label, to drop the first one
def test_shift_axis(small_array):
sex, lipro = small_array.axes
# TODO: check how awful the syntax is with an axis that is not last
# or first
l2 = Array(small_array[:, :'P14'], axes=[sex, Axis(lipro.labels[1:], 'lipro')])
l2 = Array(small_array[:, :'P14'], axes=[sex, lipro.subaxis(slice(1, None))])
# We can also modify the axis in-place (dangerous!)
# lipro.labels = np.append(lipro.labels[1:], lipro.labels[0])
l2 = small_array[:, 'P02':]
l2.axes.lipro.labels = lipro.labels[1:]
def test_unique():
arr = Array([[[0, 2, 0, 0],
[1, 1, 1, 0]],
[[0, 2, 0, 0],
[2, 1, 2, 0]]], 'a=a0,a1;b=b0,b1;c=c0..c3')
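# unique() keeps only the first occurrence of duplicated sub-arrays along the given axis
# (or combination of axes)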
assert_array_equal(arr.unique('a'), arr)
assert_array_equal(arr.unique('b'), arr)
assert_array_equal(arr.unique('c'), arr['c0,c1,c3'])
expected = from_string("""\
a_b\\c c0 c1 c2 c3
a0_b0 0 2 0 0
a0_b1 1 1 1 0
a1_b1 2 1 2 0""")
assert_array_equal(arr.unique(('a', 'b')), expected)
def test_extend(small_array):
sex, lipro = small_array.axes
all_lipro = lipro[:]
tail = small_array.sum(lipro=(all_lipro,))
small_array = small_array.extend(lipro, tail)
assert small_array.shape == (2, 16)
# test with a string axis
small_array = small_array.extend('sex', small_array.sum(sex=(sex[:],)))
assert small_array.shape == (3, 16)
@needs_pytables
def test_hdf_roundtrip(tmpdir, meta):
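# to_hdf()/read_hdf() must round-trip data, axes and metadata, including int-like string labels
# and Group objects used as keys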
a = ndtest((2, 3), meta=meta)
fpath = tmp_path(tmpdir, 'test.h5')
a.to_hdf(fpath, 'a')
res = read_hdf(fpath, 'a')
assert a.ndim == 2
assert a.shape == (2, 3)
assert a.axes.names == ['a', 'b']
assert_array_equal(res, a)
assert res.meta == a.meta
# issue 72: int-like strings should not be parsed (should round-trip correctly)
fpath = tmp_path(tmpdir, 'issue72.h5')
a = from_lists([['axis', '10', '20'],
['', 0, 1]])
a.to_hdf(fpath, 'a')
res = read_hdf(fpath, 'a')
assert res.ndim == 1
axis = res.axes[0]
assert axis.name == 'axis'
assert_array_equal(axis.labels, ['10', '20'])
# passing group as key to to_hdf
a3 = ndtest((4, 3, 4))
fpath = tmp_path(tmpdir, 'test.h5')
os.remove(fpath)
# single element group
for label in a3.a:
a3[label].to_hdf(fpath, label)
# unnamed group
group = a3.c['c0,c2']
a3[group].to_hdf(fpath, group)
# unnamed group + slice
group = a3.c['c0::2']
a3[group].to_hdf(fpath, group)
# named group
group = a3.c['c0,c2'] >> 'even'
a3[group].to_hdf(fpath, group)
# group with name containing special characters (replaced by _)
group = a3.c['c0,c2'] >> r':name?with*special/\[characters]'
a3[group].to_hdf(fpath, group)
# passing group as key to read_hdf
for label in a3.a:
subset = read_hdf(fpath, label)
assert_array_equal(subset, a3[label])
# load Session
from larray.core.session import Session
s = Session(fpath)
assert s.names == sorted(['a0', 'a1', 'a2', 'a3', 'c0,c2', 'c0::2', 'even', ':name?with*special__[characters]'])
def test_from_string():
expected = ndtest("sex=M,F")
res = from_string('''sex M F
\t 0 1''')
assert_array_equal(res, expected)
res = from_string('''sex M F
nan 0 1''')
assert_array_equal(res, expected)
res = from_string('''sex M F
NaN 0 1''')
assert_array_equal(res, expected)
def test_read_csv():
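# round-trip checks against reference arrays (io_1d, io_2d, ...) which are assumed to be
# defined at module level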
res = read_csv(inputpath('test1d.csv'))
assert_array_equal(res, io_1d)
res = read_csv(inputpath('test2d.csv'))
assert_array_equal(res, io_2d)
res = read_csv(inputpath('test3d.csv'))
assert_array_equal(res, io_3d)
res = read_csv(inputpath('testint_labels.csv'))
assert_array_equal(res, io_int_labels)
res = read_csv(inputpath('test2d_classic.csv'))
assert_array_equal(res, ndtest("a=a0..a2; b0..b2"))
la = read_csv(inputpath('test1d_liam2.csv'), dialect='liam2')
assert la.ndim == 1
assert la.shape == (3,)
assert la.axes.names == ['time']
assert_array_equal(la, [3722, 3395, 3347])
la = read_csv(inputpath('test5d_liam2.csv'), dialect='liam2')
assert la.ndim == 5
assert la.shape == (2, 5, 2, 2, 3)
assert la.axes.names == ['arr', 'age', 'sex', 'nat', 'time']
assert_array_equal(la[X.arr[1], 0, 'F', X.nat[1], :], [3722, 3395, 3347])
# missing values
res = read_csv(inputpath('testmissing_values.csv'))
assert_array_nan_equal(res, io_missing_values)
# test StringIO
res = read_csv(StringIO('a,1,2\n,0,1\n'))
assert_array_equal(res, ndtest('a=1,2'))
# sort_columns=True
res = read_csv(StringIO('a,a2,a0,a1\n,2,0,1\n'), sort_columns=True)
assert_array_equal(res, ndtest(3))
#################
# narrow format #
#################
res = read_csv(inputpath('test1d_narrow.csv'), wide=False)
assert_array_equal(res, io_1d)
res = read_csv(inputpath('test2d_narrow.csv'), wide=False)
assert_array_equal(res, io_2d)
res = read_csv(inputpath('test3d_narrow.csv'), wide=False)
assert_array_equal(res, io_3d)
# missing values
res = read_csv(inputpath('testmissing_values_narrow.csv'), wide=False)
assert_array_nan_equal(res, io_narrow_missing_values)
# unsorted values
res = read_csv(inputpath('testunsorted_narrow.csv'), wide=False)
assert_array_equal(res, io_unsorted)
def test_read_eurostat():
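# read_eurostat() should load all 5 dimensions of the test file; labels are currently read
# back as strings (see FIXME below)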
la = read_eurostat(inputpath('test5d_eurostat.csv'))
assert la.ndim == 5
assert la.shape == (2, 5, 2, 2, 3)
assert la.axes.names == ['arr', 'age', 'sex', 'nat', 'time']
# FIXME: integer labels should be parsed as such
assert_array_equal(la[X.arr['1'], '0', 'F', X.nat['1'], :],
[3722, 3395, 3347])
@needs_xlwings
def test_read_excel_xlwings():
arr = read_excel(inputpath('test.xlsx'), '1d')
assert_array_equal(arr, io_1d)
arr = read_excel(inputpath('test.xlsx'), '2d')
assert_array_equal(arr, io_2d)
arr = read_excel(inputpath('test.xlsx'), '2d_classic')
assert_array_equal(arr, ndtest("a=a0..a2; b0..b2"))
arr = read_excel(inputpath('test.xlsx'), '2d_classic', nb_axes=2)
assert_array_equal(arr, ndtest("a=a0..a2; b0..b2"))
arr = read_excel(inputpath('test.xlsx'), '3d')
assert_array_equal(arr, io_3d)
# for > 2d, specifying nb_axes is required if there is no name for the horizontal axis
arr = read_excel(inputpath('test.xlsx'), '3d_classic', nb_axes=3)
assert_array_equal(arr, ndtest("a=1..3; b=b0,b1; c0..c2"))
arr = read_excel(inputpath('test.xlsx'), 'int_labels')
assert_array_equal(arr, io_int_labels)
# passing a Group as sheet arg
axis = Axis('dim=1d,2d,3d,5d')
arr = read_excel(inputpath('test.xlsx'), axis['1d'])
assert_array_equal(arr, io_1d)
# missing rows, default fill_value
arr = read_excel(inputpath('test.xlsx'), 'missing_values')
expected = ndtest("a=1..3; b=b0,b1; c=c0..c2", dtype=float)
expected[2, 'b0'] = nan
expected[3, 'b1'] = nan
assert_array_nan_equal(arr, expected)
# missing rows + fill_value argument
arr = read_excel(inputpath('test.xlsx'), 'missing_values', fill_value=42)
expected = ndtest("a=1..3; b=b0,b1; c=c0..c2", dtype=float)
expected[2, 'b0'] = 42
expected[3, 'b1'] = 42
assert_array_equal(arr, expected)
# range
arr = read_excel(inputpath('test.xlsx'), 'position', range='D3:H9')
assert_array_equal(arr, io_3d)
#################
# narrow format #
#################
arr = read_excel(inputpath('test_narrow.xlsx'), '1d', wide=False)
assert_array_equal(arr, io_1d)
arr = read_excel(inputpath('test_narrow.xlsx'), '2d', wide=False)
assert_array_equal(arr, io_2d)
arr = read_excel(inputpath('test_narrow.xlsx'), '3d', wide=False)
assert_array_equal(arr, io_3d)
# missing rows + fill_value argument
arr = read_excel(inputpath('test_narrow.xlsx'), 'missing_values', fill_value=42, wide=False)
expected = io_narrow_missing_values.copy()
expected[isnan(expected)] = 42
assert_array_equal(arr, expected)
# unsorted values
arr = read_excel(inputpath('test_narrow.xlsx'), 'unsorted', wide=False)
assert_array_equal(arr, io_unsorted)
# range
arr = read_excel(inputpath('test_narrow.xlsx'), 'position', range='D3:G21', wide=False)
assert_array_equal(arr, io_3d)
##############################
# invalid keyword argument #
##############################
with pytest.raises(TypeError, match="'dtype' is an invalid keyword argument for this function "
"when using the xlwings backend"):
read_excel(inputpath('test.xlsx'), engine='xlwings', dtype=float)
#################
# blank cells #
#################
# Excel sheet with blank cells on right/bottom border of the array to read
fpath = inputpath('test_blank_cells.xlsx')
good = read_excel(fpath, 'good')
bad1 = read_excel(fpath, 'blanksafter_morerowsthancols')
bad2 = read_excel(fpath, 'blanksafter_morecolsthanrows')
assert_array_equal(bad1, good)
assert_array_equal(bad2, good)
# with additional empty column in the middle of the array to read
good2 = ndtest('a=a0,a1;b=2003..2006').astype(object)
good2[2005] = None
good2 = good2.set_axes('b', Axis([2003, 2004, None, 2006], 'b'))
bad3 = read_excel(fpath, 'middleblankcol')
bad4 = read_excel(fpath, '16384col')
assert_array_equal(bad3, good2)
assert_array_equal(bad4, good2)
@needs_xlrd
def test_read_excel_pandas():
arr = read_excel(inputpath('test.xlsx'), '1d', engine='xlrd')
assert_array_equal(arr, io_1d)
arr = read_excel(inputpath('test.xlsx'), '2d', engine='xlrd')
assert_array_equal(arr, io_2d)
arr = read_excel(inputpath('test.xlsx'), '2d', nb_axes=2, engine='xlrd')
assert_array_equal(arr, io_2d)
arr = read_excel(inputpath('test.xlsx'), '2d_classic', engine='xlrd')
assert_array_equal(arr, ndtest("a=a0..a2; b0..b2"))
arr = read_excel(inputpath('test.xlsx'), '2d_classic', nb_axes=2, engine='xlrd')
assert_array_equal(arr, ndtest("a=a0..a2; b0..b2"))
arr = read_excel(inputpath('test.xlsx'), '3d', index_col=[0, 1], engine='xlrd')
assert_array_equal(arr, io_3d)
arr = read_excel(inputpath('test.xlsx'), '3d', engine='xlrd')
assert_array_equal(arr, io_3d)
# for > 2d, specifying nb_axes is required if there is no name for the horizontal axis
arr = read_excel(inputpath('test.xlsx'), '3d_classic', nb_axes=3, engine='xlrd')
assert_array_equal(arr, ndtest("a=1..3; b=b0,b1; c0..c2"))
arr = read_excel(inputpath('test.xlsx'), 'int_labels', engine='xlrd')
assert_array_equal(arr, io_int_labels)
# passing a Group as sheet arg
axis = Axis('dim=1d,2d,3d,5d')
arr = read_excel(inputpath('test.xlsx'), axis['1d'], engine='xlrd')
assert_array_equal(arr, io_1d)
# missing rows + fill_value argument
arr = read_excel(inputpath('test.xlsx'), 'missing_values', fill_value=42, engine='xlrd')
expected = io_missing_values.copy()
expected[isnan(expected)] = 42
assert_array_equal(arr, expected)
#################
# narrow format #
#################
arr = read_excel(inputpath('test_narrow.xlsx'), '1d', wide=False, engine='xlrd')
assert_array_equal(arr, io_1d)
arr = read_excel(inputpath('test_narrow.xlsx'), '2d', wide=False, engine='xlrd')
assert_array_equal(arr, io_2d)
arr = read_excel(inputpath('test_narrow.xlsx'), '3d', wide=False, engine='xlrd')
assert_array_equal(arr, io_3d)
# missing rows + fill_value argument
arr = read_excel(inputpath('test_narrow.xlsx'), 'missing_values',
fill_value=42, wide=False, engine='xlrd')
expected = io_narrow_missing_values.copy()
expected[isnan(expected)] = 42
assert_array_equal(arr, expected)
# unsorted values
arr = read_excel(inputpath('test_narrow.xlsx'), 'unsorted', wide=False, engine='xlrd')
assert_array_equal(arr, io_unsorted)
#################
# blank cells #
#################
# Excel sheet with blank cells on right/bottom border of the array to read
fpath = inputpath('test_blank_cells.xlsx')
good1 = read_excel(fpath, 'good', engine='xlrd')
bad1 = read_excel(fpath, 'blanksafter_morerowsthancols', engine='xlrd')
bad2 = read_excel(fpath, 'blanksafter_morecolsthanrows', engine='xlrd')
assert_array_equal(bad1, good1)
assert_array_equal(bad2, good1)
# with additional empty column in the middle of the array to read
good2 = ndtest('a=a0,a1;b=2003..2006').astype(float)
good2[2005] = nan
good2 = good2.set_axes('b', Axis([2003, 2004, 'Unnamed: 3', 2006], 'b'))
bad3 = read_excel(fpath, 'middleblankcol', engine='xlrd')
bad4 = read_excel(fpath, '16384col', engine='xlrd')
assert_array_nan_equal(bad3, good2)
assert_array_nan_equal(bad4, good2)
def test_from_lists():
simple_arr = ndtest((2, 2, 3))
# simple
arr_list = [['a', 'b\\c', 'c0', 'c1', 'c2'],
['a0', 'b0', 0, 1, 2],
['a0', 'b1', 3, 4, 5],
['a1', 'b0', 6, 7, 8],
['a1', 'b1', 9, 10, 11]]
res = from_lists(arr_list)
assert_array_equal(res, simple_arr)
# simple (using dump). This should be the same test as above.
# We just make sure dump() and from_lists() round-trip correctly.
arr_list = simple_arr.dump()
res = from_lists(arr_list)
assert_array_equal(res, simple_arr)
# with anonymous axes
arr_anon = simple_arr.rename({0: None, 1: None, 2: None})
arr_list = arr_anon.dump()
assert arr_list == [[None, None, 'c0', 'c1', 'c2'],
['a0', 'b0', 0, 1, 2],
['a0', 'b1', 3, 4, 5],
['a1', 'b0', 6, 7, 8],
['a1', 'b1', 9, 10, 11]]
res = from_lists(arr_list, nb_axes=3)
assert_array_equal(res, arr_anon)
# with empty ('') axes names
arr_empty_names = simple_arr.rename({0: '', 1: '', 2: ''})
arr_list = arr_empty_names.dump()
assert arr_list == [[ '', '', 'c0', 'c1', 'c2'],
['a0', 'b0', 0, 1, 2],
['a0', 'b1', 3, 4, 5],
['a1', 'b0', 6, 7, 8],
['a1', 'b1', 9, 10, 11]]
res = from_lists(arr_list, nb_axes=3)
# this is purposefully NOT arr_empty_names because from_lists (via df_asarray) transforms '' axes to None
assert_array_equal(res, arr_anon)
# sort_rows
arr = from_lists([['sex', 'nat\\year', 1991, 1992, 1993],
['F', 'BE', 0, 0, 1],
['F', 'FO', 0, 0, 2],
['M', 'BE', 1, 0, 0],
['M', 'FO', 2, 0, 0]])
sorted_arr = from_lists([['sex', 'nat\\year', 1991, 1992, 1993],
['M', 'BE', 1, 0, 0],
['M', 'FO', 2, 0, 0],
['F', 'BE', 0, 0, 1],
['F', 'FO', 0, 0, 2]], sort_rows=True)
assert_array_equal(sorted_arr, arr)
# sort_columns
arr = from_lists([['sex', 'nat\\year', 1991, 1992, 1993],
['M', 'BE', 1, 0, 0],
['M', 'FO', 2, 0, 0],
['F', 'BE', 0, 0, 1],
['F', 'FO', 0, 0, 2]])
sorted_arr = from_lists([['sex', 'nat\\year', 1992, 1991, 1993],
['M', 'BE', 0, 1, 0],
['M', 'FO', 0, 2, 0],
['F', 'BE', 0, 0, 1],
['F', 'FO', 0, 0, 2]], sort_columns=True)
assert_array_equal(sorted_arr, arr)
def test_from_series():
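# from_series() maps a (Multi)Index to axes; sort_rows sorts the resulting axes and
# fill_value fills missing label combinations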
# Series with Index as index
expected = ndtest(3)
s = pd.Series([0, 1, 2], index=pd.Index(['a0', 'a1', 'a2'], name='a'))
assert_array_equal(from_series(s), expected)
s = pd.Series([2, 0, 1], index=pd.Index(['a2', 'a0', 'a1'], name='a'))
assert_array_equal(from_series(s, sort_rows=True), expected)
expected = ndtest(3)[['a2', 'a0', 'a1']]
assert_array_equal(from_series(s), expected)
# Series with MultiIndex as index
age = Axis('age=0..3')
gender = Axis('gender=M,F')
time = Axis('time=2015..2017')
expected = ndtest((age, gender, time))
index = pd.MultiIndex.from_product(expected.axes.labels, names=expected.axes.names)
data = expected.data.flatten()
s = pd.Series(data, index)
res = from_series(s)
assert_array_equal(res, expected)
res = from_series(s, sort_rows=True)
assert_array_equal(res, expected.sort_axes())
expected[0, 'F'] = -1
s = s.reset_index().drop([3, 4, 5]).set_index(['age', 'gender', 'time'])[0]
res = from_series(s, fill_value=-1)
assert_array_equal(res, expected)
def test_from_frame():
# 1) data = scalar
# ================
# Dataframe becomes a 2D Array (1 x 1)
data = np.array([10])
index = ['i0']
columns = ['c0']
axis_index, axis_columns = Axis(index), Axis(columns)
df = pd.DataFrame(data, index=index, columns=columns)
assert df.index.name is None
assert df.columns.name is None
assert list(df.index.values) == index
assert list(df.columns.values) == columns
# anonymous indexes/columns
# input dataframe:
# ----------------
# c0
# i0 10
# output Array:
# -------------
# {0}\{1} c0
# i0 10
la = from_frame(df)
assert la.ndim == 2
assert la.shape == (1, 1)
assert la.axes.names == [None, None]
assert list(la.axes.labels[0]) == index
assert list(la.axes.labels[1]) == columns
expected_la = Array(data.reshape((1, 1)), [axis_index, axis_columns])
assert_array_equal(la, expected_la)
# anonymous columns
# input dataframe:
# ----------------
# c0
# index
# i0 10
# output Array:
# -------------
# index\{1} c0
# i0 10
df.index.name, df.columns.name = 'index', None
la = from_frame(df)
assert la.ndim == 2
assert la.shape == (1, 1)
assert la.axes.names == ['index', None]
assert list(la.axes.labels[0]) == index
assert list(la.axes.labels[1]) == columns
expected_la = Array(data.reshape((1, 1)), [axis_index.rename('index'), axis_columns])
assert_array_equal(la, expected_la)
# anonymous columns/non string row axis name
# input dataframe:
# ----------------
# c0
# 0
# i0 10
# output Array:
# -------------
# 0\{1} c0
# i0 10
df = pd.DataFrame([10], index=pd.Index(['i0'], name=0), columns=['c0'])
res = from_frame(df)
expected = Array([[10]], [Axis(['i0'], name=0), Axis(['c0'])])
assert res.ndim == 2
assert res.shape == (1, 1)
assert res.axes.names == [0, None]
assert_array_equal(res, expected)
# anonymous index
# input dataframe:
# ----------------
# columns c0
# i0 10
# output Array:
# -------------
# {0}\columns c0
# i0 10
df.index.name, df.columns.name = None, 'columns'
la = from_frame(df)
assert la.ndim == 2
assert la.shape == (1, 1)
assert la.axes.names == [None, 'columns']
assert list(la.axes.labels[0]) == index
assert list(la.axes.labels[1]) == columns
expected_la = Array(data.reshape((1, 1)), [axis_index, axis_columns.rename('columns')])
assert_array_equal(la, expected_la)
# index and columns with name
# input dataframe:
# ----------------
# columns c0
# index
# i0 10
# output Array:
# -------------
# index\columns c0
# i0 10
df.index.name, df.columns.name = 'index', 'columns'
la = from_frame(df)
assert la.ndim == 2
assert la.shape == (1, 1)
assert la.axes.names == ['index', 'columns']
assert list(la.axes.labels[0]) == index
assert list(la.axes.labels[1]) == columns
expected_la = Array(data.reshape((1, 1)), [axis_index.rename('index'), axis_columns.rename('columns')])
assert_array_equal(la, expected_la)
# 2) data = vector
# ================
size = 3
# 2A) data = horizontal vector (1 x N)
# ====================================
# Dataframe becomes a 2D Array (1 x N)
data = np.arange(size)
indexes = ['i0']
columns = ['c{}'.format(i) for i in range(size)]
axis_index, axis_columns = Axis(indexes), Axis(columns)
df = pd.DataFrame(data.reshape(1, size), index=indexes, columns=columns)
assert df.index.name is None
assert df.columns.name is None
assert list(df.index.values) == indexes
assert list(df.columns.values) == columns
# anonymous indexes/columns
# input dataframe:
# ----------------
# c0 c1 c2
# i0 0 1 2
# output Array:
# -------------
# {0}\{1} c0 c1 c2
# i0 0 1 2
la = from_frame(df)
assert la.ndim == 2
assert la.shape == (1, size)
assert la.axes.names == [None, None]
assert list(la.axes.labels[0]) == indexes
assert list(la.axes.labels[1]) == columns
expected_la = Array(data.reshape((1, size)), [axis_index, axis_columns])
assert_array_equal(la, expected_la)
# anonymous columns
# input dataframe:
# ----------------
# c0 c1 c2
# index
# i0 0 1 2
# output Array:
# -------------
# index\{1} c0 c1 c2
# i0 0 1 2
df.index.name, df.columns.name = 'index', None
la = from_frame(df)
assert la.ndim == 2
assert la.shape == (1, size)
assert la.axes.names == ['index', None]
assert list(la.axes.labels[0]) == indexes
assert list(la.axes.labels[1]) == columns
expected_la = Array(data.reshape((1, size)), [axis_index.rename('index'), axis_columns])
assert_array_equal(la, expected_la)
# anonymous index
# input dataframe:
# ----------------
# columns c0 c1 c2
# i0 0 1 2
# output Array:
# -------------
# {0}\columns c0 c1 c2
# i0 0 1 2
df.index.name, df.columns.name = None, 'columns'
la = from_frame(df)
assert la.ndim == 2
assert la.shape == (1, size)
assert la.axes.names == [None, 'columns']
assert list(la.axes.labels[0]) == indexes
assert list(la.axes.labels[1]) == columns
expected_la = Array(data.reshape((1, size)), [axis_index, axis_columns.rename('columns')])
assert_array_equal(la, expected_la)
# index and columns with name
# input dataframe:
# ----------------
# columns c0 c1 c2
# index
# i0 0 1 2
# output Array:
# -------------
# index\columns c0 c1 c2
# i0 0 1 2
df.index.name, df.columns.name = 'index', 'columns'
la = from_frame(df)
assert la.ndim == 2
assert la.shape == (1, size)
assert la.axes.names == ['index', 'columns']
assert list(la.axes.labels[0]) == indexes
assert list(la.axes.labels[1]) == columns
expected_la = Array(data.reshape((1, size)), [axis_index.rename('index'), axis_columns.rename('columns')])
assert_array_equal(la, expected_la)
# 2B) data = vertical vector (N x 1)
# ==================================
# Dataframe becomes 2D Array
data = data.reshape(size, 1)
indexes = ['i{}'.format(i) for i in range(size)]
columns = ['c0']
axis_index, axis_columns = Axis(indexes), Axis(columns)
df = pd.DataFrame(data, index=indexes, columns=columns)
assert df.index.name is None
assert df.columns.name is None
assert list(df.index.values) == indexes
assert list(df.columns.values) == columns
# anonymous indexes/columns
# input dataframe:
# ----------------
# c0
# i0 0
# i1 1
# i2 2
# output Array:
# -------------
# {0}\{1} c0
# i0 0
# i1 1
# i2 2
la = from_frame(df)
assert la.ndim == 2
assert la.shape == (size, 1)
assert la.axes.names == [None, None]
assert list(la.axes.labels[0]) == indexes
assert list(la.axes.labels[1]) == columns
expected_la = Array(data, [axis_index, axis_columns])
assert_array_equal(la, expected_la)
# anonymous columns
# input dataframe:
# ----------------
# c0
# index
# i0 0
# i1 1
# i2 2
# output Array:
# -------------
# index\{1} c0
# i0 0
# i1 1
# i2 2
df.index.name, df.columns.name = 'index', None
la = from_frame(df)
assert la.ndim == 2
assert la.shape == (size, 1)
assert la.axes.names == ['index', None]
assert list(la.axes.labels[0]) == indexes
assert list(la.axes.labels[1]) == columns
expected_la = Array(data, [axis_index.rename('index'), axis_columns])
assert_array_equal(la, expected_la)
# anonymous index
# input dataframe:
# ----------------
# columns c0
# i0 0
# i1 1
# i2 2
# output Array:
# -------------
# {0}\columns c0
# i0 0
# i1 1
# i2 2
df.index.name, df.columns.name = None, 'columns'
la = from_frame(df)
assert la.ndim == 2
assert la.shape == (size, 1)
assert la.axes.names == [None, 'columns']
assert list(la.axes.labels[0]) == indexes
assert list(la.axes.labels[1]) == columns
expected_la = Array(data, [axis_index, axis_columns.rename('columns')])
assert_array_equal(la, expected_la)
# index and columns with name
# input dataframe:
# ----------------
# columns c0
# index
# i0 0
# i1 1
# i2 2
# output Array:
# -------------
# index\columns c0
# i0 0
# i1 1
# i2 2
df.index.name, df.columns.name = 'index', 'columns'
la = from_frame(df)
assert la.ndim == 2
assert la.shape == (size, 1)
assert la.axes.names == ['index', 'columns']
assert list(la.axes.labels[0]) == indexes
assert list(la.axes.labels[1]) == columns
expected_la = Array(data, [axis_index.rename('index'), axis_columns.rename('columns')])
assert_array_equal(la, expected_la)
# 3) 3D array
# ===========
# 3A) Dataframe with 2 index columns
# ==================================
dt = [('age', int), ('sex', 'U1'),
('2007', int), ('2010', int), ('2013', int)]
data = np.array([
(0, 'F', 3722, 3395, 3347),
(0, 'M', 338, 316, 323),
(1, 'F', 2878, 2791, 2822),
(1, 'M', 1121, 1037, 976),
(2, 'F', 4073, 4161, 4429),
(2, 'M', 1561, 1463, 1467),
(3, 'F', 3507, 3741, 3366),
(3, 'M', 2052, 2052, 2118),
], dtype=dt)
df = pd.DataFrame(data)
df.set_index(['age', 'sex'], inplace=True)
df.columns.name = 'time'
la = from_frame(df)
assert la.ndim == 3
assert la.shape == (4, 2, 3)
assert la.axes.names == ['age', 'sex', 'time']
assert_array_equal(la[0, 'F', :], [3722, 3395, 3347])
# 3B) Dataframe with columns.name containing \\
# =============================================
dt = [('age', int), ('sex\\time', 'U1'),
('2007', int), ('2010', int), ('2013', int)]
data = np.array([
(0, 'F', 3722, 3395, 3347),
(0, 'M', 338, 316, 323),
(1, 'F', 2878, 2791, 2822),
(1, 'M', 1121, 1037, 976),
(2, 'F', 4073, 4161, 4429),
(2, 'M', 1561, 1463, 1467),
(3, 'F', 3507, 3741, 3366),
(3, 'M', 2052, 2052, 2118),
], dtype=dt)
df = pd.DataFrame(data)
df.set_index(['age', 'sex\\time'], inplace=True)
la = from_frame(df, unfold_last_axis_name=True)
assert la.ndim == 3
assert la.shape == (4, 2, 3)
assert la.axes.names == ['age', 'sex', 'time']
assert_array_equal(la[0, 'F', :], [3722, 3395, 3347])
# 3C) Dataframe with no axis names (names are None)
# ===============================
arr_no_names = ndtest("a0,a1;b0..b2;c0..c3")
df_no_names = arr_no_names.df
res = from_frame(df_no_names)
assert_array_equal(res, arr_no_names)
# 3D) Dataframe with empty axis names (names are '')
# ==================================
arr_empty_names = ndtest("=a0,a1;=b0..b2;=c0..c3")
assert arr_empty_names.axes.names == ['', '', '']
df_no_names = arr_empty_names.df
res = from_frame(df_no_names)
assert_array_equal(res, arr_empty_names)
# 4) test sort_rows and sort_columns arguments
# ============================================
age = Axis('age=2,0,1,3')
gender = Axis('gender=M,F')
time = Axis('time=2016,2015,2017')
columns = pd.Index(time.labels, name=time.name)
# df.index is an Index instance
expected = ndtest((gender, time))
index = pd.Index(gender.labels, name=gender.name)
data = expected.data
df = pd.DataFrame(data, index=index, columns=columns)
expected = expected.sort_axes()
res = from_frame(df, sort_rows=True, sort_columns=True)
assert_array_equal(res, expected)
# df.index is a MultiIndex instance
expected = ndtest((age, gender, time))
index = pd.MultiIndex.from_product(expected.axes[:-1].labels, names=expected.axes[:-1].names)
data = expected.data.reshape(len(age) * len(gender), len(time))
df = pd.DataFrame(data, index=index, columns=columns)
res = from_frame(df, sort_rows=True, sort_columns=True)
assert_array_equal(res, expected.sort_axes())
# 5) test fill_value
# ==================
expected[0, 'F'] = -1
df = df.reset_index().drop([3]).set_index(['age', 'gender'])
res = from_frame(df, fill_value=-1)
assert_array_equal(res, expected)
def test_to_csv(tmpdir):
arr = io_3d.copy()
arr.to_csv(tmp_path(tmpdir, 'out.csv'))
result = ['a,b\\c,c0,c1,c2\n',
'1,b0,0,1,2\n',
'1,b1,3,4,5\n']
with open(tmp_path(tmpdir, 'out.csv')) as f:
assert f.readlines()[:3] == result
# stacked data (one column containing all the values and another column listing the context of the value)
arr.to_csv(tmp_path(tmpdir, 'out.csv'), wide=False)
result = ['a,b,c,value\n',
'1,b0,c0,0\n',
'1,b0,c1,1\n']
with open(tmp_path(tmpdir, 'out.csv')) as f:
assert f.readlines()[:3] == result
arr = io_1d.copy()
arr.to_csv(tmp_path(tmpdir, 'test_out1d.csv'))
result = ['a,a0,a1,a2\n',
',0,1,2\n']
with open(tmp_path(tmpdir, 'test_out1d.csv')) as f:
assert f.readlines() == result
@needs_xlsxwriter
def test_to_excel_xlsxwriter(tmpdir):
fpath = tmp_path(tmpdir, 'test_to_excel_xlsxwriter.xlsx')
# 1D
a1 = ndtest(3)
# fpath/Sheet1/A1
a1.to_excel(fpath, overwrite_file=True, engine='xlsxwriter')
res = read_excel(fpath, engine='xlrd')
assert_array_equal(res, a1)
# fpath/Sheet1/A1(transposed)
a1.to_excel(fpath, transpose=True, engine='xlsxwriter')
res = read_excel(fpath, engine='xlrd')
assert_array_equal(res, a1)
# fpath/Sheet1/A1
# stacked data (one column containing all the values and another column listing the context of the value)
a1.to_excel(fpath, wide=False, engine='xlsxwriter')
res = read_excel(fpath, engine='xlrd')
stacked_a1 = a1.reshape([a1.a, Axis(['value'])])
assert_array_equal(res, stacked_a1)
# 2D
a2 = ndtest((2, 3))
# fpath/Sheet1/A1
a2.to_excel(fpath, overwrite_file=True, engine='xlsxwriter')
res = read_excel(fpath, engine='xlrd')
assert_array_equal(res, a2)
# fpath/Sheet1/A10
# TODO: this is currently not supported (though we would only need to translate A10 to startrow=9 and startcol=0)
# a2.to_excel('fpath', 'Sheet1', 'A10', engine='xlsxwriter')
# res = read_excel('fpath', 'Sheet1', engine='xlrd', skiprows=9)
# assert_array_equal(res, a2)
# fpath/other/A1
a2.to_excel(fpath, 'other', engine='xlsxwriter')
res = read_excel(fpath, 'other', engine='xlrd')
assert_array_equal(res, a2)
# 3D
a3 = ndtest((2, 3, 4))
# fpath/Sheet1/A1
# FIXME: merge_cells=False should be the default (until Pandas is fixed to read its format)
a3.to_excel(fpath, overwrite_file=True, engine='xlsxwriter', merge_cells=False)
# a3.to_excel('fpath', overwrite_file=True, engine='openpyxl')
res = read_excel(fpath, engine='xlrd')
assert_array_equal(res, a3)
# fpath/Sheet1/A20
# TODO: implement position (see above)
# a3.to_excel('fpath', 'Sheet1', 'A20', engine='xlsxwriter', merge_cells=False)
# res = read_excel('fpath', 'Sheet1', engine='xlrd', skiprows=19)
# assert_array_equal(res, a3)
# fpath/other/A1
a3.to_excel(fpath, 'other', engine='xlsxwriter', merge_cells=False)
res = read_excel(fpath, 'other', engine='xlrd')
assert_array_equal(res, a3)
# passing group as sheet_name
a3 = ndtest((4, 3, 4))
os.remove(fpath)
# single element group
for label in a3.a:
a3[label].to_excel(fpath, label, engine='xlsxwriter')
# unnamed group
group = a3.c['c0,c2']
a3[group].to_excel(fpath, group, engine='xlsxwriter')
# unnamed group + slice
group = a3.c['c0::2']
a3[group].to_excel(fpath, group, engine='xlsxwriter')
# named group
group = a3.c['c0,c2'] >> 'even'
a3[group].to_excel(fpath, group, engine='xlsxwriter')
# group with name containing special characters (replaced by _)
group = a3.c['c0,c2'] >> r':name?with*special/\[char]'
a3[group].to_excel(fpath, group, engine='xlsxwriter')
@needs_xlwings
def test_to_excel_xlwings(tmpdir):
fpath = tmp_path(tmpdir, 'test_to_excel_xlwings.xlsx')
# 1D
a1 = ndtest(3)
# live book/Sheet1/A1
# a1.to_excel()
# fpath/Sheet1/A1 (create a new file if it does not exist)
if os.path.isfile(fpath):
os.remove(fpath)
a1.to_excel(fpath, engine='xlwings')
# we use xlrd to read back instead of xlwings even if that should work, to make the test faster
res = read_excel(fpath, engine='xlrd')
assert_array_equal(res, a1)
# fpath/Sheet1/A1(transposed)
a1.to_excel(fpath, transpose=True, engine='xlwings')
res = read_excel(fpath, engine='xlrd')
assert_array_equal(res, a1)
# fpath/Sheet1/A1
# stacked data (one column containing all the values and another column listing the context of the value)
a1.to_excel(fpath, wide=False, engine='xlwings')
res = read_excel(fpath, engine='xlrd')
assert_array_equal(res, a1)
# 2D
a2 = ndtest((2, 3))
# fpath/Sheet1/A1
a2.to_excel(fpath, overwrite_file=True, engine='xlwings')
res = read_excel(fpath, engine='xlrd')
assert_array_equal(res, a2)
# fpath/Sheet1/A10
a2.to_excel(fpath, 'Sheet1', 'A10', engine='xlwings')
res = read_excel(fpath, 'Sheet1', engine='xlrd', skiprows=9)
assert_array_equal(res, a2)
# fpath/other/A1
a2.to_excel(fpath, 'other', engine='xlwings')
res = read_excel(fpath, 'other', engine='xlrd')
assert_array_equal(res, a2)
# transpose
a2.to_excel(fpath, 'transpose', transpose=True, engine='xlwings')
res = read_excel(fpath, 'transpose', engine='xlrd')
assert_array_equal(res, a2.T)
# 3D
a3 = ndtest((2, 3, 4))
# fpath/Sheet1/A1
a3.to_excel(fpath, overwrite_file=True, engine='xlwings')
res = read_excel(fpath, engine='xlrd')
assert_array_equal(res, a3)
# fpath/Sheet1/A20
a3.to_excel(fpath, 'Sheet1', 'A20', engine='xlwings')
res = read_excel(fpath, 'Sheet1', engine='xlrd', skiprows=19)
assert_array_equal(res, a3)
# fpath/other/A1
a3.to_excel(fpath, 'other', engine='xlwings')
res = read_excel(fpath, 'other', engine='xlrd')
assert_array_equal(res, a3)
# passing group as sheet_name
a3 = ndtest((4, 3, 4))
os.remove(fpath)
# single element group
for label in a3.a:
a3[label].to_excel(fpath, label, engine='xlwings')
# unnamed group
group = a3.c['c0,c2']
a3[group].to_excel(fpath, group, engine='xlwings')
# unnamed group + slice
group = a3.c['c0::2']
a3[group].to_excel(fpath, group, engine='xlwings')
# named group
group = a3.c['c0,c2'] >> 'even'
a3[group].to_excel(fpath, group, engine='xlwings')
# group with name containing special characters (replaced by _)
group = a3.c['c0,c2'] >> r':name?with*special/\[char]'
a3[group].to_excel(fpath, group, engine='xlwings')
# checks sheet names
sheet_names = sorted(open_excel(fpath).sheet_names())
assert sheet_names == sorted(['a0', 'a1', 'a2', 'a3', 'c0,c2', 'c0__2', 'even',
'_name_with_special___char_'])
# sheet name of 31 characters (= maximum authorized length)
a3.to_excel(fpath, "sheetname_of_exactly_31_chars__", engine='xlwings')
# sheet name longer than 31 characters
with pytest.raises(ValueError, match="Sheet names cannot exceed 31 characters"):
a3.to_excel(fpath, "sheetname_longer_than_31_characters", engine='xlwings')
def test_dump():
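# dump() exports the array content as rows (a header row followed by data rows)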
# narrow format
res = list(ndtest(3).dump(wide=False, value_name='data'))
assert res == [['a', 'data'],
['a0', 0],
['a1', 1],
['a2', 2]]
# array with an anonymous axis and a wildcard axis
arr = ndtest((Axis('a0,a1'), Axis(2, 'b')))
res = arr.dump()
assert res == [['\\b', 0, 1],
['a0', 0, 1],
['a1', 2, 3]]
res = arr.dump(_axes_display_names=True)
assert res == [['{0}\\b*', 0, 1],
['a0', 0, 1],
['a1', 2, 3]]
@needs_xlwings
def test_open_excel(tmpdir):
# 1) Create new file
# ==================
fpath = inputpath('should_not_exist.xlsx')
# overwrite_file must be set to True to create a new file
with pytest.raises(ValueError):
open_excel(fpath)
# 2) with headers
# ===============
with open_excel(visible=False) as wb:
# 1D
a1 = ndtest(3)
# Sheet1/A1
wb['Sheet1'] = a1.dump()
res = wb['Sheet1'].load()
assert_array_equal(res, a1)
wb[0] = a1.dump()
res = wb[0].load()
assert_array_equal(res, a1)
# Sheet1/A1(transposed)
# TODO: implement .options on Sheet so that one can write:
# wb[0].options(transpose=True).value = a1.dump()
wb[0]['A1'].options(transpose=True).value = a1.dump()
# TODO: implement .options on Range so that you can write:
# res = wb[0]['A1:B4'].options(transpose=True).load()
# res = from_lists(wb[0]['A1:B4'].options(transpose=True).value)
# assert_array_equal(res, a1)
# 2D
a2 = ndtest((2, 3))
# Sheet1/A1
wb[0] = a2.dump()
res = wb[0].load()
assert_array_equal(res, a2)
# Sheet1/A10
wb[0]['A10'] = a2.dump()
res = wb[0]['A10:D12'].load()
assert_array_equal(res, a2)
# other/A1
wb['other'] = a2.dump()
res = wb['other'].load()
assert_array_equal(res, a2)
# new/A10
# we need to create the sheet first
wb['new'] = ''
wb['new']['A10'] = a2.dump()
res = wb['new']['A10:D12'].load()
assert_array_equal(res, a2)
# new2/A10
# cannot store the return value of "add" because that's a raw xlwings Sheet
wb.sheets.add('new2')
wb['new2']['A10'] = a2.dump()
res = wb['new2']['A10:D12'].load()
assert_array_equal(res, a2)
# 3D
a3 = ndtest((2, 3, 4))
# 3D/A1
wb['3D'] = a3.dump()
res = wb['3D'].load()
assert_array_equal(res, a3)
# 3D/A20
wb['3D']['A20'] = a3.dump()
res = wb['3D']['A20:F26'].load()
assert_array_equal(res, a3)
# 3D/A20 without name for columns
wb['3D']['A20'] = a3.dump()
# assume we have no name for the columns axis (ie change b\c to b)
wb['3D']['B20'] = 'b'
res = wb['3D']['A20:F26'].load(nb_axes=3)
assert_array_equal(res, a3.data)
# the two first axes should be the same
assert res.axes[:2] == a3.axes[:2]
# the third axis should have the same labels (but not the same name obviously)
assert_array_equal(res.axes[2].labels, a3.axes[2].labels)
with open_excel(inputpath('test.xlsx')) as wb:
expected = ndtest("a=a0..a2; b0..b2")
res = wb['2d_classic'].load()
assert_array_equal(res, expected)
# 3) without headers
# ==================
with open_excel(visible=False) as wb:
# 1D
a1 = ndtest(3)
# Sheet1/A1
wb['Sheet1'] = a1
res = wb['Sheet1'].load(header=False)
assert_array_equal(res, a1.data)
wb[0] = a1
res = wb[0].load(header=False)
assert_array_equal(res, a1.data)
# Sheet1/A1(transposed)
# FIXME: we need to .dump(header=False) explicitly because otherwise we go via ArrayConverter which
# includes labels. for consistency's sake we should either change ArrayConverter to not include
# labels, or change wb[0] = a1 to include them (and use wb[0] = a1.data to avoid them?) but that
# would be heavily backward incompatible and how would I load them back?
# wb[0]['A1'].options(transpose=True).value = a1
wb[0]['A1'].options(transpose=True).value = a1.dump(header=False)
res = wb[0]['A1:A3'].load(header=False)
assert_array_equal(res, a1.data)
# 2D
a2 = ndtest((2, 3))
# Sheet1/A1
wb[0] = a2
res = wb[0].load(header=False)
assert_array_equal(res, a2.data)
# Sheet1/A10
wb[0]['A10'] = a2
res = wb[0]['A10:C11'].load(header=False)
assert_array_equal(res, a2.data)
# other/A1
wb['other'] = a2
res = wb['other'].load(header=False)
assert_array_equal(res, a2.data)
# new/A10
# we need to create the sheet first
wb['new'] = ''
wb['new']['A10'] = a2
res = wb['new']['A10:C11'].load(header=False)
assert_array_equal(res, a2.data)
# 3D
a3 = ndtest((2, 3, 4))
# 3D/A1
wb['3D'] = a3
res = wb['3D'].load(header=False)
assert_array_equal(res, a3.data.reshape((6, 4)))
# 3D/A20
wb['3D']['A20'] = a3
res = wb['3D']['A20:D25'].load(header=False)
assert_array_equal(res, a3.data.reshape((6, 4)))
# 4) Blank cells
# ==============
# Excel sheet with blank cells on right/bottom border of the array to read
fpath = inputpath('test_blank_cells.xlsx')
with open_excel(fpath) as wb:
good = wb['good'].load()
bad1 = wb['blanksafter_morerowsthancols'].load()
bad2 = wb['blanksafter_morecolsthanrows'].load()
# with additional empty column in the middle of the array to read
good2 = wb['middleblankcol']['A1:E3'].load()
bad3 = wb['middleblankcol'].load()
bad4 = wb['16384col'].load()
assert_array_equal(bad1, good)
assert_array_equal(bad2, good)
assert_array_equal(bad3, good2)
assert_array_equal(bad4, good2)
# 5) anonymous and wildcard axes
# =============================
arr = ndtest((Axis('a0,a1'), Axis(2, 'b')))
fpath = tmp_path(tmpdir, 'anonymous_and_wildcard_axes.xlsx')
with open_excel(fpath, overwrite_file=True) as wb:
wb[0] = arr.dump()
res = wb[0].load()
# the result should be identical to the original array except we lost the information about
# the wildcard axis being a wildcard axis
expected = arr.set_axes('b', Axis([0, 1], 'b'))
assert_array_equal(res, expected)
# 6) crash test
# =============
arr = ndtest((2, 2))
fpath = tmp_path(tmpdir, 'temporary_test_file.xlsx')
# create and save a test file
with open_excel(fpath, overwrite_file=True) as wb:
wb['arr'] = arr.dump()
wb.save()
# raise exception when the file is open
try:
with open_excel(fpath, overwrite_file=True) as wb:
raise ValueError("")
except ValueError:
pass
# check if file is still available
with open_excel(fpath) as wb:
assert wb.sheet_names() == ['arr']
assert_array_equal(wb['arr'].load(), arr)
# remove file
if os.path.exists(fpath):
os.remove(fpath)
def test_ufuncs(small_array):
raw = small_array.data
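# larray's ufunc wrappers (exp, clip, where, round, ...) must give the same results as the
# corresponding numpy functions applied to the raw data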
# simple one-argument ufunc
assert_array_equal(exp(small_array), np.exp(raw))
# with out=
la_out = zeros(small_array.axes)
raw_out = np.zeros(raw.shape)
la_out2 = exp(small_array, la_out)
raw_out2 = np.exp(raw, raw_out)
# FIXME: this is not the case currently
# self.assertIs(la_out2, la_out)
assert_array_equal(la_out2, la_out)
assert raw_out2 is raw_out
assert_array_equal(la_out, raw_out)
# with out= and broadcasting
# we need to put the 'a' axis first because numpy only supports that
la_out = zeros([Axis([0, 1, 2], 'a')] + list(small_array.axes))
raw_out = np.zeros((3,) + raw.shape)
la_out2 = exp(small_array, la_out)
raw_out2 = np.exp(raw, raw_out)
# self.assertIs(la_out2, la_out)
# XXX: why is la_out2 transposed?
assert_array_equal(la_out2.transpose(X.a), la_out)
assert raw_out2 is raw_out
assert_array_equal(la_out, raw_out)
sex, lipro = small_array.axes
low = small_array.sum(sex) // 4 + 3
raw_low = raw.sum(0) // 4 + 3
high = small_array.sum(sex) // 4 + 13
raw_high = raw.sum(0) // 4 + 13
# LA + scalars
assert_array_equal(small_array.clip(0, 10), raw.clip(0, 10))
assert_array_equal(clip(small_array, 0, 10), np.clip(raw, 0, 10))
# LA + LA (no broadcasting)
assert_array_equal(clip(small_array, 21 - small_array, 9 + small_array // 2),
np.clip(raw, 21 - raw, 9 + raw // 2))
# LA + LA (with broadcasting)
assert_array_equal(clip(small_array, low, high),
np.clip(raw, raw_low, raw_high))
# where (no broadcasting)
assert_array_equal(where(small_array < 5, -5, small_array),
np.where(raw < 5, -5, raw))
# where (transposed no broadcasting)
assert_array_equal(where(small_array < 5, -5, small_array.T),
np.where(raw < 5, -5, raw))
# where (with broadcasting)
result = where(small_array['P01'] < 5, -5, small_array)
assert result.axes.names == ['sex', 'lipro']
assert_array_equal(result, np.where(raw[:, [0]] < 5, -5, raw))
# round
small_float = small_array + 0.6
rounded = round(small_float)
assert_array_equal(rounded, np.round(raw + 0.6))
def test_diag():
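# diag() extracts the diagonal of a (hyper)square array and, conversely, builds a diagonal
# array from a 1D one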
# 2D -> 1D
a = ndtest((3, 3))
d = diag(a)
assert d.ndim == 1
assert d.i[0] == a.i[0, 0]
assert d.i[1] == a.i[1, 1]
assert d.i[2] == a.i[2, 2]
# 1D -> 2D
a2 = diag(d)
assert a2.ndim == 2
assert a2.i[0, 0] == a.i[0, 0]
assert a2.i[1, 1] == a.i[1, 1]
assert a2.i[2, 2] == a.i[2, 2]
# 3D -> 2D
a = ndtest((3, 3, 3))
d = diag(a)
assert d.ndim == 2
assert d.i[0, 0] == a.i[0, 0, 0]
assert d.i[1, 1] == a.i[1, 1, 1]
assert d.i[2, 2] == a.i[2, 2, 2]
# 3D -> 1D
d = diag(a, axes=(0, 1, 2))
assert d.ndim == 1
assert d.i[0] == a.i[0, 0, 0]
assert d.i[1] == a.i[1, 1, 1]
assert d.i[2] == a.i[2, 2, 2]
# 1D (anon) -> 2D
d_anon = d.rename(0, None).ignore_labels()
a2 = diag(d_anon)
assert a2.ndim == 2
# 1D (anon) -> 3D
a3 = diag(d_anon, ndim=3)
assert a3.ndim == 3
assert a3.i[0, 0, 0] == a.i[0, 0, 0]
assert a3.i[1, 1, 1] == a.i[1, 1, 1]
assert a3.i[2, 2, 2] == a.i[2, 2, 2]
# using Axis object
sex = Axis('sex=M,F')
a = eye(sex)
d = diag(a)
assert d.ndim == 1
assert d.axes.names == ['sex_sex']
assert_array_equal(d.axes.labels, [['M_M', 'F_F']])
assert d.i[0] == 1.0
assert d.i[1] == 1.0
@needs_python35
def test_matmul():
# 2D / anonymous axes
a1 = ndtest([Axis(3), Axis(3)])
a2 = eye(3, 3) * 2
# Note that we cannot use @ because that is an invalid syntax in Python 2
# Array value
assert_array_equal(a1.__matmul__(a2), ndtest([Axis(3), Axis(3)]) * 2)
# ndarray value
assert_array_equal(a1.__matmul__(a2.data), ndtest([Axis(3), Axis(3)]) * 2)
# non anonymous axes (N <= 2)
arr1d = ndtest(3)
arr2d = ndtest((3, 3))
# 1D @ 1D
res = arr1d.__matmul__(arr1d)
assert isinstance(res, np.integer)
assert res == 5
# 1D @ 2D
assert_array_equal(arr1d.__matmul__(arr2d),
Array([15, 18, 21], 'b=b0..b2'))
# 2D @ 1D
assert_array_equal(arr2d.__matmul__(arr1d),
Array([5, 14, 23], 'a=a0..a2'))
# 2D(a,b) @ 2D(a,b) -> 2D(a,b)
res = from_lists([['a\\b', 'b0', 'b1', 'b2'],
['a0', 15, 18, 21],
['a1', 42, 54, 66],
['a2', 69, 90, 111]])
assert_array_equal(arr2d.__matmul__(arr2d), res)
# 2D(a,b) @ 2D(b,a) -> 2D(a,a)
res = from_lists([['a\\a', 'a0', 'a1', 'a2'],
['a0', 5, 14, 23],
['a1', 14, 50, 86],
['a2', 23, 86, 149]])
assert_array_equal(arr2d.__matmul__(arr2d.T), res)
# ndarray value
assert_array_equal(arr1d.__matmul__(arr2d.data),
Array([15, 18, 21]))
assert_array_equal(arr2d.data.__matmul__(arr2d.T.data),
res.data)
# different axes
a1 = ndtest('a=a0..a1;b=b0..b2')
a2 = ndtest('b=b0..b2;c=c0..c3')
res = from_lists([[r'a\c', 'c0', 'c1', 'c2', 'c3'],
['a0', 20, 23, 26, 29],
['a1', 56, 68, 80, 92]])
assert_array_equal(a1.__matmul__(a2), res)
# non anonymous axes (N >= 2)
arr2d = ndtest((2, 2))
arr3d = ndtest((2, 2, 2))
arr4d = ndtest((2, 2, 2, 2))
a, b, c, d = arr4d.axes
e = Axis('e=e0,e1')
f = Axis('f=f0,f1')
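# for operands with more than 2 dimensions, the last two axes act as matrix dimensions and the
# leading axes are combined; the expected result axes are given in the comments below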
# 4D(a, b, c, d) @ 3D(e, d, f) -> 5D(a, b, e, c, f)
arr3d = arr3d.set_axes([e, d, f])
res = from_lists([['a', 'b', 'e', 'c\\f', 'f0', 'f1'],
['a0', 'b0', 'e0', 'c0', 2, 3],
['a0', 'b0', 'e0', 'c1', 6, 11],
['a0', 'b0', 'e1', 'c0', 6, 7],
['a0', 'b0', 'e1', 'c1', 26, 31],
['a0', 'b1', 'e0', 'c0', 10, 19],
['a0', 'b1', 'e0', 'c1', 14, 27],
['a0', 'b1', 'e1', 'c0', 46, 55],
['a0', 'b1', 'e1', 'c1', 66, 79],
['a1', 'b0', 'e0', 'c0', 18, 35],
['a1', 'b0', 'e0', 'c1', 22, 43],
['a1', 'b0', 'e1', 'c0', 86, 103],
['a1', 'b0', 'e1', 'c1', 106, 127],
['a1', 'b1', 'e0', 'c0', 26, 51],
['a1', 'b1', 'e0', 'c1', 30, 59],
['a1', 'b1', 'e1', 'c0', 126, 151],
['a1', 'b1', 'e1', 'c1', 146, 175]])
assert_array_equal(arr4d.__matmul__(arr3d), res)
# 3D(e, d, f) @ 4D(a, b, c, d) -> 5D(e, a, b, d, d)
res = from_lists([['e', 'a', 'b', 'd\\d', 'd0', 'd1'],
['e0', 'a0', 'b0', 'd0', 2, 3],
['e0', 'a0', 'b0', 'd1', 6, 11],
['e0', 'a0', 'b1', 'd0', 6, 7],
['e0', 'a0', 'b1', 'd1', 26, 31],
['e0', 'a1', 'b0', 'd0', 10, 11],
['e0', 'a1', 'b0', 'd1', 46, 51],
['e0', 'a1', 'b1', 'd0', 14, 15],
['e0', 'a1', 'b1', 'd1', 66, 71],
['e1', 'a0', 'b0', 'd0', 10, 19],
['e1', 'a0', 'b0', 'd1', 14, 27],
['e1', 'a0', 'b1', 'd0', 46, 55],
['e1', 'a0', 'b1', 'd1', 66, 79],
['e1', 'a1', 'b0', 'd0', 82, 91],
['e1', 'a1', 'b0', 'd1', 118, 131],
['e1', 'a1', 'b1', 'd0', 118, 127],
['e1', 'a1', 'b1', 'd1', 170, 183]])
assert_array_equal(arr3d.__matmul__(arr4d), res)
# 4D(a, b, c, d) @ 3D(b, d, f) -> 4D(a, b, c, f)
arr3d = arr3d.set_axes([b, d, f])
res = from_lists([['a', 'b', 'c\\f', 'f0', 'f1'],
['a0', 'b0', 'c0', 2, 3],
['a0', 'b0', 'c1', 6, 11],
['a0', 'b1', 'c0', 46, 55],
['a0', 'b1', 'c1', 66, 79],
['a1', 'b0', 'c0', 18, 35],
['a1', 'b0', 'c1', 22, 43],
['a1', 'b1', 'c0', 126, 151],
['a1', 'b1', 'c1', 146, 175]])
assert_array_equal(arr4d.__matmul__(arr3d), res)
# 3D(b, d, f) @ 4D(a, b, c, d) -> 4D(b, a, d, d)
res = from_lists([['b', 'a', 'd\\d', 'd0', 'd1'],
['b0', 'a0', 'd0', 2, 3],
['b0', 'a0', 'd1', 6, 11],
['b0', 'a1', 'd0', 10, 11],
['b0', 'a1', 'd1', 46, 51],
['b1', 'a0', 'd0', 46, 55],
['b1', 'a0', 'd1', 66, 79],
['b1', 'a1', 'd0', 118, 127],
['b1', 'a1', 'd1', 170, 183]])
assert_array_equal(arr3d.__matmul__(arr4d), res)
# 4D(a, b, c, d) @ 2D(d, f) -> 4D(a, b, c, f)
arr2d = arr2d.set_axes([d, f])
res = from_lists([['a', 'b', 'c\\f', 'f0', 'f1'],
['a0', 'b0', 'c0', 2, 3],
['a0', 'b0', 'c1', 6, 11],
['a0', 'b1', 'c0', 10, 19],
['a0', 'b1', 'c1', 14, 27],
['a1', 'b0', 'c0', 18, 35],
['a1', 'b0', 'c1', 22, 43],
['a1', 'b1', 'c0', 26, 51],
['a1', 'b1', 'c1', 30, 59]])
assert_array_equal(arr4d.__matmul__(arr2d), res)
# 2D(d, f) @ 4D(a, b, c, d) -> 4D(a, b, d, d)
res = from_lists([['a', 'b', 'd\\d', 'd0', 'd1'],
['a0', 'b0', 'd0', 2, 3],
['a0', 'b0', 'd1', 6, 11],
['a0', 'b1', 'd0', 6, 7],
['a0', 'b1', 'd1', 26, 31],
['a1', 'b0', 'd0', 10, 11],
['a1', 'b0', 'd1', 46, 51],
['a1', 'b1', 'd0', 14, 15],
['a1', 'b1', 'd1', 66, 71]])
assert_array_equal(arr2d.__matmul__(arr4d), res)
@needs_python35
def test_rmatmul():
a1 = eye(3) * 2
a2 = ndtest([Axis(3), Axis(3)])
# equivalent to a1.data @ a2
res = a2.__rmatmul__(a1.data)
assert isinstance(res, Array)
assert_array_equal(res, ndtest([Axis(3), Axis(3)]) * 2)
def test_broadcast_with():
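# broadcast_with() adds length-1 axes (and reorders common axes) so that the array becomes
# compatible with the target array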
a1 = ndtest((3, 2))
a2 = ndtest(3)
b = a2.broadcast_with(a1)
assert b.ndim == a1.ndim
assert b.shape == (3, 1)
assert_array_equal(b.i[:, 0], a2)
# anonymous axes
a1 = ndtest([Axis(3), Axis(2)])
a2 = ndtest(Axis(3))
b = a2.broadcast_with(a1)
assert b.ndim == a1.ndim
assert b.shape == (3, 1)
assert_array_equal(b.i[:, 0], a2)
a1 = ndtest([Axis(1), Axis(3)])
a2 = ndtest([Axis(3), Axis(1)])
b = a2.broadcast_with(a1)
assert b.ndim == 2
# common axes are reordered according to target (a1 in this case)
assert b.shape == (1, 3)
assert_larray_equiv(b, a2)
a1 = ndtest([Axis(2), Axis(3)])
a2 = ndtest([Axis(3), Axis(2)])
b = a2.broadcast_with(a1)
assert b.ndim == 2
assert b.shape == (2, 3)
assert_larray_equiv(b, a2)
def test_plot():
pass
# small_h = small['M']
# small_h.plot(kind='bar')
# small_h.plot()
# small_h.hist()
# large_data = np.random.randn(1000)
# tick_v = np.random.randint(ord('a'), ord('z'), size=1000)
# ticks = [chr(c) for c in tick_v]
# large_axis = Axis('large', ticks)
# large = Array(large_data, axes=[large_axis])
# large.plot()
# large.hist()
def test_combine_axes():
# combine N axes into 1
# =====================
arr = ndtest((2, 3, 4, 5))
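# combining axes produces a single axis whose labels join the original labels with '_' (by default)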
res = arr.combine_axes((X.a, X.b))
assert res.axes.names == ['a_b', 'c', 'd']
assert res.size == arr.size
assert res.shape == (2 * 3, 4, 5)
assert_array_equal(res.axes.a_b.labels[:2], ['a0_b0', 'a0_b1'])
assert_array_equal(res['a1_b0'], arr['a1', 'b0'])
res = arr.combine_axes((X.a, X.c))
assert res.axes.names == ['a_c', 'b', 'd']
assert res.size == arr.size
assert res.shape == (2 * 4, 3, 5)
assert_array_equal(res.axes.a_c.labels[:2], ['a0_c0', 'a0_c1'])
assert_array_equal(res['a1_c0'], arr['a1', 'c0'])
res = arr.combine_axes((X.b, X.d))
assert res.axes.names == ['a', 'b_d', 'c']
assert res.size == arr.size
assert res.shape == (2, 3 * 5, 4)
assert_array_equal(res.axes.b_d.labels[:2], ['b0_d0', 'b0_d1'])
assert_array_equal(res['b1_d0'], arr['b1', 'd0'])
# combine M axes into N
# =====================
arr = ndtest((2, 3, 4, 4, 3, 2))
# using a list of tuples
res = arr.combine_axes([('a', 'c'), ('b', 'f'), ('d', 'e')])
assert res.axes.names == ['a_c', 'b_f', 'd_e']
assert res.size == arr.size
assert res.shape == (2 * 4, 3 * 2, 4 * 3)
assert list(res.axes.a_c.labels[:2]) == ['a0_c0', 'a0_c1']
assert list(res.axes.b_f.labels[:2]) == ['b0_f0', 'b0_f1']
assert list(res.axes.d_e.labels[:2]) == ['d0_e0', 'd0_e1']
assert res['a0_c2', 'b1_f1', 'd3_e2'] == arr['a0', 'b1', 'c2', 'd3', 'e2', 'f1']
res = arr.combine_axes([('a', 'c'), ('b', 'e', 'f')])
assert res.axes.names == ['a_c', 'b_e_f', 'd']
assert res.size == arr.size
assert res.shape == (2 * 4, 3 * 3 * 2, 4)
assert list(res.axes.b_e_f.labels[:4]) == ['b0_e0_f0', 'b0_e0_f1', 'b0_e1_f0', 'b0_e1_f1']
assert_array_equal(res['a0_c2', 'b1_e2_f1'], arr['a0', 'b1', 'c2', 'e2', 'f1'])
# using a dict (-> user defined axes names)
res = arr.combine_axes({('a', 'c'): 'AC', ('b', 'f'): 'BF', ('d', 'e'): 'DE'})
assert res.axes.names == ['AC', 'BF', 'DE']
assert res.size == arr.size
assert res.shape == (2 * 4, 3 * 2, 4 * 3)
res = arr.combine_axes({('a', 'c'): 'AC', ('b', 'e', 'f'): 'BEF'})
assert res.axes.names == ['AC', 'BEF', 'd']
assert res.size == arr.size
assert res.shape == (2 * 4, 3 * 3 * 2, 4)
# combine with wildcard=True
arr = ndtest((2, 3))
res = arr.combine_axes(wildcard=True)
assert res.axes.names == ['a_b']
assert res.size == arr.size
assert res.shape == (6,)
assert_array_equal(res.axes[0].labels, np.arange(6))
def test_split_axes():
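# split_axes() is the inverse of combine_axes(): it splits axes whose labels (and name) contain
# a separator back into several axes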
# split one axis
# ==============
# default sep
arr = ndtest((2, 3, 4, 5))
combined = arr.combine_axes(('b', 'd'))
assert combined.axes.names == ['a', 'b_d', 'c']
res = combined.split_axes('b_d')
assert res.axes.names == ['a', 'b', 'd', 'c']
assert res.shape == (2, 3, 5, 4)
assert_array_equal(res.transpose('a', 'b', 'c', 'd'), arr)
# with specified names
res = combined.rename(b_d='bd').split_axes('bd', names=('b', 'd'))
assert res.axes.names == ['a', 'b', 'd', 'c']
assert res.shape == (2, 3, 5, 4)
assert_array_equal(res.transpose('a', 'b', 'c', 'd'), arr)
# regex
res = combined.split_axes('b_d', names=['b', 'd'], regex=r'(\w+)_(\w+)')
assert res.axes.names == ['a', 'b', 'd', 'c']
assert res.shape == (2, 3, 5, 4)
assert_array_equal(res.transpose('a', 'b', 'c', 'd'), arr)
# custom sep
combined = ndtest('a|b=a0|b0,a0|b1')
res = combined.split_axes(sep='|')
assert_array_equal(res, ndtest('a=a0;b=b0,b1'))
# split several axes at once
# ==========================
arr = ndtest('a_b=a0_b0..a1_b2; c=c0..c3; d=d0..d3; e_f=e0_f0..e2_f1')
# using a list of tuples
res = arr.split_axes(['a_b', 'e_f'])
assert res.axes.names == ['a', 'b', 'c', 'd', 'e', 'f']
assert res.size == arr.size
assert res.shape == (2, 3, 4, 4, 3, 2)
assert list(res.axes.a.labels) == ['a0', 'a1']
assert list(res.axes.b.labels) == ['b0', 'b1', 'b2']
assert list(res.axes.e.labels) == ['e0', 'e1', 'e2']
assert list(res.axes.f.labels) == ['f0', 'f1']
assert res['a0', 'b1', 'c2', 'd3', 'e2', 'f1'] == arr['a0_b1', 'c2', 'd3', 'e2_f1']
# default to all axes with name containing the delimiter _
assert_array_equal(arr.split_axes(), res)
# using a dict (-> user defined axes names)
res = arr.split_axes({'a_b': ('A', 'B'), 'e_f': ('E', 'F')})
assert res.axes.names == ['A', 'B', 'c', 'd', 'E', 'F']
assert res.size == arr.size
assert res.shape == (2, 3, 4, 4, 3, 2)
# split an axis in more than 2 axes
arr = ndtest('a_b_c=a0_b0_c0..a1_b2_c3; d=d0..d3; e_f=e0_f0..e2_f1')
res = arr.split_axes(['a_b_c', 'e_f'])
assert res.axes.names == ['a', 'b', 'c', 'd', 'e', 'f']
assert res.size == arr.size
assert res.shape == (2, 3, 4, 4, 3, 2)
assert list(res.axes.a.labels) == ['a0', 'a1']
assert list(res.axes.b.labels) == ['b0', 'b1', 'b2']
assert list(res.axes.e.labels) == ['e0', 'e1', 'e2']
assert list(res.axes.f.labels) == ['f0', 'f1']
assert res['a0', 'b1', 'c2', 'd3', 'e2', 'f1'] == arr['a0_b1_c2', 'd3', 'e2_f1']
# split an axis in more than 2 axes + passing a dict
res = arr.split_axes({'a_b_c': ('A', 'B', 'C'), 'e_f': ('E', 'F')})
assert res.axes.names == ['A', 'B', 'C', 'd', 'E', 'F']
assert res.size == arr.size
assert res.shape == (2, 3, 4, 4, 3, 2)
# using regex
arr = ndtest('ab=a0b0..a1b2; c=c0..c3; d=d0..d3; ef=e0f0..e2f1')
res = arr.split_axes({'ab': ('a', 'b'), 'ef': ('e', 'f')}, regex=r'(\w{2})(\w{2})')
assert res.axes.names == ['a', 'b', 'c', 'd', 'e', 'f']
assert res.size == arr.size
assert res.shape == (2, 3, 4, 4, 3, 2)
assert list(res.axes.a.labels) == ['a0', 'a1']
assert list(res.axes.b.labels) == ['b0', 'b1', 'b2']
assert list(res.axes.e.labels) == ['e0', 'e1', 'e2']
assert list(res.axes.f.labels) == ['f0', 'f1']
assert res['a0', 'b1', 'c2', 'd3', 'e2', 'f1'] == arr['a0b1', 'c2', 'd3', 'e2f1']
# labels with object dtype
arr = ndtest((2, 2, 2)).combine_axes(('a', 'b'))
arr = arr.set_axes([Axis(a.labels.astype(object), a.name) for a in arr.axes])
res = arr.split_axes()
expected_kind = 'U' if sys.version_info[0] >= 3 else 'S'
assert res.a.labels.dtype.kind == expected_kind
assert res.b.labels.dtype.kind == expected_kind
assert res.c.labels.dtype.kind == 'O'
assert_array_equal(res, ndtest((2, 2, 2)))
# not sorted by first part then second part (issue #364)
arr = ndtest((2, 3))
combined = arr.combine_axes()['a0_b0, a1_b0, a0_b1, a1_b1, a0_b2, a1_b2']
assert_array_equal(combined.split_axes('a_b'), arr)
# another weirdly sorted test
combined = arr.combine_axes()['a0_b1, a0_b0, a0_b2, a1_b1, a1_b0, a1_b2']
assert_array_equal(combined.split_axes('a_b'), arr['b1,b0,b2'])
# combined does not contain all combinations of labels (issue #369)
combined_partial = combined[['a0_b0', 'a0_b1', 'a1_b1', 'a0_b2', 'a1_b2']]
expected = arr.astype(float)
expected['a1', 'b0'] = nan
assert_array_nan_equal(combined_partial.split_axes('a_b'), expected)
# split labels are ambiguous (issue #485)
combined = ndtest('a_b=a0_b0..a1_b1;c_d=a0_b0..a1_b1')
expected = ndtest('a=a0,a1;b=b0,b1;c=a0,a1;d=b0,b1')
assert_array_equal(combined.split_axes(('a_b', 'c_d')), expected)
# anonymous axes
combined = ndtest('a0_b0,a0_b1,a0_b2,a1_b0,a1_b1,a1_b2')
expected = ndtest('a0,a1;b0,b1,b2')
assert_array_equal(combined.split_axes(0), expected)
# when no axis is specified and no axis contains the sep, split_axes is a no-op.
assert_array_equal(combined.split_axes(), combined)
def test_stack():
# stack along a single axis
# =========================
# simple
a = Axis('a=a0,a1,a2')
b = Axis('b=b0,b1')
arr0 = ndtest(a)
arr1 = ndtest(a, start=-1)
res = stack((arr0, arr1), b)
expected = Array([[0, -1],
[1, 0],
[2, 1]], [a, b])
assert_array_equal(res, expected)
# same but using a group as the stacking axis
larger_b = Axis('b=b0..b3')
res = stack((arr0, arr1), larger_b[:'b1'])
assert_array_equal(res, expected)
# simple with anonymous axis
axis0 = Axis(3)
arr0 = ndtest(axis0)
arr1 = ndtest(axis0, start=-1)
res = stack((arr0, arr1), b)
expected = Array([[0, -1],
[1, 0],
[2, 1]], [axis0, b])
assert_array_equal(res, expected)
# using res_axes
res = stack({'b0': 0, 'b1': 1}, axes=b, res_axes=(a, b))
expected = Array([[0, 1],
[0, 1],
[0, 1]], [a, b])
assert_array_equal(res, expected)
# giving elements as on Array containing Arrays
sex = Axis('sex=M,F')
# not using the same length for nat and type, otherwise numpy gets confused :(
arr1 = ones('nat=BE, FO')
arr2 = zeros('type=1..3')
array_of_arrays = Array([arr1, arr2], sex)
res = stack(array_of_arrays, sex)
expected = from_string(r"""nat type\sex M F
BE 1 1.0 0.0
BE 2 1.0 0.0
BE 3 1.0 0.0
FO 1 1.0 0.0
FO 2 1.0 0.0
FO 3 1.0 0.0""")
assert_array_equal(res, expected)
# non scalar/non Array
res = stack(([1, 2, 3], [4, 5, 6]))
expected = Array([[1, 4],
[2, 5],
[3, 6]])
assert_array_equal(res, expected)
# stack along multiple axes
# =========================
# a) simple
res = stack({('a0', 'b0'): 0,
('a0', 'b1'): 1,
('a1', 'b0'): 2,
('a1', 'b1'): 3,
('a2', 'b0'): 4,
('a2', 'b1'): 5},
(a, b))
expected = ndtest((a, b))
assert_array_equal(res, expected)
# b) keys not given in axes iteration order
res = stack({('a0', 'b0'): 0,
('a1', 'b0'): 2,
('a2', 'b0'): 4,
('a0', 'b1'): 1,
('a1', 'b1'): 3,
('a2', 'b1'): 5},
(a, b))
expected = ndtest((a, b))
assert_array_equal(res, expected)
# c) key parts not given in the order of axes (ie key part for b before key part for a)
res = stack({('a0', 'b0'): 0,
('a1', 'b0'): 1,
('a2', 'b0'): 2,
('a0', 'b1'): 3,
('a1', 'b1'): 4,
('a2', 'b1'): 5},
(b, a))
expected = ndtest((b, a))
assert_array_equal(res, expected)
# d) same as c) but with a key-value sequence
res = stack([(('a0', 'b0'), 0),
(('a1', 'b0'), 1),
(('a2', 'b0'), 2),
(('a0', 'b1'), 3),
(('a1', 'b1'), 4),
(('a2', 'b1'), 5)],
(b, a))
expected = ndtest((b, a))
assert_array_equal(res, expected)
@needs_python36
def test_stack_kwargs_no_axis_labels():
# these tests rely on kwargs ordering, hence python 3.6
# 1) using scalars
# ----------------
# a) with an axis name
res = stack(a0=0, a1=1, axes='a')
expected = Array([0, 1], 'a=a0,a1')
assert_array_equal(res, expected)
# b) without an axis name
res = stack(a0=0, a1=1)
expected = Array([0, 1], 'a0,a1')
assert_array_equal(res, expected)
# 2) dict of arrays
# -----------------
a = Axis('a=a0,a1,a2')
arr0 = ndtest(a)
arr1 = ndtest(a, start=-1)
# a) with an axis name
res = stack(b0=arr0, b1=arr1, axes='b')
expected = Array([[0, -1],
[1, 0],
[2, 1]], [a, 'b=b0,b1'])
assert_array_equal(res, expected)
# b) without an axis name
res = stack(b0=arr0, b1=arr1)
expected = Array([[0, -1],
[1, 0],
[2, 1]], [a, 'b0,b1'])
assert_array_equal(res, expected)
@needs_python37
def test_stack_dict_no_axis_labels():
# these tests rely on dict ordering, hence python 3.7
# 1) dict of scalars
# ------------------
# a) with an axis name
res = stack({'a0': 0, 'a1': 1}, 'a')
expected = Array([0, 1], 'a=a0,a1')
assert_array_equal(res, expected)
# b) without an axis name
res = stack({'a0': 0, 'a1': 1})
expected = Array([0, 1], 'a0,a1')
assert_array_equal(res, expected)
# 2) dict of arrays
# -----------------
a = Axis('a=a0,a1,a2')
arr0 = ndtest(a)
arr1 = ndtest(a, start=-1)
# a) with an axis name
res = stack({'b0': arr0, 'b1': arr1}, 'b')
expected = Array([[0, -1],
[1, 0],
[2, 1]], [a, 'b=b0,b1'])
assert_array_equal(res, expected)
# b) without an axis name
res = stack({'b0': arr0, 'b1': arr1})
expected = Array([[0, -1],
[1, 0],
[2, 1]], [a, 'b0,b1'])
assert_array_equal(res, expected)
def test_0darray_convert():
int_arr = Array(1)
assert int(int_arr) == 1
assert float(int_arr) == 1.0
assert int_arr.__index__() == 1
float_arr = Array(1.0)
assert int(float_arr) == 1
assert float(float_arr) == 1.0
with pytest.raises(TypeError) as e_info:
float_arr.__index__()
msg = e_info.value.args[0]
expected_np11 = "only integer arrays with one element can be converted to an index"
expected_np12 = "only integer scalar arrays can be converted to a scalar index"
assert msg in {expected_np11, expected_np12}
def test_deprecated_methods():
with pytest.warns(FutureWarning) as caught_warnings:
ndtest((2, 2)).with_axes('a', 'd=d0,d1')
assert len(caught_warnings) == 1
assert caught_warnings[0].message.args[0] == "with_axes() is deprecated. Use set_axes() instead."
assert caught_warnings[0].filename == __file__
with pytest.warns(FutureWarning) as caught_warnings:
ndtest((2, 2)).combine_axes().split_axis()
assert len(caught_warnings) == 1
assert caught_warnings[0].message.args[0] == "split_axis() is deprecated. Use split_axes() instead."
assert caught_warnings[0].filename == __file__
def test_eq():
a = ndtest((2, 3, 4))
ao = a.astype(object)
assert_array_equal(ao.eq(ao['c0'], nans_equal=True), a == a['c0'])
if __name__ == "__main__":
# import doctest
# import unittest
# from larray import core
# doctest.testmod(core)
# unittest.main()
pytest.main()
| gpl-3.0 |
alexeyum/scikit-learn | examples/linear_model/plot_lasso_coordinate_descent_path.py | 63 | 2945 | """
=====================
Lasso and Elastic Net
=====================
Lasso and elastic net (L1 and L2 penalisation) implemented using a
coordinate descent.
The coefficients can be forced to be positive.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
from itertools import cycle
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import lasso_path, enet_path
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
X /= X.std(axis=0) # Standardize data (easier to set the l1_ratio parameter)
# Compute paths
eps = 5e-3 # the smaller it is the longer is the path
print("Computing regularization path using the lasso...")
alphas_lasso, coefs_lasso, _ = lasso_path(X, y, eps, fit_intercept=False)
print("Computing regularization path using the positive lasso...")
alphas_positive_lasso, coefs_positive_lasso, _ = lasso_path(
X, y, eps, positive=True, fit_intercept=False)
print("Computing regularization path using the elastic net...")
alphas_enet, coefs_enet, _ = enet_path(
X, y, eps=eps, l1_ratio=0.8, fit_intercept=False)
print("Computing regularization path using the positive elastic net...")
alphas_positive_enet, coefs_positive_enet, _ = enet_path(
X, y, eps=eps, l1_ratio=0.8, positive=True, fit_intercept=False)
# Display results
plt.figure(1)
ax = plt.gca()
colors = cycle(['b', 'r', 'g', 'c', 'k'])
neg_log_alphas_lasso = -np.log10(alphas_lasso)
neg_log_alphas_enet = -np.log10(alphas_enet)
for coef_l, coef_e, c in zip(coefs_lasso, coefs_enet, colors):
l1 = plt.plot(neg_log_alphas_lasso, coef_l, c=c)
l2 = plt.plot(neg_log_alphas_enet, coef_e, linestyle='--', c=c)
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Lasso and Elastic-Net Paths')
plt.legend((l1[-1], l2[-1]), ('Lasso', 'Elastic-Net'), loc='lower left')
plt.axis('tight')
plt.figure(2)
ax = plt.gca()
neg_log_alphas_positive_lasso = -np.log10(alphas_positive_lasso)
for coef_l, coef_pl, c in zip(coefs_lasso, coefs_positive_lasso, colors):
l1 = plt.plot(neg_log_alphas_lasso, coef_l, c=c)
l2 = plt.plot(neg_log_alphas_positive_lasso, coef_pl, linestyle='--', c=c)
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Lasso and positive Lasso')
plt.legend((l1[-1], l2[-1]), ('Lasso', 'positive Lasso'), loc='lower left')
plt.axis('tight')
plt.figure(3)
ax = plt.gca()
neg_log_alphas_positive_enet = -np.log10(alphas_positive_enet)
for (coef_e, coef_pe, c) in zip(coefs_enet, coefs_positive_enet, colors):
l1 = plt.plot(neg_log_alphas_enet, coef_e, c=c)
l2 = plt.plot(neg_log_alphas_positive_enet, coef_pe, linestyle='--', c=c)
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Elastic-Net and positive Elastic-Net')
plt.legend((l1[-1], l2[-1]), ('Elastic-Net', 'positive Elastic-Net'),
loc='lower left')
plt.axis('tight')
plt.show()
| bsd-3-clause |
kcompher/BuildingMachineLearningSystemsWithPython | ch08/stacked5.py | 4 | 1194 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
from sklearn.linear_model import LinearRegression
from load_ml100k import load
import numpy as np
import similar_movie
import usermodel
import corrneighbours
sreviews = load()
reviews = sreviews.toarray()
reg = LinearRegression()
es = np.array([
usermodel.all_estimates(sreviews),
similar_movie.all_estimates(reviews, k=1),
similar_movie.all_estimates(reviews, k=2),
similar_movie.all_estimates(reviews, k=3),
similar_movie.all_estimates(reviews, k=4),
similar_movie.all_estimates(reviews, k=5),
])
total_error = 0.0
coefficients = []
for u in xrange(reviews.shape[0]):
es0 = np.delete(es, u, 1)
r0 = np.delete(reviews, u, 0)
X, Y = np.where(r0 > 0)
    X = es0[:, X, Y]  # index into es0, which matches r0 with user u removed
y = r0[r0 > 0]
reg.fit(X.T, y)
coefficients.append(reg.coef_)
r0 = reviews[u]
X = np.where(r0 > 0)
p0 = reg.predict(es[:, u, X].squeeze().T)
err0 = r0[r0 > 0] - p0
total_error += np.dot(err0, err0)
coefficients = np.array(coefficients)
| mit |
thilbern/scikit-learn | examples/applications/plot_out_of_core_classification.py | 255 | 13919 | """
======================================================
Out-of-core classification of text documents
======================================================
This is an example showing how scikit-learn can be used for classification
using an out-of-core approach: learning from data that doesn't fit into main
memory. We make use of an online classifier, i.e., one that supports the
partial_fit method, that will be fed with batches of examples. To guarantee
that the features space remains the same over time we leverage a
HashingVectorizer that will project each example into the same feature space.
This is especially useful in the case of text classification where new
features (words) may appear in each batch.
The dataset used in this example is Reuters-21578 as provided by the UCI ML
repository. It will be automatically downloaded and uncompressed on first run.
The plot represents the learning curve of the classifier: the evolution
of classification accuracy over the course of the mini-batches. Accuracy is
measured on the first 1000 samples, held out as a validation set.
To limit the memory consumption, we queue examples up to a fixed amount before
feeding them to the learner.
"""
# Authors: Eustache Diemert <[email protected]>
# @FedericoV <https://github.com/FedericoV/>
# License: BSD 3 clause
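# A minimal sketch of the out-of-core pattern this example builds on (hedged: any
# estimator exposing partial_fit can stand in for SGDClassifier, and
# ``stream_of_batches`` below is a hypothetical generator of (texts, labels) pairs):
#
#   vec = HashingVectorizer(decode_error='ignore', n_features=2 ** 18)
#   clf = SGDClassifier()
#   for texts, labels in stream_of_batches:
#       clf.partial_fit(vec.transform(texts), labels, classes=[0, 1])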
from __future__ import print_function
from glob import glob
import itertools
import os.path
import re
import tarfile
import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
from sklearn.externals.six.moves import html_parser
from sklearn.externals.six.moves import urllib
from sklearn.datasets import get_data_home
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import Perceptron
from sklearn.naive_bayes import MultinomialNB
def _not_in_sphinx():
# Hack to detect whether we are running by the sphinx builder
return '__file__' in globals()
###############################################################################
# Reuters Dataset related routines
###############################################################################
class ReutersParser(html_parser.HTMLParser):
"""Utility class to parse a SGML file and yield documents one at a time."""
def __init__(self, encoding='latin-1'):
html_parser.HTMLParser.__init__(self)
self._reset()
self.encoding = encoding
def handle_starttag(self, tag, attrs):
method = 'start_' + tag
getattr(self, method, lambda x: None)(attrs)
def handle_endtag(self, tag):
method = 'end_' + tag
getattr(self, method, lambda: None)()
def _reset(self):
self.in_title = 0
self.in_body = 0
self.in_topics = 0
self.in_topic_d = 0
self.title = ""
self.body = ""
self.topics = []
self.topic_d = ""
def parse(self, fd):
self.docs = []
for chunk in fd:
self.feed(chunk.decode(self.encoding))
for doc in self.docs:
yield doc
self.docs = []
self.close()
def handle_data(self, data):
if self.in_body:
self.body += data
elif self.in_title:
self.title += data
elif self.in_topic_d:
self.topic_d += data
def start_reuters(self, attributes):
pass
def end_reuters(self):
self.body = re.sub(r'\s+', r' ', self.body)
self.docs.append({'title': self.title,
'body': self.body,
'topics': self.topics})
self._reset()
def start_title(self, attributes):
self.in_title = 1
def end_title(self):
self.in_title = 0
def start_body(self, attributes):
self.in_body = 1
def end_body(self):
self.in_body = 0
def start_topics(self, attributes):
self.in_topics = 1
def end_topics(self):
self.in_topics = 0
def start_d(self, attributes):
self.in_topic_d = 1
def end_d(self):
self.in_topic_d = 0
self.topics.append(self.topic_d)
self.topic_d = ""
def stream_reuters_documents(data_path=None):
"""Iterate over documents of the Reuters dataset.
The Reuters archive will automatically be downloaded and uncompressed if
the `data_path` directory does not exist.
Documents are represented as dictionaries with 'body' (str),
'title' (str), 'topics' (list(str)) keys.
"""
DOWNLOAD_URL = ('http://archive.ics.uci.edu/ml/machine-learning-databases/'
'reuters21578-mld/reuters21578.tar.gz')
ARCHIVE_FILENAME = 'reuters21578.tar.gz'
if data_path is None:
data_path = os.path.join(get_data_home(), "reuters")
if not os.path.exists(data_path):
"""Download the dataset."""
print("downloading dataset (once and for all) into %s" %
data_path)
os.mkdir(data_path)
def progress(blocknum, bs, size):
total_sz_mb = '%.2f MB' % (size / 1e6)
current_sz_mb = '%.2f MB' % ((blocknum * bs) / 1e6)
if _not_in_sphinx():
print('\rdownloaded %s / %s' % (current_sz_mb, total_sz_mb),
end='')
archive_path = os.path.join(data_path, ARCHIVE_FILENAME)
urllib.request.urlretrieve(DOWNLOAD_URL, filename=archive_path,
reporthook=progress)
if _not_in_sphinx():
print('\r', end='')
print("untarring Reuters dataset...")
tarfile.open(archive_path, 'r:gz').extractall(data_path)
print("done.")
parser = ReutersParser()
for filename in glob(os.path.join(data_path, "*.sgm")):
for doc in parser.parse(open(filename, 'rb')):
yield doc
###############################################################################
# Main
###############################################################################
# Create the vectorizer and limit the number of features to a reasonable
# maximum
vectorizer = HashingVectorizer(decode_error='ignore', n_features=2 ** 18,
non_negative=True)
# Iterator over parsed Reuters SGML files.
data_stream = stream_reuters_documents()
# We learn a binary classification between the "acq" class and all the others.
# "acq" was chosen as it is more or less evenly distributed in the Reuters
# files. For other datasets, one should take care of creating a test set with
# a realistic portion of positive instances.
all_classes = np.array([0, 1])
positive_class = 'acq'
# Here are some classifiers that support the `partial_fit` method
partial_fit_classifiers = {
'SGD': SGDClassifier(),
'Perceptron': Perceptron(),
'NB Multinomial': MultinomialNB(alpha=0.01),
'Passive-Aggressive': PassiveAggressiveClassifier(),
}
def get_minibatch(doc_iter, size, pos_class=positive_class):
"""Extract a minibatch of examples, return a tuple X_text, y.
Note: size is before excluding invalid docs with no topics assigned.
"""
data = [(u'{title}\n\n{body}'.format(**doc), pos_class in doc['topics'])
for doc in itertools.islice(doc_iter, size)
if doc['topics']]
if not len(data):
return np.asarray([], dtype=int), np.asarray([], dtype=int)
X_text, y = zip(*data)
return X_text, np.asarray(y, dtype=int)
def iter_minibatches(doc_iter, minibatch_size):
"""Generator of minibatches."""
X_text, y = get_minibatch(doc_iter, minibatch_size)
while len(X_text):
yield X_text, y
X_text, y = get_minibatch(doc_iter, minibatch_size)
# test data statistics
test_stats = {'n_test': 0, 'n_test_pos': 0}
# First we hold out a number of examples to estimate accuracy
n_test_documents = 1000
tick = time.time()
X_test_text, y_test = get_minibatch(data_stream, 1000)
parsing_time = time.time() - tick
tick = time.time()
X_test = vectorizer.transform(X_test_text)
vectorizing_time = time.time() - tick
test_stats['n_test'] += len(y_test)
test_stats['n_test_pos'] += sum(y_test)
print("Test set is %d documents (%d positive)" % (len(y_test), sum(y_test)))
def progress(cls_name, stats):
"""Report progress information, return a string."""
duration = time.time() - stats['t0']
s = "%20s classifier : \t" % cls_name
s += "%(n_train)6d train docs (%(n_train_pos)6d positive) " % stats
s += "%(n_test)6d test docs (%(n_test_pos)6d positive) " % test_stats
s += "accuracy: %(accuracy).3f " % stats
s += "in %.2fs (%5d docs/s)" % (duration, stats['n_train'] / duration)
return s
cls_stats = {}
for cls_name in partial_fit_classifiers:
stats = {'n_train': 0, 'n_train_pos': 0,
'accuracy': 0.0, 'accuracy_history': [(0, 0)], 't0': time.time(),
'runtime_history': [(0, 0)], 'total_fit_time': 0.0}
cls_stats[cls_name] = stats
get_minibatch(data_stream, n_test_documents)
# Discard test set
# We will feed the classifier with mini-batches of 1000 documents; this means
# we have at most 1000 docs in memory at any time. The smaller the document
# batch, the bigger the relative overhead of the partial fit methods.
minibatch_size = 1000
# Create the data_stream that parses Reuters SGML files and iterates on
# documents as a stream.
minibatch_iterators = iter_minibatches(data_stream, minibatch_size)
total_vect_time = 0.0
# Main loop : iterate on mini-batches of examples
for i, (X_train_text, y_train) in enumerate(minibatch_iterators):
tick = time.time()
X_train = vectorizer.transform(X_train_text)
total_vect_time += time.time() - tick
for cls_name, cls in partial_fit_classifiers.items():
tick = time.time()
# update estimator with examples in the current mini-batch
cls.partial_fit(X_train, y_train, classes=all_classes)
# accumulate test accuracy stats
cls_stats[cls_name]['total_fit_time'] += time.time() - tick
cls_stats[cls_name]['n_train'] += X_train.shape[0]
cls_stats[cls_name]['n_train_pos'] += sum(y_train)
tick = time.time()
cls_stats[cls_name]['accuracy'] = cls.score(X_test, y_test)
cls_stats[cls_name]['prediction_time'] = time.time() - tick
acc_history = (cls_stats[cls_name]['accuracy'],
cls_stats[cls_name]['n_train'])
cls_stats[cls_name]['accuracy_history'].append(acc_history)
run_history = (cls_stats[cls_name]['accuracy'],
total_vect_time + cls_stats[cls_name]['total_fit_time'])
cls_stats[cls_name]['runtime_history'].append(run_history)
if i % 3 == 0:
print(progress(cls_name, cls_stats[cls_name]))
if i % 3 == 0:
print('\n')
###############################################################################
# Plot results
###############################################################################
def plot_accuracy(x, y, x_legend):
"""Plot accuracy as a function of x."""
x = np.array(x)
y = np.array(y)
plt.title('Classification accuracy as a function of %s' % x_legend)
plt.xlabel('%s' % x_legend)
plt.ylabel('Accuracy')
plt.grid(True)
plt.plot(x, y)
rcParams['legend.fontsize'] = 10
cls_names = list(sorted(cls_stats.keys()))
# Plot accuracy evolution
plt.figure()
for _, stats in sorted(cls_stats.items()):
# Plot accuracy evolution with #examples
accuracy, n_examples = zip(*stats['accuracy_history'])
plot_accuracy(n_examples, accuracy, "training examples (#)")
ax = plt.gca()
ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')
plt.figure()
for _, stats in sorted(cls_stats.items()):
# Plot accuracy evolution with runtime
accuracy, runtime = zip(*stats['runtime_history'])
plot_accuracy(runtime, accuracy, 'runtime (s)')
ax = plt.gca()
ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')
# Plot fitting times
plt.figure()
fig = plt.gcf()
cls_runtime = []
for cls_name, stats in sorted(cls_stats.items()):
cls_runtime.append(stats['total_fit_time'])
cls_runtime.append(total_vect_time)
cls_names.append('Vectorization')
bar_colors = rcParams['axes.color_cycle'][:len(cls_names)]
ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
color=bar_colors)
ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=10)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Training Times')
def autolabel(rectangles):
"""attach some text vi autolabel on rectangles."""
for rect in rectangles:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width() / 2.,
1.05 * height, '%.4f' % height,
ha='center', va='bottom')
autolabel(rectangles)
plt.show()
# Plot prediction times
plt.figure()
#fig = plt.gcf()
cls_runtime = []
cls_names = list(sorted(cls_stats.keys()))
for cls_name, stats in sorted(cls_stats.items()):
cls_runtime.append(stats['prediction_time'])
cls_runtime.append(parsing_time)
cls_names.append('Read/Parse\n+Feat.Extr.')
cls_runtime.append(vectorizing_time)
cls_names.append('Hashing\n+Vect.')
bar_colors = rcParams['axes.color_cycle'][:len(cls_names)]
ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
color=bar_colors)
ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=8)
plt.setp(plt.xticks()[1], rotation=30)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Prediction Times (%d instances)' % n_test_documents)
autolabel(rectangles)
plt.show()
| bsd-3-clause |
mmagnuski/mypy | sarna/viz.py | 1 | 11131 | import numpy as np
import matplotlib.pyplot as plt
from borsar.viz import Topo, heatmap, color_limits, add_colorbar_to_axis
from borsar.utils import find_range, find_index, get_info
from borsar.channels import get_ch_pos
from .utils import group
# imports required by imscatter below
from random import sample
from matplotlib.offsetbox import OffsetImage, AnnotationBbox
def get_spatial_colors(inst):
'''Get mne-style spatial colors for given mne object instance.'''
from mne.viz.evoked import _rgb
x, y, z = get_ch_pos(inst).T
return _rgb(x, y, z)
def get_color_cycle():
return plt.rcParams['axes.prop_cycle'].by_key()['color']
def set_3d_axes_equal(ax):
'''Make axes of 3D plot have equal scale so that spheres appear as spheres,
cubes as cubes, etc.. This is one possible solution to Matplotlib's
ax.set_aspect('equal') and ax.axis('equal') not working for 3D.
Input
ax: a matplotlib axis, e.g., as output from plt.gca().
modified from:
http://stackoverflow.com/questions/13685386/matplotlib-equal-unit-length-with-equal-aspect-ratio-z-axis-is-not-equal-to
'''
x_lim, y_lim, z_lim = ax.get_xlim3d(), ax.get_ylim3d(), ax.get_zlim3d()
def get_range(lim):
return lim[1] - lim[0], np.mean(lim)
x_range, x_mean = get_range(x_lim)
y_range, y_mean = get_range(y_lim)
z_range, z_mean = get_range(z_lim)
# The plot bounding box is a sphere in the sense of the infinity
# norm, hence I call half the max range the plot radius.
plot_radius = 0.5 * max([x_range, y_range, z_range])
ax.set_xlim3d([x_mean - plot_radius, x_mean + plot_radius])
ax.set_ylim3d([y_mean - plot_radius, y_mean + plot_radius])
ax.set_zlim3d([z_mean - plot_radius, z_mean + plot_radius])
# # for Topo, setting channel props:
# for ch in ch_ind:
# self.chans[ch].set_color('white')
# self.chans[ch].set_radius(0.01)
# self.chans[ch].set_zorder(4)
# - [ ] enhance Topo with that functionality
# - [ ] later will not be needed when masking is smarter in mne
def selected_Topo(values, info, indices, replace='zero', **kawrgs):
# if a different info is passed - compare and
# fill unused channels with 0
ch_num = len(info['ch_names'])
if replace == 'zero':
vals = np.zeros(ch_num)
elif replace == 'min':
vals = np.ones(ch_num) * min(values)
elif replace == 'max':
vals = np.ones(ch_num) * max(values)
vals[indices] = values
# topoplot
tp = Topo(vals, info, show=False, **kawrgs)
# make all topography lines solid
tp.solid_lines()
tp.remove_levels(0.)
# final touches
tp.fig.set_facecolor('white')
return tp
# TODO - [ ] consider moving selection out to some simple interface
# with .__init__ and .next()?
# - [ ] or maybe just use np.random.choice
# - [ ] change zoom to size
# - [ ] add 'auto' zoom / size
def imscatter(x, y, images, ax=None, zoom=1, selection='random'):
'''
Plot images as scatter points. Puppy scatter, anyone?
modified version of this stack overflow answer:
https://stackoverflow.com/questions/22566284/matplotlib-how-to-plot-images-instead-of-points
FIXME : add docs
'''
if ax is None:
ax = plt.gca()
if isinstance(images, str): images = [images]
if isinstance(images, list) and isinstance(images[0], str):
images = [plt.imread(image) for image in images]
if not isinstance(zoom, list):
zoom = [zoom] * len(images)
im = [OffsetImage(im, zoom=zm) for im, zm in zip(images, zoom)]
x, y = np.atleast_1d(x, y)
artists = []
img_idx = list(range(len(im)))
sel_idx = img_idx.copy()
for idx, (x0, y0) in enumerate(zip(x, y)):
# refill sel_idx if empty
if 'replace' not in selection and len(sel_idx) < 1:
sel_idx = img_idx.copy()
# select image index
if 'random' in selection:
take = sample(range(len(sel_idx)), 1)[0]
elif 'replace' in selection:
take = idx % len(im)
else:
take = 0
if 'replace' not in selection:
im_idx = sel_idx.pop(take)
else:
im_idx = sel_idx[take]
# add image to axis
ab = AnnotationBbox(im[im_idx], (x0, y0),
xycoords='data', frameon=False)
artists.append(ax.add_artist(ab))
ax.update_datalim(np.column_stack([x, y]))
ax.autoscale() # this may not be needed
return artists
# - [ ] support list/tuple of slices for ``highlight``?
# - [ ] `level` and `height` are unused but should allow for highlight that
# takes only a fraction of the axis
# kind='patch', level=0.04, height=0.03
def highlight(x_values, highlight, color=None, alpha=0.3, axis=None):
'''Highlight ranges along x axis.
Parameters
----------
x_values : numpy array
        Values specifying the x axis points along which ``highlight`` operates.
highlight : slice | numpy array
Slice or boolean numpy array defining which values in ``x_values``
        should be highlighted.
color : str | list | numpy array, optional
Color in format understood by matplotlib. The default is 'orange'.
alpha : float
Highlight patch transparency. 0.3 by default.
axis : matplotlib Axes | None
        Highlight on an already present axis. Default is ``None``, which creates
a new figure with one axis.
'''
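    # A hedged usage sketch (``times`` and ``ax`` below are hypothetical: a time vector
    # and an existing matplotlib axis):
    #   highlight(times, (times > 0.2) & (times < 0.4), color='red', axis=ax)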
from matplotlib.patches import Rectangle
color = 'orange' if color is None else color
axis = plt.gca() if axis is None else axis
ylims = axis.get_ylim()
y_rng = np.diff(ylims)
hlf_dist = np.diff(x_values).mean() / 2
if isinstance(highlight, np.ndarray):
grp = group(highlight, return_slice=True)
elif isinstance(highlight, slice):
grp = [highlight]
for slc in grp:
this_x = x_values[slc]
start = this_x[0] - hlf_dist
length = np.diff(this_x[[0, -1]]) + hlf_dist * 2
ptch = Rectangle((start, ylims[0]), length, y_rng, lw=0,
facecolor=color, alpha=alpha)
axis.add_patch(ptch)
# - [ ] test a little and change the API and options
def significance_bar(start, end, height, displaystring, lw=0.1,
markersize=7, boxpad=-1.2, fontsize=14, color='k'):
from matplotlib.markers import TICKDOWN
# draw a line with downticks at the ends
    plt.plot([start, end], [height] * 2, '-', color=color, lw=lw,
             marker=TICKDOWN, markeredgewidth=lw, markersize=markersize)
# draw the text with a bounding box covering up the line
bbox_dict = dict(facecolor='0.', edgecolor='none',
boxstyle='Square,pad=' + str(boxpad))
    plt.text(0.5 * (start + end), height, displaystring, ha='center',
             va='center', bbox=bbox_dict, size=fontsize)
# another way:
# x0, x1 = 1, 2
# y, h, col = tips['total_bill'].max() + 1, 1, 'k'
# plt.plot([x0, x0, x1, x1], [y, y+h, y+h, y], lw=0.4, c=col)
# plt.text((x0+x1)*.4, y+h, "ns", ha='center', va='bottom', color=col)
# - [ ] remove and add heatmap options to borsar.Cluster.plot()
def plot_cluster_heatmap(values, mask=None, axis=None, x_axis=None,
y_axis=None, outlines=False, colorbar=True,
line_kwargs=dict(), ch_names=None, freq=None):
n_channels = values.shape[0]
if x_axis is None and freq is not None:
x_axis = freq
heatmap(values, mask=mask, axis=axis, x_axis=x_axis, y_axis=y_axis,
            outlines=outlines, colorbar=colorbar, line_kwargs=line_kwargs)
if ch_names is not None:
plt.yticks(np.arange(len(ch_names)) + 0.5, ch_names);
for tick in plt.gca().yaxis.get_major_ticks():
tick.label.set_fontsize(8)
def plot_topomap_raw(raw, times=None):
'''``plot_topomap`` for mne ``Raw`` objects.
Parameters
----------
raw : mne Raw object
mne Raw object instance
    times : list of ints or floats (or just int/float)
        Times to plot topomaps for.
    Returns
    -------
fig : matplotlib figure
Figure handle
'''
import mne
import matplotlib.pyplot as plt
if times is None:
raise TypeError('times must be a list of real values.')
elif not isinstance(times, list):
times = [times]
# ADD a check for channel pos
    # pick only data channels (currently only eeg)
    picks = mne.pick_types(raw.info, eeg=True, meg=False)
    info = mne.pick_info(raw.info, sel=picks)
    # find relevant time samples and select data
    time_samples = np.array(find_index(raw.times, times))
data_slices = raw._data[picks[:, np.newaxis], time_samples[np.newaxis, :]]
#- raw._data[picks, :].mean(axis=1)[:, np.newaxis] # remove DC by default
fig, axes = plt.subplots(ncols=len(times), squeeze=False)
minmax = np.abs([data_slices.min(), data_slices.max()]).max()
for i, ax in enumerate(axes.ravel()):
mne.viz.plot_topomap(data_slices[:, i], info, axes=ax,
vmin=-minmax, vmax=minmax)
ax.set_title('{} s'.format(times[i]))
return fig
def prepare_equal_axes(fig, n_axes, space=[0.02, 0.98, 0.02, 0.98],
w_dist=0.025, h_dist=0.025):
'''Prepare equal axes spanning given figure space. FIXME docs'''
# transforms
trans = fig.transFigure
trans_inv = fig.transFigure.inverted()
# FIXME - change space to be [x0, y0, w, h]
axes_space = np.array(space).reshape((2, 2)).T
axes_space_disp_units = trans.transform(axes_space)
space_h = np.diff(axes_space_disp_units[:, 1])[0]
space_w = np.diff(axes_space_disp_units[:, 0])[0]
w_dist, h_dist = trans.transform([w_dist, h_dist])
h = (space_h - (h_dist * (n_axes[0] - 1))) / n_axes[0]
w = (space_w - (w_dist * (n_axes[1] - 1))) / n_axes[1]
# if too much width or height space of each axes - increase spacing
# FIXME, ADD: other spacing options (for example align to top)
if w > h:
w_diff = w - h
additional_w_dist = w_diff * n_axes[1] / (n_axes[1] - 1)
w_dist += additional_w_dist
w = h
elif h > w:
h_diff = h - w
additional_h_dist = h_diff * n_axes[0] / (n_axes[0] - 1)
h_dist += additional_h_dist
h = w
# start creating axes from bottom left corner,
# then flipud the axis matrix
axes = list()
w_fig, h_fig = trans_inv.transform([w, h])
for row_idx in range(n_axes[0]):
row_axes = list()
y0 = axes_space_disp_units[0, 1] + (h + h_dist) * row_idx
_, y0 = trans_inv.transform([0, y0])
for col_idx in range(n_axes[1]):
x0 = axes_space_disp_units[0, 0] + (w + w_dist) * col_idx
x0, _ = trans_inv.transform([x0, 0])
ax = fig.add_axes([x0, y0, w_fig, h_fig])
row_axes.append(ax)
axes.append(row_axes)
axes = np.flipud(np.array(axes))
return axes
| mit |
ConflictGK/Codecatch-RSSE | CLUSTERER/kmeans_clustering.py | 1 | 6107 | import json
import pandas as pd
from sklearn.cluster import KMeans
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import silhouette_score
from scipy.spatial.distance import euclidean
from CLUSTERER.k_selector import select_k, tfidf_methodinvocations, multidimensional_scaling, clean_up_snippets
import sys
sys.path.append('../')
pd.options.mode.chained_assignment = None # turn off chained assignment pandas warning
def kmeans_clustering(clean_snippets, K):
"""
    Performs the final clustering based on the KMeans algorithm. K is selected using the helper
    methods in the k_selector.py script (see select_k). Finally, organizes the data in a pandas DataFrame.
    :param clean_snippets: Info of snippets retrieved by query and augmented by the mining done in previous stages
    :param K: number of clusters to use for KMeans
:return: clustered_results_frame: DataFrame of the snippet info after KMeans clustering
:return: term_freqs_by_cluster: DataFrame containing all the APIs and the weight of each API per cluster.
"""
# List of MethodInvocations sequences as comma separated strings
mis = [snippet[0] for snippet in clean_snippets]
# Snippets' code
code = [snippet[1] for snippet in clean_snippets]
# Snippets' url
url = [snippet[2] for snippet in clean_snippets]
# Snippets' url positions
url_position = [snippet[3] for snippet in clean_snippets]
# Snippets' in page order
in_page_order = [snippet[4] for snippet in clean_snippets]
api_qualified_names = [snippet[5] for snippet in clean_snippets]
# Perform TFIDF transformation and retrieve the dissimilarity measure (cosine distance) of snippets
dist, tfidf_matrix, terms = tfidf_methodinvocations(methodinvocations=mis)
# Multidimensional Scaling to reduce to 2D
X = multidimensional_scaling(distances=dist)
# KMeans
km = KMeans(n_clusters=K, init='k-means++', max_iter=1000, n_init=15, random_state=20)
cluster_labels = km.fit_predict(X)
# Calculate average silhouette score for all samples
silhouette_avg = silhouette_score(X, cluster_labels, metric='cosine')
print("For n_clusters = {}, average silhouette score = {:.2f}".format(K, silhouette_avg))
# Calculate term frequencies (weights) per cluster
term_freqs = pd.DataFrame(tfidf_matrix.toarray(), columns=terms)
term_freqs['cluster'] = cluster_labels
term_freqs_by_cluster = term_freqs.groupby('cluster').sum()
# Calculate distances from cluster center
centers = km.cluster_centers_
dists_cluster_center = [euclidean(point, centers[label]) for point, label in zip(X, cluster_labels)]
# Calculate number of API calls
num_api_calls = [len(mis[i].split(',')) for i in range(len(clean_snippets))]
# Calculate lines of code per snippet
loc = [(c.count('\n') + 1) for c in code]
# Calculate weight of API calls
api_weights = []
for mi, label in zip(mis, cluster_labels):
method_list = set(mi.split(','))
terms = term_freqs_by_cluster.iloc[label, :]
weight = 0
for method in method_list:
if method.lower() in terms.index:
weight += terms[method.lower()]
api_weights.append(weight)
# Organize results in a DataFrame
results = {
'MethodInvocations': mis,
'API Qualified Names': api_qualified_names,
'Code': code,
'Url': url,
'Url Position': url_position,
'In Page Order': in_page_order,
'Dist Center': dists_cluster_center,
'Num API Calls': num_api_calls,
'API Weights': api_weights,
'LOC': loc,
'Cluster': cluster_labels
}
clustered_results_frame = pd.DataFrame(results)
return clustered_results_frame, term_freqs_by_cluster
def present_results(pp, results, term_freqs):
finalresult = {}
num_snippets_per_cluster = results['Cluster'].value_counts() # Number of snippets per cluster
num_clusters = len(num_snippets_per_cluster) # Number of clusters
finalresult["num_snippets_per_cluster"] = [int(i) for i in num_snippets_per_cluster]
finalresult["num_clusters"] = len([i for i in num_snippets_per_cluster])
finalresult["clusters"] = []
# Separate snippets based on their cluster label
snippet_clusters = [results.loc[results['Cluster'] == i] for i in range(num_clusters)]
# Re-rank inter-cluster snippets based on their score and scale them in range[0, 1], descending order
scaler = MinMaxScaler(feature_range=(0, 1))
for cluster in snippet_clusters:
cluster.sort_values(by='Dist Center', ascending=True, inplace=True)
cluster['Score'] = 1 - scaler.fit_transform(cluster['Dist Center'].values.reshape(-1, 1))
snippets = json.loads(cluster.to_json(orient='records'))
for i, _ in enumerate(snippets):
snippets[i]["API_Weights"] = snippets[i].pop("API Weights")
snippets[i]["API_Qualified_Names"] = snippets[i].pop("API Qualified Names")
snippets[i]["Url_Position"] = snippets[i].pop("Url Position")
snippets[i]["In_Page_Order"] = snippets[i].pop("In Page Order")
snippets[i]["Num_API_Calls"] = snippets[i].pop("Num API Calls")
snippets[i]["Dist_Center"] = snippets[i].pop("Dist Center")
snippets[i]["MethodInvocations"] = snippets[i].pop("MethodInvocations").split(',')
finalresult["clusters"].append({
"cluster_snippets": snippets
})
for i in range(len(term_freqs)):
avg_cluster_api_weights = snippet_clusters[i]['API Weights'].mean()
finalresult["clusters"][i]["avg_cluster_api_weights"] = avg_cluster_api_weights
top_apis_by_cluster = term_freqs.iloc[i, :].sort_values(ascending=False).index[0:8].values
finalresult["clusters"][i]["top_apis_by_cluster"] = top_apis_by_cluster.tolist()
with open(pp.RESULTS_D, 'w') as outfile:
outfile.write(json.dumps(finalresult, indent = 3))
def run_clustering(pp):
# Load query data file
with open(pp.RESULTS_C, 'r') as datafile:
data = json.load(datafile)
# Filter duplicates and snippets with low similarity
clean_snippets = clean_up_snippets(data)
print("Snippets after deleting duplicates/similars = {}".format(len(clean_snippets)))
# Select number of clusters for KMeans
K = select_k(pp, clean_snippets)
clustered_data, cluster_term_freqs = kmeans_clustering(clean_snippets, K)
present_results(pp, results=clustered_data, term_freqs=cluster_term_freqs)
if __name__ == "__main__":
run_clustering()
| mit |
webeng/DeepLearningTutorials | code/sktheano_cnn.py | 1 | 39161 | """
Aaron Berndsen:
A Convolutional Neural Network using Theano for computation and structure,
but built to obey sklearn's basic 'fit' 'predict' functionality
*code largely motivated from deeplearning.net examples
and Graham Taylor's "Vanilla RNN" (https://github.com/gwtaylor/theano-rnn/blob/master/rnn.py)
You'll require theano and libblas-dev
tips/tricks/notes:
* if training set is large (>O(100)) and redundant, use stochastic gradient descent (batch_size=1), otherwise use conjugate descent (batch_size > 1)
*
Basic usage:
import sktheano_cnn as NN
n = NN.MetaCNN()  # a CNN with two convolutional/max-pooling layers and a fully-connected hidden layer (defaults)
n.fit(Xtrain, ytrain)
pred = n.predict(Xtest)
"""
import cPickle as pickle
import logging
import numpy as np
from sklearn.base import BaseEstimator
import theano
import theano.tensor as T
from theano.tensor.signal import downsample
from theano.tensor.nnet import conv
import logging
import os
_logger = logging.getLogger("theano.gof.compilelock")
_logger.setLevel(logging.WARN)
logger = logging.getLogger(__name__)
mode = theano.Mode(linker='cvm')
#mode = 'DEBUG_MODE'
class CNN(object):
"""
Convolutional Neural Network,
backend by Theano, but compliant with sklearn interface.
This class holds the actual layers, while MetaCNN does
the fit/predict routines. You should init with MetaCNN.
    The network is built from four layers:
layer0 : a convolutional filter making filters[0] shifted copies,
then downsampled by max pooling in grids of poolsize[0]
(N, 1, nx, ny)
--> (N, nkerns[0], nx1, ny1) (nx1 = nx - filters[0][0] + 1)
(ny1 = ny - filters[0][1] + 1)
--> (N, nkerns[0], nx1/poolsize[0][1], ny1/poolsize[0][1])
layer1 : a convolutional filter making filters[1] shifted copies,
then downsampled by max pooling in grids of poolsize[1]
(N, nkerns[0], nx1/2, ny1/2)
--> (N, nkerns[1], nx2, ny2) (nx2 = nx1 - filters[1][0] + 1)
--> (N, nkerns[1], nx3, ny3) (nx3 = nx2/poolsize[1][0], ny3=ny2/poolsize[1][1])
layer2 : hidden layer of nkerns[1]*nx3*ny3 input features and n_hidden hidden neurons
layer3 : final LR layer with n_hidden neural inputs and n_out outputs/classes
"""
def __init__(self, input, n_in=1, n_out=0, activation=T.tanh,
nkerns=[20,50],
filters=[15,9],
poolsize=[(3,3),(2,2)],
n_hidden=500,
output_type='softmax', batch_size=25,
use_symbolic_softmax=False):
"""
n_in : width (or length) of input image (assumed square)
n_out : number of class labels
:type nkerns: list of ints
:param nkerns: number of kernels on each layer
:type filters: list of ints
:param filters: width of convolution
:type poolsize: list of 2-tuples
:param poolsize: maxpooling in convolution layer (index-0),
and direction x or y (index-1)
:type n_hidden: int
:param n_hidden: number of hidden neurons
:type output_type: string
:param output_type: type of decision 'softmax', 'binary', 'real'
:type batch_size: int
:param batch_size: number of samples in each training batch. Default 200.
"""
self.activation = activation
self.output_type = output_type
#shape of input images
nx, ny = n_in, n_in
if use_symbolic_softmax:
def symbolic_softmax(x):
e = T.exp(x)
return e / T.sum(e, axis=1).dimshuffle(0, 'x')
self.softmax = symbolic_softmax
else:
self.softmax = T.nnet.softmax
# Reshape matrix of rasterized images of shape (batch_size, nx*ny)
# to a 4D tensor, compatible with our LeNetConvPoolLayer
layer0_input = input.reshape((batch_size, 1, nx, ny))
#layer0_input = input.reshape((batch_size, 3, nx, ny))
# Construct the first convolutional pooling layer:
        # filtering reduces the image size to (nx1, ny1) = (nx - nim + 1, ny - nim + 1)
        # maxpooling reduces this further to (nx1 / poolsize[0][0], ny1 / poolsize[0][1])
        # 4D output tensor is thus of shape (batch_size, nkerns[0], nx1 / poolsize[0][0], ny1 / poolsize[0][1])
nim = filters[0]
rng = np.random.RandomState(23455)
self.layer0 = LeNetConvPoolLayer(rng, input=layer0_input,
image_shape=(batch_size, 1, nx, ny),
filter_shape=(nkerns[0], 1, nim, nim),
poolsize=poolsize[0])
# Construct the second convolutional pooling layer
        # filtering reduces the image size to (poox - nconf + 1, pooy - nconf + 1) = (x, x)
        # maxpooling reduces this further to (x / poolsize[1][0], x / poolsize[1][1]) = (y, y)
        # 4D output tensor is thus of shape (batch_size, nkerns[1], y, y)
poox = (nx - nim + 1)/poolsize[0][0]
pooy = (ny - nim + 1)/poolsize[0][1]
nconf = filters[1]
self.layer1 = LeNetConvPoolLayer(rng, input=self.layer0.output,
image_shape=(batch_size, nkerns[0], poox, pooy),
filter_shape=(nkerns[1], nkerns[0], nconf, nconf),
poolsize=poolsize[1])
# the TanhLayer being fully-connected, it operates on 2D matrices of
        # shape (batch_size, num_pixels) (i.e. a matrix of rasterized images).
        # This will generate a matrix of shape (batch_size, nkerns[1] * poo2x * poo2y)
layer2_input = self.layer1.output.flatten(2)
# construct a fully-connected sigmoidal layer
poo2x = (poox-nconf+1)/poolsize[1][0]
poo2y = (pooy-nconf+1)/poolsize[1][1]
self.layer2 = HiddenLayer(rng, input=layer2_input,
n_in=nkerns[1]*poo2x*poo2y,
n_out=n_hidden, activation=T.tanh)
# classify the values of the fully-connected sigmoidal layer
self.layer3 = LogisticRegression(input=self.layer2.output,
n_in=n_hidden, n_out=n_out)
# CNN regularization
self.L1 = self.layer3.L1
self.L2_sqr = self.layer3.L2_sqr
# create a list of all model parameters to be fit by gradient descent
self.params = self.layer3.params + self.layer2.params\
+ self.layer1.params + self.layer0.params
self.y_pred = self.layer3.y_pred
self.p_y_given_x = self.layer3.p_y_given_x
self.layer2_output = self.layer2.input
if self.output_type == 'real':
self.loss = lambda y: self.mse(y)
elif self.output_type == 'binary':
self.loss = lambda y: self.nll_binary(y)
elif self.output_type == 'softmax':
# push through softmax, computing vector of class-membership
# probabilities in symbolic form
self.loss = lambda y: self.nll_multiclass(y)
else:
raise NotImplementedError
def mse(self, y):
# error between output and target
return T.mean((self.y_pred - y) ** 2)
def nll_binary(self, y):
# negative log likelihood based on binary cross entropy error
return T.mean(T.nnet.binary_crossentropy(self.p_y_given_x, y))
    # same as negative log-likelihood
def nll_multiclass(self, y):
# negative log likelihood based on multiclass cross entropy error
# y.shape[0] is (symbolically) the number of rows in y, i.e.,
# number of time steps (call it T) in the sequence
# T.arange(y.shape[0]) is a symbolic vector which will contain
# [0,1,2,... n-1] T.log(self.p_y_given_x) is a matrix of
# Log-Probabilities (call it LP) with one row per example and
# one column per class LP[T.arange(y.shape[0]),y] is a vector
# v containing [LP[0,y[0]], LP[1,y[1]], LP[2,y[2]], ...,
# LP[n-1,y[n-1]]] and T.mean(LP[T.arange(y.shape[0]),y]) is
# the mean (across minibatch examples) of the elements in v,
# i.e., the mean log-likelihood across the minibatch.
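        # (illustrative: for a minibatch of 3 examples with y = [2, 0, 1],
        #  v = [LP[0, 2], LP[1, 0], LP[2, 1]] and the returned loss is -mean(v))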
return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])
def errors(self, y):
"""Return a float representing the number of errors in the sequence
over the total number of examples in the sequence ; zero one
loss over the size of the sequence
:type y: theano.tensor.TensorType
:param y: corresponds to a vector that gives for each example the
correct label
"""
# check if y has same dimension of y_pred
        if y.ndim != self.y_pred.ndim:
            raise TypeError('y should have the same shape as self.y_pred',
                            ('y', y.type, 'y_pred', self.y_pred.type))
if self.output_type in ('binary', 'softmax'):
# check if y is of the correct datatype
if y.dtype.startswith('int'):
# the T.neq operator returns a vector of 0s and 1s, where 1
# represents a mistake in prediction
return T.mean(T.neq(self.y_pred, y))
else:
raise NotImplementedError()
class MetaCNN(BaseEstimator):
"""
the actual CNN is not init-ed until .fit is called.
We determine the image input size (assumed square images) and
the number of outputs in .fit from the training data
"""
def __init__(self, learning_rate=0.05,
n_epochs=60, batch_size=25, activation='tanh',
nkerns=[20,45],
n_hidden=500,
filters=[15,7],
poolsize=[(3,3),(2,2)],
output_type='softmax',
L1_reg=0.00, L2_reg=0.00,
use_symbolic_softmax=False,
### Note, n_in and n_out are actually set in
### .fit, they are here to help cPickle
n_in=50, n_out=2):
self.learning_rate = float(learning_rate)
self.nkerns = nkerns
self.n_hidden = n_hidden
self.filters = filters
self.poolsize = poolsize
self.n_epochs = int(n_epochs)
self.batch_size = int(batch_size)
self.L1_reg = float(L1_reg)
self.L2_reg = float(L2_reg)
self.activation = activation
self.output_type = output_type
self.use_symbolic_softmax = use_symbolic_softmax
self.n_in = n_in
self.n_out = n_out
def ready(self):
"""
this routine is called from "fit" since we determine the
image size (assumed square) and output labels from the training data.
"""
#input
self.x = T.matrix('x')
#output (a label)
self.y = T.ivector('y')
if self.activation == 'tanh':
activation = T.tanh
elif self.activation == 'sigmoid':
activation = T.nnet.sigmoid
elif self.activation == 'relu':
activation = lambda x: x * (x > 0)
elif self.activation == 'cappedrelu':
activation = lambda x: T.minimum(x * (x > 0), 6)
else:
raise NotImplementedError
self.cnn = CNN(input=self.x, n_in=self.n_in,
n_out=self.n_out, activation=activation,
nkerns=self.nkerns,
filters=self.filters,
n_hidden=self.n_hidden,
poolsize=self.poolsize,
output_type=self.output_type,
batch_size=self.batch_size,
use_symbolic_softmax=self.use_symbolic_softmax)
#self.cnn.predict expects batch_size number of inputs.
#we wrap those functions and pad as necessary in 'def predict' and 'def predict_proba'
self.predict_wrap = theano.function(inputs=[self.x],
outputs=self.cnn.y_pred,
mode=mode)
self.predict_vector = theano.function(inputs=[self.x],
outputs=self.cnn.layer2_output,
mode=mode)
self.predict_proba_wrap = theano.function(inputs=[self.x],
outputs=self.cnn.p_y_given_x,
mode=mode)
def score(self, X, y):
"""Returns the mean accuracy on the given test data and labels.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training set.
y : array-like, shape = [n_samples]
Labels for X.
Returns
-------
z : float
"""
return np.mean(self.predict(X) == y)
def fit(self, X_train, Y_train, X_test=None, Y_test=None,
validation_frequency=2, n_epochs=None):
""" Fit model
Pass in X_test, Y_test to compute test error and report during
training.
X_train : ndarray (T x n_in)
Y_train : ndarray (T x n_out)
validation_frequency : int
in terms of number of sequences (or number of weight updates)
n_epochs : None (used to override self.n_epochs from init.
"""
#prepare the CNN
self.n_in = int(np.sqrt(X_train.shape[1]))
self.n_in = 64
#print self.n_in
#print X_train.shape[1]
self.n_out = len(np.unique(Y_train))
print "n_out:{}".format(self.n_out)
self.ready()
if X_test is not None:
assert(Y_test is not None)
interactive = True
test_set_x, test_set_y = self.shared_dataset((X_test, Y_test))
else:
interactive = False
train_set_x, train_set_y = self.shared_dataset((X_train, Y_train))
n_train_batches = train_set_x.get_value(borrow=True).shape[0]
n_train_batches /= self.batch_size
if interactive:
n_test_batches = test_set_x.get_value(borrow=True).shape[0]
n_test_batches /= self.batch_size
######################
# BUILD ACTUAL MODEL #
######################
logger.info('... building the model')
index = T.lscalar('index') # index to a [mini]batch
# cost = self.cnn.loss(self.y)\
# + self.L1_reg * self.cnn.L1\
# + self.L2_reg * self.cnn.L2_sqr
cost = self.cnn.loss(self.y)
compute_train_error = theano.function(inputs=[index, ],
outputs=self.cnn.loss(self.y),
givens={
self.x: train_set_x[index * self.batch_size: (index + 1) * self.batch_size],
self.y: train_set_y[index * self.batch_size: (index + 1) * self.batch_size]},
mode=mode)
if interactive:
compute_test_error = theano.function(inputs=[index, ],
outputs=self.cnn.loss(self.y),
givens={
self.x: test_set_x[index * self.batch_size: (index + 1) * self.batch_size],
self.y: test_set_y[index * self.batch_size: (index + 1) * self.batch_size]},
mode=mode)
# create a list of all model parameters to be fit by gradient descent
self.params = self.cnn.params
# create a list of gradients for all model parameters
self.grads = T.grad(cost, self.params)
# train_model is a function that updates the model parameters by
# SGD Since this model has many parameters, it would be tedious to
# manually create an update rule for each model parameter. We thus
# create the updates dictionary by automatically looping over all
# (params[i],grads[i]) pairs.
# self.updates = {}
# for param_i, grad_i in zip(self.params, self.grads):
# self.updates[param_i] = param_i - self.learning_rate * grad_i
self.updates = [
(param_i, param_i - self.learning_rate * grad_i)
for param_i, grad_i in zip(self.params, self.grads)
]
train_model = theano.function(
[index],
cost,
updates=self.updates,
givens={
self.x: train_set_x[index * self.batch_size: (index + 1) * self.batch_size],
self.y: train_set_y[index * self.batch_size: (index + 1) * self.batch_size]
}
)
###############
# TRAIN MODEL #
###############
logger.info('... training')
# early-stopping parameters
patience = 1000 # look as this many examples regardless
patience_increase = 2 # wait this much longer when a new best is
# found
improvement_threshold = 0.995 # a relative improvement of this much is
# considered significant
validation_frequency = min(n_train_batches, patience / 2)
# go through this many
# minibatche before checking the network
# on the validation set; in this case we
# check every epoch
best_test_loss = np.inf
best_iter = 0
epoch = 0
done_looping = False
if n_epochs is None:
n_epochs = self.n_epochs
while (epoch < n_epochs) and (not done_looping):
epoch = epoch + 1
for idx in xrange(n_train_batches):
iter = epoch * n_train_batches + idx
cost_ij = train_model(idx)
if iter % validation_frequency == 0:
# compute loss on training set
train_losses = [compute_train_error(i)
for i in xrange(n_train_batches)]
this_train_loss = np.mean(train_losses)
if interactive:
test_losses = [compute_test_error(i)
for i in xrange(n_test_batches)]
this_test_loss = np.mean(test_losses)
note = 'epoch %i, seq %i/%i, train loss %f '\
'test loss loss %f learning rate: %f' % \
(epoch, idx + 1, n_train_batches,
this_train_loss, this_test_loss, self.learning_rate)
logger.info(note)
print note
if this_test_loss < best_test_loss:
#improve patience if loss improvement is good enough
if this_test_loss < best_test_loss * \
improvement_threshold:
patience = max(patience, iter * patience_increase)
# save best validation score and iteration number
best_test_loss = this_test_loss
best_iter = iter
else:
logger.info('epoch %i, seq %i/%i, train loss %f '
'learning rate: %f' % \
(epoch, idx + 1, n_train_batches, this_train_loss,
self.learning_rate))
if patience <= iter:
done_looping = True
break
logger.info("Optimization complete")
logger.info("Best xval score of %f %% obtained at iteration %i" %
(best_test_loss * 100., best_iter))
print "Best xval score of %f %% obtained at iteration %i" % (best_test_loss * 100., best_iter)
def predict(self, data):
"""
the CNN expects inputs with Nsamples = self.batch_size.
In order to run 'predict' on an arbitrary number of samples we
pad as necessary.
"""
if isinstance(data, list):
data = np.array(data)
if data.ndim == 1:
data = np.array([data])
nsamples = data.shape[0]
n_batches = nsamples//self.batch_size
n_rem = nsamples%self.batch_size
if n_batches > 0:
preds = [list(self.predict_wrap(data[i*self.batch_size:(i+1)*self.batch_size]))\
for i in range(n_batches)]
else:
preds = []
if n_rem > 0:
z = np.zeros((self.batch_size, self.n_in * self.n_in))
z[0:n_rem] = data[n_batches*self.batch_size:n_batches*self.batch_size+n_rem]
preds.append(self.predict_wrap(z)[0:n_rem])
return np.hstack(preds).flatten()
def predict_proba(self, data):
"""
the CNN expects inputs with Nsamples = self.batch_size.
In order to run 'predict_proba' on an arbitrary number of samples we
pad as necessary.
"""
if isinstance(data, list):
data = np.array(data)
if data.ndim == 1:
data = np.array([data])
nsamples = data.shape[0]
n_batches = nsamples//self.batch_size
n_rem = nsamples%self.batch_size
if n_batches > 0:
preds = [list(self.predict_proba_wrap(data[i*self.batch_size:(i+1)*self.batch_size]))\
for i in range(n_batches)]
else:
preds = []
if n_rem > 0:
z = np.zeros((self.batch_size, self.n_in * self.n_in))
z[0:n_rem] = data[n_batches*self.batch_size:n_batches*self.batch_size+n_rem]
preds.append(self.predict_proba_wrap(z)[0:n_rem])
return np.vstack(preds)
def shared_dataset(self, data_xy):
""" Load the dataset into shared variables """
data_x, data_y = data_xy
shared_x = theano.shared(np.asarray(data_x,
dtype=theano.config.floatX))
shared_y = theano.shared(np.asarray(data_y,
dtype=theano.config.floatX))
if self.output_type in ('binary', 'softmax'):
return shared_x, T.cast(shared_y, 'int32')
else:
return shared_x, shared_y
def __getstate__(self):
""" Return state sequence."""
#check if we're using ubc_AI.classifier wrapper,
#adding it's params to the state
if hasattr(self, 'orig_class'):
superparams = self.get_params()
#now switch to orig. class (MetaCNN)
oc = self.orig_class
cc = self.__class__
self.__class__ = oc
params = self.get_params()
for k, v in superparams.iteritems():
params[k] = v
self.__class__ = cc
else:
params = self.get_params() #sklearn.BaseEstimator
if hasattr(self, 'cnn'):
weights = [p.get_value() for p in self.cnn.params]
else:
weights = []
state = (params, weights)
return state
def _set_weights(self, weights):
""" Set fittable parameters from weights sequence.
Parameters must be in the order defined by self.params:
W, W_in, W_out, h0, bh, by
"""
i = iter(weights)
if hasattr(self, 'cnn'):
for param in self.cnn.params:
param.set_value(i.next())
def __setstate__(self, state):
""" Set parameters from state sequence.
Parameters must be in the order defined by self.params:
W, W_in, W_out, h0, bh, by
"""
params, weights = state
#we may have several classes or superclasses
for k in ['n_comp', 'use_pca', 'feature']:
if k in params:
self.set_params(**{k:params[k]})
params.pop(k)
#now switch to MetaCNN if necessary
if hasattr(self,'orig_class'):
cc = self.__class__
oc = self.orig_class
self.__class__ = oc
self.set_params(**params)
self.ready()
if len(weights) > 0:
self._set_weights(weights)
self.__class__ = cc
else:
self.set_params(**params)
self.ready()
self._set_weights(weights)
def save(self, fpath='.', fname=None):
""" Save a pickled representation of Model state. """
import datetime
fpathstart, fpathext = os.path.splitext(fpath)
if fpathext == '.pkl':
# User supplied an absolute path to a pickle file
fpath, fname = os.path.split(fpath)
elif fname is None:
# Generate filename based on date
date_obj = datetime.datetime.now()
date_str = date_obj.strftime('%Y-%m-%d-%H:%M:%S')
class_name = self.__class__.__name__
#fname = '%s.%s.pkl' % (class_name, date_str)
fname = 'best_model.pkl'
fabspath = os.path.join(fpath, fname)
logger.info("Saving to %s ..." % fabspath)
file = open(fabspath, 'wb')
state = self.__getstate__()
pickle.dump(state, file, protocol=pickle.HIGHEST_PROTOCOL)
file.close()
def load(self, path):
""" Load model parameters from path. """
logger.info("Loading from %s ..." % path)
file = open(path, 'rb')
state = pickle.load(file)
self.__setstate__(state)
file.close()
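# --- Illustrative sketch, not part of the original module ---
# The save()/load() methods above pickle the estimator's parameters and
# CNN weights.  The hypothetical helper below is a minimal usage sketch of
# that round trip, assuming the module-level `os` import already used by
# save(); the function name, file name and arguments are assumptions added
# for illustration only.
def _demo_metacnn_save_load(cnn, X, model_dir='.'):
    """Persist a fitted MetaCNN, reload its weights, and predict on X."""
    cnn.save(fpath=model_dir, fname='demo_model.pkl')
    cnn.load(os.path.join(model_dir, 'demo_model.pkl'))
    # predict() pads X up to a multiple of cnn.batch_size internally,
    # so X may contain any number of samples
    return cnn.predict(X)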
class LogisticRegression(object):
"""Multi-class Logistic Regression Class
The logistic regression is fully described by a weight matrix :math:`W`
and bias vector :math:`b`. Classification is done by projecting data
points onto a set of hyperplanes, the distance to which is used to
determine a class membership probability.
"""
def __init__(self, input, n_in, n_out):
""" Initialize the parameters of the logistic regression
:type input: theano.tensor.TensorType
:param input: symbolic variable that describes the input of the
architecture (one minibatch)
:type n_in: int
:param n_in: number of input units, the dimension of the space in
which the datapoints lie
:type n_out: int
:param n_out: number of output units, the dimension of the space in
which the labels lie
"""
# initialize with 0 the weights W as a matrix of shape (n_in, n_out)
self.W = theano.shared(value=np.zeros((n_in, n_out),
dtype=theano.config.floatX),
name='W', borrow=True)
# initialize the biases b as a vector of n_out 0s
self.b = theano.shared(value=np.zeros((n_out,),
dtype=theano.config.floatX),
name='b', borrow=True)
# compute vector of class-membership probabilities in symbolic form
self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W) + self.b)
# compute prediction as class whose probability is maximal in
# symbolic form
self.y_pred = T.argmax(self.p_y_given_x, axis=1)
# parameters of the model
self.params = [self.W, self.b]
# L1 norm ; one regularization option is to enforce L1 norm to
# be small
self.L1 = 0
self.L1 += abs(self.W.sum())
# square of L2 norm ; one regularization option is to enforce
# square of L2 norm to be small
self.L2_sqr = 0
self.L2_sqr += (self.W ** 2).sum()
def negative_log_likelihood(self, y):
"""Return the mean of the negative log-likelihood of the prediction
of this model under a given target distribution.
.. math::
\frac{1}{|\mathcal{D}|} \mathcal{L} (\theta=\{W,b\}, \mathcal{D}) =
\frac{1}{|\mathcal{D}|} \sum_{i=0}^{|\mathcal{D}|} \log(P(Y=y^{(i)}|x^{(i)}, W,b)) \\
\ell (\theta=\{W,b\}, \mathcal{D}) = - \frac{1}{|\mathcal{D}|} \mathcal{L} (\theta=\{W,b\}, \mathcal{D})
:type y: theano.tensor.TensorType
:param y: corresponds to a vector that gives for each example the
correct label
Note: we use the mean instead of the sum so that
the learning rate is less dependent on the batch size
"""
# y.shape[0] is (symbolically) the number of rows in y, i.e.,
# number of examples (call it n) in the minibatch
# T.arange(y.shape[0]) is a symbolic vector which will contain
# [0,1,2,... n-1] T.log(self.p_y_given_x) is a matrix of
# Log-Probabilities (call it LP) with one row per example and
# one column per class LP[T.arange(y.shape[0]),y] is a vector
# v containing [LP[0,y[0]], LP[1,y[1]], LP[2,y[2]], ...,
# LP[n-1,y[n-1]]] and T.mean(LP[T.arange(y.shape[0]),y]) is
# the mean (across minibatch examples) of the elements in v,
# i.e., the mean log-likelihood across the minibatch.
return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])
def errors(self, y):
"""Return a float representing the number of errors in the minibatch
over the total number of examples of the minibatch ; zero one
loss over the size of the minibatch
:type y: theano.tensor.TensorType
:param y: corresponds to a vector that gives for each example the
correct label
"""
# check if y has same dimension of y_pred
if y.ndim != self.y_pred.ndim:
raise TypeError('y should have the same shape as self.y_pred',
('y', y.type, 'y_pred', self.y_pred.type))
# check if y is of the correct datatype
if y.dtype.startswith('int'):
# the T.neq operator returns a vector of 0s and 1s, where 1
# represents a mistake in prediction
return T.mean(T.neq(self.y_pred, y))
else:
raise NotImplementedError()
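# --- Illustrative sketch, not part of the original module ---
# The class docstring above describes classification as projecting the
# input onto hyperplanes (dot(input, W) + b) followed by a softmax.  The
# hypothetical function below is a minimal example of wiring the class
# into a Theano graph and compiling its NLL and error expressions; the
# name and default sizes are assumptions made for illustration only.
def _demo_logistic_regression(n_in=784, n_out=10, batch=32, seed=0):
    """Compile and evaluate NLL/error for an untrained LogisticRegression."""
    import numpy as np
    import theano
    import theano.tensor as T
    x = T.matrix('x')   # minibatch of flattened inputs
    y = T.ivector('y')  # integer class labels
    clf = LogisticRegression(input=x, n_in=n_in, n_out=n_out)
    f = theano.function([x, y],
                        [clf.negative_log_likelihood(y), clf.errors(y)])
    rng = np.random.RandomState(seed)
    xb = rng.rand(batch, n_in).astype(theano.config.floatX)
    yb = rng.randint(0, n_out, size=batch).astype('int32')
    return f(xb, yb)   # with zero weights the NLL equals log(n_out)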
class HiddenLayer(object):
def __init__(self, rng, input, n_in, n_out, W=None, b=None,
activation=T.tanh):
"""
Typical hidden layer of a MLP: units are fully-connected and have
sigmoidal activation function. Weight matrix W is of shape (n_in,n_out)
and the bias vector b is of shape (n_out,).
NOTE : The nonlinearity used here is tanh
Hidden unit activation is given by: tanh(dot(input,W) + b)
:type rng: np.random.RandomState
:param rng: a random number generator used to initialize weights
:type input: theano.tensor.dmatrix
:param input: a symbolic tensor of shape (n_examples, n_in)
:type n_in: int
:param n_in: dimensionality of input
:type n_out: int
:param n_out: number of hidden units
:type activation: theano.Op or function
:param activation: Non linearity to be applied in the hidden
layer
"""
self.input = input
# `W` is initialized with `W_values` which is uniformly sampled
# from -sqrt(6./(n_in+n_out)) to sqrt(6./(n_in+n_out))
# for the tanh activation function
# the output of uniform is converted using asarray to dtype
# theano.config.floatX so that the code is runnable on GPU
# Note : optimal initialization of weights is dependent on the
# activation function used (among other things).
# For example, results presented in [Xavier10] suggest that you
# should use 4 times larger initial weights for sigmoid
# compared to tanh
# We have no info for other function, so we use the same as
# tanh.
if W is None:
W_values = np.asarray(rng.uniform(
low=-np.sqrt(6. / (n_in + n_out)),
high=np.sqrt(6. / (n_in + n_out)),
size=(n_in, n_out)), dtype=theano.config.floatX)
if activation == theano.tensor.nnet.sigmoid:
W_values *= 4
W = theano.shared(value=W_values, name='W', borrow=True)
if b is None:
b_values = np.zeros((n_out,), dtype=theano.config.floatX)
b = theano.shared(value=b_values, name='b', borrow=True)
self.W = W
self.b = b
lin_output = T.dot(input, self.W) + self.b
self.output = (lin_output if activation is None
else activation(lin_output))
# parameters of the model
self.params = [self.W, self.b]
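# --- Illustrative sketch, not part of the original module ---
# The docstring above describes a fully-connected tanh layer with a
# Glorot-style uniform initialisation.  The hypothetical function below
# sketches how HiddenLayer and LogisticRegression combine into a
# one-hidden-layer MLP; the name and layer sizes are assumptions added
# for illustration only.
def _demo_hidden_layer_mlp(n_in=784, n_hidden=500, n_out=10, seed=1234):
    """Return the symbolic NLL and parameter list of a tiny MLP."""
    import numpy as np
    import theano.tensor as T
    rng = np.random.RandomState(seed)
    x = T.matrix('x')
    y = T.ivector('y')
    hidden = HiddenLayer(rng, input=x, n_in=n_in, n_out=n_hidden,
                         activation=T.tanh)
    logreg = LogisticRegression(input=hidden.output,
                                n_in=n_hidden, n_out=n_out)
    # parameters an optimizer would update: W and b of both layers
    params = hidden.params + logreg.params
    return logreg.negative_log_likelihood(y), params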
class LeNetConvPoolLayer(object):
"""Pool Layer of a convolutional network """
def __init__(self, rng, input, filter_shape, image_shape, poolsize=(2, 2)):
"""
Allocate a LeNetConvPoolLayer with shared variable internal parameters.
:type rng: np.random.RandomState
:param rng: a random number generator used to initialize weights
:type input: theano.tensor.dtensor4
:param input: symbolic image tensor, of shape image_shape
:type filter_shape: tuple or list of length 4
:param filter_shape: (number of filters, num input feature maps,
filter height,filter width)
:type image_shape: tuple or list of length 4
:param image_shape: (batch size, num input feature maps,
image height, image width)
:type poolsize: tuple or list of length 2
:param poolsize: the downsampling (pooling) factor (#rows,#cols)
"""
assert image_shape[1] == filter_shape[1]
self.input = input
# there are "num input feature maps * filter height * filter width"
# inputs to each hidden unit
fan_in = np.prod(filter_shape[1:])
# each unit in the lower layer receives a gradient from:
# "num output feature maps * filter height * filter width" /
# pooling size
fan_out = (filter_shape[0] * np.prod(filter_shape[2:]) /
np.prod(poolsize))
# initialize weights with random weights
W_bound = np.sqrt(6. / (fan_in + fan_out))
self.W = theano.shared(np.asarray(
rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),
dtype=theano.config.floatX),
borrow=True)
# the bias is a 1D tensor -- one bias per output feature map
b_values = np.zeros((filter_shape[0],), dtype=theano.config.floatX)
self.b = theano.shared(value=b_values, borrow=True)
# convolve input feature maps with filters
conv_out = conv.conv2d(input=input, filters=self.W,
filter_shape=filter_shape, image_shape=image_shape)
# downsample each feature map individually, using maxpooling
pooled_out = downsample.max_pool_2d(input=conv_out,
ds=poolsize, ignore_border=True)
# add the bias term. Since the bias is a vector (1D array), we first
# reshape it to a tensor of shape (1,n_filters,1,1). Each bias will
# thus be broadcasted across mini-batches and feature map
# width & height
self.output = T.tanh(pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))
# store parameters of this layer
self.params = [self.W, self.b]
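# --- Illustrative sketch, not part of the original module ---
# The docstring above defines filter_shape as (n_filters, n_input_maps,
# filter_h, filter_w) and image_shape as (batch, n_input_maps, h, w).
# The hypothetical function below shows those shapes on 28x28
# single-channel images; the name and sizes are assumptions added for
# illustration only.
def _demo_convpool_layer(batch_size=20, nkern=8, seed=1234):
    """Build one LeNetConvPoolLayer and return its symbolic output."""
    import numpy as np
    import theano.tensor as T
    rng = np.random.RandomState(seed)
    x = T.tensor4('x')  # (batch_size, 1, 28, 28)
    layer = LeNetConvPoolLayer(rng, input=x,
                               image_shape=(batch_size, 1, 28, 28),
                               filter_shape=(nkern, 1, 5, 5),
                               poolsize=(2, 2))
    # a 5x5 valid convolution gives 24x24 maps; 2x2 max-pooling halves
    # that to 12x12, so layer.output has shape (batch_size, nkern, 12, 12)
    return layer.output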
if __name__ == '__main__':
from fetex_image import FetexImage
import cPickle
from PIL import Image
fetex = FetexImage()
base_path = '/Applications/MAMP/htdocs/DeepLearningTutorials'
folder = base_path + '/data/cnn-furniture/'
#folder = base_path + '/data/categories/'
fe = FetexImage(verbose=True,support_per_class=10000,folder=folder,mode='L')
#train_set,valid_set,test_set,input = fe.processImagesPipeline()
pkl_file = open( '../data/train_set.pkl', 'rb')
train_set = cPickle.load(pkl_file)
# pkl_file = open( '../data/valid_set.pkl', 'rb')
# valid_set = cPickle.load(pkl_file)
pkl_file = open( '../data/test_set.pkl', 'rb')
test_set = cPickle.load(pkl_file)
pkl_file = open( '../data/lb.pkl', 'rb')
lb = cPickle.load(pkl_file)
X_train, Y_train = train_set
X_test, Y_test = test_set
X_train = np.asarray(X_train, dtype=theano.config.floatX)
X_test = np.asarray(X_test, dtype=theano.config.floatX)
print X_train.shape
"""
def evaluate_lenet5(learning_rate=0.1, n_epochs=2,
dataset='mnist.pkl.gz',
nkerns=[(25 / 1) , (25 / 1)], batch_size=400):
"""
cnn = MetaCNN(learning_rate=0.1,nkerns=[25,25], filters=[5,5], batch_size=400,poolsize=[(2,2),(2,2)], n_hidden=400 , n_epochs=4 , n_out=5)
#cnn = MetaCNN(n_epochs=10, filters=[5,5])
#cnn.fit(X_train,Y_train, X_test=X_test, Y_test=Y_test)
#cnn.save(fpath=base_path + '/data/')
# # Predictions after training
cnn.load(base_path + '/data/best_model.pkl')
#cnn.load('/home/ubuntu/DeepLearningTutorials/data/MetaCNN.2015-10-19-13:59:18.pkl')
#sample = np.asarray(X_train, dtype=theano.config.floatX)
#print sample[0].reshape((64,64)).shape
#Image.fromarray(sample[2].reshape((64,64)),mode="L").show()
arr = np.array(np.round((X_train[0] * 256).reshape((64,64))),dtype=np.uint8)
Image.fromarray(arr,mode="L").show()
arr = np.array(np.round((X_train[1] * 256).reshape((64,64))),dtype=np.uint8)
Image.fromarray(arr,mode="L").show()
arr = np.array(np.round((X_train[2] * 256).reshape((64,64))),dtype=np.uint8)
Image.fromarray(arr,mode="L").show()
print Y_train[0:3]
# arr = np.array(np.round((X_train[1300] * 256).reshape((64,64))),dtype=np.uint8)
# Image.fromarray(arr,mode="L").show()
#print sample[0]
# #print sample.shape
#sample = X_train[0:25]
print lb.classes_
#sample = X_train[0]
#print Y_train[4000:4100]
print cnn.predict(X_train[0:3])
# sample = X_train[4400]
# print Y_train[4400]
# print cnn.predict(sample)
# pkl_file = open( '../data/X_original.pkl', 'rb')
# X_original = cPickle.load(pkl_file)
# a = X_original[0:25]
# a = np.asarray(a, dtype=theano.config.floatX)
# #fe.reconstructImage(a[2]).show()
# def flaten_aux(V):
# return V.flatten(order='F')
# a = a.transpose(0, 3, 1, 2)
# a = map(flaten_aux, a)
# #print cnn.predict(a)
# print cnn.predict_vector(a).shape
#print cnn.predict(sample)
# #print cnn.predict_wrap(sample) | bsd-3-clause |
Bulochkin/tensorflow_pack | tensorflow/examples/learn/iris_val_based_early_stopping.py | 62 | 2827 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset, with early stopping."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
from sklearn import datasets
from sklearn import metrics
from sklearn.cross_validation import train_test_split
import tensorflow as tf
learn = tf.contrib.learn
def clean_folder(folder):
"""Cleans the given folder if it exists."""
try:
shutil.rmtree(folder)
except OSError:
pass
def main(unused_argv):
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
x_train, x_val, y_train, y_val = train_test_split(
x_train, y_train, test_size=0.2, random_state=42)
val_monitor = learn.monitors.ValidationMonitor(
x_val, y_val, early_stopping_rounds=200)
model_dir = '/tmp/iris_model'
clean_folder(model_dir)
# classifier with early stopping on training data
classifier1 = learn.DNNClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(x_train),
hidden_units=[10, 20, 10],
n_classes=3,
model_dir=model_dir)
classifier1.fit(x=x_train, y=y_train, steps=2000)
predictions1 = list(classifier1.predict(x_test, as_iterable=True))
score1 = metrics.accuracy_score(y_test, predictions1)
model_dir = '/tmp/iris_model_val'
clean_folder(model_dir)
# classifier with early stopping on validation data, save frequently for
# monitor to pick up new checkpoints.
classifier2 = learn.DNNClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(x_train),
hidden_units=[10, 20, 10],
n_classes=3,
model_dir=model_dir,
config=tf.contrib.learn.RunConfig(save_checkpoints_secs=1))
classifier2.fit(x=x_train, y=y_train, steps=2000, monitors=[val_monitor])
predictions2 = list(classifier2.predict(x_test, as_iterable=True))
score2 = metrics.accuracy_score(y_test, predictions2)
# In many applications, the score is improved by using early stopping
print('score1: ', score1)
print('score2: ', score2)
print('score2 > score1: ', score2 > score1)
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
beepee14/scikit-learn | sklearn/utils/arpack.py | 265 | 64837 | """
This contains a copy of the future version of
scipy.sparse.linalg.eigen.arpack.eigsh
It's an upgraded wrapper of the ARPACK library which
allows the use of shift-invert mode for symmetric matrices.
Find a few eigenvectors and eigenvalues of a matrix.
Uses ARPACK: http://www.caam.rice.edu/software/ARPACK/
"""
# Wrapper implementation notes
#
# ARPACK Entry Points
# -------------------
# The entry points to ARPACK are
# - (s,d)seupd : single and double precision symmetric matrix
# - (s,d,c,z)neupd: single,double,complex,double complex general matrix
# This wrapper puts the *neupd (general matrix) interfaces in eigs()
# and the *seupd (symmetric matrix) in eigsh().
# There is no Hermitian complex/double complex interface.
# To find eigenvalues of a Hermitian matrix you
# must use eigs() and not eigsh()
# It might be desirable to handle the Hermitian case differently
# and, for example, return real eigenvalues.
# Number of eigenvalues returned and complex eigenvalues
# ------------------------------------------------------
# The ARPACK nonsymmetric real and double interface (s,d)naupd return
# eigenvalues and eigenvectors in real (float,double) arrays.
# Since the eigenvalues and eigenvectors are, in general, complex
# ARPACK puts the real and imaginary parts in consecutive entries
# in real-valued arrays. This wrapper puts the real entries
# into complex data types and attempts to return the requested eigenvalues
# and eigenvectors.
# Solver modes
# ------------
# ARPACK can handle shifted and shift-invert computations
# for eigenvalues by providing a shift (sigma) and a solver.
__docformat__ = "restructuredtext en"
__all__ = ['eigs', 'eigsh', 'svds', 'ArpackError', 'ArpackNoConvergence']
import warnings
from scipy.sparse.linalg.eigen.arpack import _arpack
import numpy as np
from scipy.sparse.linalg.interface import aslinearoperator, LinearOperator
from scipy.sparse import identity, isspmatrix, isspmatrix_csr
from scipy.linalg import lu_factor, lu_solve
from scipy.sparse.sputils import isdense
from scipy.sparse.linalg import gmres, splu
import scipy
from distutils.version import LooseVersion
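# --- Illustrative sketch, not part of the original scipy/sklearn code ---
# The "complex eigenvalues" note above explains that the real (s,d)naupd
# routines store a complex conjugate pair as two consecutive real columns.
# The hypothetical helper below shows, in plain numpy terms, the
# reconstruction rule applied later in _UnsymmetricArpackParams.extract;
# it is added purely for illustration and is not part of this module's API.
def _demo_pack_conjugate_pair(zr, i):
    """Combine real columns zr[:, i] and zr[:, i + 1] into a conjugate pair."""
    z_i = zr[:, i] + 1.0j * zr[:, i + 1]
    return z_i, z_i.conjugate()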
_type_conv = {'f': 's', 'd': 'd', 'F': 'c', 'D': 'z'}
_ndigits = {'f': 5, 'd': 12, 'F': 5, 'D': 12}
DNAUPD_ERRORS = {
0: "Normal exit.",
1: "Maximum number of iterations taken. "
"All possible eigenvalues of OP has been found. IPARAM(5) "
"returns the number of wanted converged Ritz values.",
2: "No longer an informational error. Deprecated starting "
"with release 2 of ARPACK.",
3: "No shifts could be applied during a cycle of the "
"Implicitly restarted Arnoldi iteration. One possibility "
"is to increase the size of NCV relative to NEV. ",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV-NEV >= 2 and less than or equal to N.",
-4: "The maximum number of Arnoldi update iterations allowed "
"must be greater than zero.",
-5: " WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work array WORKL is not sufficient.",
-8: "Error return from LAPACK eigenvalue calculation;",
-9: "Starting vector is zero.",
-10: "IPARAM(7) must be 1,2,3,4.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "IPARAM(1) must be equal to 0 or 1.",
-13: "NEV and WHICH = 'BE' are incompatible.",
-9999: "Could not build an Arnoldi factorization. "
"IPARAM(5) returns the size of the current Arnoldi "
"factorization. The user is advised to check that "
"enough workspace and array storage has been allocated."
}
SNAUPD_ERRORS = DNAUPD_ERRORS
ZNAUPD_ERRORS = DNAUPD_ERRORS.copy()
ZNAUPD_ERRORS[-10] = "IPARAM(7) must be 1,2,3."
CNAUPD_ERRORS = ZNAUPD_ERRORS
DSAUPD_ERRORS = {
0: "Normal exit.",
1: "Maximum number of iterations taken. "
"All possible eigenvalues of OP has been found.",
2: "No longer an informational error. Deprecated starting with "
"release 2 of ARPACK.",
3: "No shifts could be applied during a cycle of the Implicitly "
"restarted Arnoldi iteration. One possibility is to increase "
"the size of NCV relative to NEV. ",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV must be greater than NEV and less than or equal to N.",
-4: "The maximum number of Arnoldi update iterations allowed "
"must be greater than zero.",
-5: "WHICH must be one of 'LM', 'SM', 'LA', 'SA' or 'BE'.",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work array WORKL is not sufficient.",
-8: "Error return from trid. eigenvalue calculation; "
"Informational error from LAPACK routine dsteqr .",
-9: "Starting vector is zero.",
-10: "IPARAM(7) must be 1,2,3,4,5.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "IPARAM(1) must be equal to 0 or 1.",
-13: "NEV and WHICH = 'BE' are incompatible. ",
-9999: "Could not build an Arnoldi factorization. "
"IPARAM(5) returns the size of the current Arnoldi "
"factorization. The user is advised to check that "
"enough workspace and array storage has been allocated.",
}
SSAUPD_ERRORS = DSAUPD_ERRORS
DNEUPD_ERRORS = {
0: "Normal exit.",
1: "The Schur form computed by LAPACK routine dlahqr "
"could not be reordered by LAPACK routine dtrsen. "
"Re-enter subroutine dneupd with IPARAM(5)NCV and "
"increase the size of the arrays DR and DI to have "
"dimension at least dimension NCV and allocate at least NCV "
"columns for Z. NOTE: Not necessary if Z and V share "
"the same space. Please notify the authors if this error "
"occurs.",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV-NEV >= 2 and less than or equal to N.",
-5: "WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work WORKL array is not sufficient.",
-8: "Error return from calculation of a real Schur form. "
"Informational error from LAPACK routine dlahqr .",
-9: "Error return from calculation of eigenvectors. "
"Informational error from LAPACK routine dtrevc.",
-10: "IPARAM(7) must be 1,2,3,4.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "HOWMNY = 'S' not yet implemented",
-13: "HOWMNY must be one of 'A' or 'P' if RVEC = .true.",
-14: "DNAUPD did not find any eigenvalues to sufficient "
"accuracy.",
-15: "DNEUPD got a different count of the number of converged "
"Ritz values than DNAUPD got. This indicates the user "
"probably made an error in passing data from DNAUPD to "
"DNEUPD or that the data was modified before entering "
"DNEUPD",
}
SNEUPD_ERRORS = DNEUPD_ERRORS.copy()
SNEUPD_ERRORS[1] = ("The Schur form computed by LAPACK routine slahqr "
"could not be reordered by LAPACK routine strsen . "
"Re-enter subroutine dneupd with IPARAM(5)=NCV and "
"increase the size of the arrays DR and DI to have "
"dimension at least dimension NCV and allocate at least "
"NCV columns for Z. NOTE: Not necessary if Z and V share "
"the same space. Please notify the authors if this error "
"occurs.")
SNEUPD_ERRORS[-14] = ("SNAUPD did not find any eigenvalues to sufficient "
"accuracy.")
SNEUPD_ERRORS[-15] = ("SNEUPD got a different count of the number of "
"converged Ritz values than SNAUPD got. This indicates "
"the user probably made an error in passing data from "
"SNAUPD to SNEUPD or that the data was modified before "
"entering SNEUPD")
ZNEUPD_ERRORS = {0: "Normal exit.",
1: "The Schur form computed by LAPACK routine csheqr "
"could not be reordered by LAPACK routine ztrsen. "
"Re-enter subroutine zneupd with IPARAM(5)=NCV and "
"increase the size of the array D to have "
"dimension at least dimension NCV and allocate at least "
"NCV columns for Z. NOTE: Not necessary if Z and V share "
"the same space. Please notify the authors if this error "
"occurs.",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV-NEV >= 1 and less than or equal to N.",
-5: "WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work WORKL array is not sufficient.",
-8: "Error return from LAPACK eigenvalue calculation. "
"This should never happened.",
-9: "Error return from calculation of eigenvectors. "
"Informational error from LAPACK routine ztrevc.",
-10: "IPARAM(7) must be 1,2,3",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "HOWMNY = 'S' not yet implemented",
-13: "HOWMNY must be one of 'A' or 'P' if RVEC = .true.",
-14: "ZNAUPD did not find any eigenvalues to sufficient "
"accuracy.",
-15: "ZNEUPD got a different count of the number of "
"converged Ritz values than ZNAUPD got. This "
"indicates the user probably made an error in passing "
"data from ZNAUPD to ZNEUPD or that the data was "
"modified before entering ZNEUPD"}
CNEUPD_ERRORS = ZNEUPD_ERRORS.copy()
CNEUPD_ERRORS[-14] = ("CNAUPD did not find any eigenvalues to sufficient "
"accuracy.")
CNEUPD_ERRORS[-15] = ("CNEUPD got a different count of the number of "
"converged Ritz values than CNAUPD got. This indicates "
"the user probably made an error in passing data from "
"CNAUPD to CNEUPD or that the data was modified before "
"entering CNEUPD")
DSEUPD_ERRORS = {
0: "Normal exit.",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV must be greater than NEV and less than or equal to N.",
-5: "WHICH must be one of 'LM', 'SM', 'LA', 'SA' or 'BE'.",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work WORKL array is not sufficient.",
-8: ("Error return from trid. eigenvalue calculation; "
"Information error from LAPACK routine dsteqr."),
-9: "Starting vector is zero.",
-10: "IPARAM(7) must be 1,2,3,4,5.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "NEV and WHICH = 'BE' are incompatible.",
-14: "DSAUPD did not find any eigenvalues to sufficient accuracy.",
-15: "HOWMNY must be one of 'A' or 'S' if RVEC = .true.",
-16: "HOWMNY = 'S' not yet implemented",
-17: ("DSEUPD got a different count of the number of converged "
"Ritz values than DSAUPD got. This indicates the user "
"probably made an error in passing data from DSAUPD to "
"DSEUPD or that the data was modified before entering "
"DSEUPD.")
}
SSEUPD_ERRORS = DSEUPD_ERRORS.copy()
SSEUPD_ERRORS[-14] = ("SSAUPD did not find any eigenvalues "
"to sufficient accuracy.")
SSEUPD_ERRORS[-17] = ("SSEUPD got a different count of the number of "
"converged "
"Ritz values than SSAUPD got. This indicates the user "
"probably made an error in passing data from SSAUPD to "
"SSEUPD or that the data was modified before entering "
"SSEUPD.")
_SAUPD_ERRORS = {'d': DSAUPD_ERRORS,
's': SSAUPD_ERRORS}
_NAUPD_ERRORS = {'d': DNAUPD_ERRORS,
's': SNAUPD_ERRORS,
'z': ZNAUPD_ERRORS,
'c': CNAUPD_ERRORS}
_SEUPD_ERRORS = {'d': DSEUPD_ERRORS,
's': SSEUPD_ERRORS}
_NEUPD_ERRORS = {'d': DNEUPD_ERRORS,
's': SNEUPD_ERRORS,
'z': ZNEUPD_ERRORS,
'c': CNEUPD_ERRORS}
# accepted values of parameter WHICH in _SEUPD
_SEUPD_WHICH = ['LM', 'SM', 'LA', 'SA', 'BE']
# accepted values of parameter WHICH in _NAUPD
_NEUPD_WHICH = ['LM', 'SM', 'LR', 'SR', 'LI', 'SI']
class ArpackError(RuntimeError):
"""
ARPACK error
"""
def __init__(self, info, infodict=_NAUPD_ERRORS):
msg = infodict.get(info, "Unknown error")
RuntimeError.__init__(self, "ARPACK error %d: %s" % (info, msg))
class ArpackNoConvergence(ArpackError):
"""
ARPACK iteration did not converge
Attributes
----------
eigenvalues : ndarray
Partial result. Converged eigenvalues.
eigenvectors : ndarray
Partial result. Converged eigenvectors.
"""
def __init__(self, msg, eigenvalues, eigenvectors):
ArpackError.__init__(self, -1, {-1: msg})
self.eigenvalues = eigenvalues
self.eigenvectors = eigenvectors
class _ArpackParams(object):
def __init__(self, n, k, tp, mode=1, sigma=None,
ncv=None, v0=None, maxiter=None, which="LM", tol=0):
if k <= 0:
raise ValueError("k must be positive, k=%d" % k)
if maxiter is None:
maxiter = n * 10
if maxiter <= 0:
raise ValueError("maxiter must be positive, maxiter=%d" % maxiter)
if tp not in 'fdFD':
raise ValueError("matrix type must be 'f', 'd', 'F', or 'D'")
if v0 is not None:
# ARPACK overwrites its initial resid, make a copy
self.resid = np.array(v0, copy=True)
info = 1
else:
self.resid = np.zeros(n, tp)
info = 0
if sigma is None:
#sigma not used
self.sigma = 0
else:
self.sigma = sigma
if ncv is None:
ncv = 2 * k + 1
ncv = min(ncv, n)
self.v = np.zeros((n, ncv), tp) # holds Ritz vectors
self.iparam = np.zeros(11, "int")
# set solver mode and parameters
ishfts = 1
self.mode = mode
self.iparam[0] = ishfts
self.iparam[2] = maxiter
self.iparam[3] = 1
self.iparam[6] = mode
self.n = n
self.tol = tol
self.k = k
self.maxiter = maxiter
self.ncv = ncv
self.which = which
self.tp = tp
self.info = info
self.converged = False
self.ido = 0
def _raise_no_convergence(self):
msg = "No convergence (%d iterations, %d/%d eigenvectors converged)"
k_ok = self.iparam[4]
num_iter = self.iparam[2]
try:
ev, vec = self.extract(True)
except ArpackError as err:
msg = "%s [%s]" % (msg, err)
ev = np.zeros((0,))
vec = np.zeros((self.n, 0))
k_ok = 0
raise ArpackNoConvergence(msg % (num_iter, k_ok, self.k), ev, vec)
class _SymmetricArpackParams(_ArpackParams):
def __init__(self, n, k, tp, matvec, mode=1, M_matvec=None,
Minv_matvec=None, sigma=None,
ncv=None, v0=None, maxiter=None, which="LM", tol=0):
# The following modes are supported:
# mode = 1:
# Solve the standard eigenvalue problem:
# A*x = lambda*x :
# A - symmetric
# Arguments should be
# matvec = left multiplication by A
# M_matvec = None [not used]
# Minv_matvec = None [not used]
#
# mode = 2:
# Solve the general eigenvalue problem:
# A*x = lambda*M*x
# A - symmetric
# M - symmetric positive definite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = left multiplication by M
# Minv_matvec = left multiplication by M^-1
#
# mode = 3:
# Solve the general eigenvalue problem in shift-invert mode:
# A*x = lambda*M*x
# A - symmetric
# M - symmetric positive semi-definite
# Arguments should be
# matvec = None [not used]
# M_matvec = left multiplication by M
# or None, if M is the identity
# Minv_matvec = left multiplication by [A-sigma*M]^-1
#
# mode = 4:
# Solve the general eigenvalue problem in Buckling mode:
# A*x = lambda*AG*x
# A - symmetric positive semi-definite
# AG - symmetric indefinite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = None [not used]
# Minv_matvec = left multiplication by [A-sigma*AG]^-1
#
# mode = 5:
# Solve the general eigenvalue problem in Cayley-transformed mode:
# A*x = lambda*M*x
# A - symmetric
# M - symmetric positive semi-definite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = left multiplication by M
# or None, if M is the identity
# Minv_matvec = left multiplication by [A-sigma*M]^-1
if mode == 1:
if matvec is None:
raise ValueError("matvec must be specified for mode=1")
if M_matvec is not None:
raise ValueError("M_matvec cannot be specified for mode=1")
if Minv_matvec is not None:
raise ValueError("Minv_matvec cannot be specified for mode=1")
self.OP = matvec
self.B = lambda x: x
self.bmat = 'I'
elif mode == 2:
if matvec is None:
raise ValueError("matvec must be specified for mode=2")
if M_matvec is None:
raise ValueError("M_matvec must be specified for mode=2")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=2")
self.OP = lambda x: Minv_matvec(matvec(x))
self.OPa = Minv_matvec
self.OPb = matvec
self.B = M_matvec
self.bmat = 'G'
elif mode == 3:
if matvec is not None:
raise ValueError("matvec must not be specified for mode=3")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=3")
if M_matvec is None:
self.OP = Minv_matvec
self.OPa = Minv_matvec
self.B = lambda x: x
self.bmat = 'I'
else:
self.OP = lambda x: Minv_matvec(M_matvec(x))
self.OPa = Minv_matvec
self.B = M_matvec
self.bmat = 'G'
elif mode == 4:
if matvec is None:
raise ValueError("matvec must be specified for mode=4")
if M_matvec is not None:
raise ValueError("M_matvec must not be specified for mode=4")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=4")
self.OPa = Minv_matvec
self.OP = lambda x: self.OPa(matvec(x))
self.B = matvec
self.bmat = 'G'
elif mode == 5:
if matvec is None:
raise ValueError("matvec must be specified for mode=5")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=5")
self.OPa = Minv_matvec
self.A_matvec = matvec
if M_matvec is None:
self.OP = lambda x: Minv_matvec(matvec(x) + sigma * x)
self.B = lambda x: x
self.bmat = 'I'
else:
self.OP = lambda x: Minv_matvec(matvec(x)
+ sigma * M_matvec(x))
self.B = M_matvec
self.bmat = 'G'
else:
raise ValueError("mode=%i not implemented" % mode)
if which not in _SEUPD_WHICH:
raise ValueError("which must be one of %s"
% ' '.join(_SEUPD_WHICH))
if k >= n:
raise ValueError("k must be less than rank(A), k=%d" % k)
_ArpackParams.__init__(self, n, k, tp, mode, sigma,
ncv, v0, maxiter, which, tol)
if self.ncv > n or self.ncv <= k:
raise ValueError("ncv must be k<ncv<=n, ncv=%s" % self.ncv)
self.workd = np.zeros(3 * n, self.tp)
self.workl = np.zeros(self.ncv * (self.ncv + 8), self.tp)
ltr = _type_conv[self.tp]
if ltr not in ["s", "d"]:
raise ValueError("Input matrix is not real-valued.")
self._arpack_solver = _arpack.__dict__[ltr + 'saupd']
self._arpack_extract = _arpack.__dict__[ltr + 'seupd']
self.iterate_infodict = _SAUPD_ERRORS[ltr]
self.extract_infodict = _SEUPD_ERRORS[ltr]
self.ipntr = np.zeros(11, "int")
def iterate(self):
self.ido, self.resid, self.v, self.iparam, self.ipntr, self.info = \
self._arpack_solver(self.ido, self.bmat, self.which, self.k,
self.tol, self.resid, self.v, self.iparam,
self.ipntr, self.workd, self.workl, self.info)
xslice = slice(self.ipntr[0] - 1, self.ipntr[0] - 1 + self.n)
yslice = slice(self.ipntr[1] - 1, self.ipntr[1] - 1 + self.n)
if self.ido == -1:
# initialization
self.workd[yslice] = self.OP(self.workd[xslice])
elif self.ido == 1:
# compute y = Op*x
if self.mode == 1:
self.workd[yslice] = self.OP(self.workd[xslice])
elif self.mode == 2:
self.workd[xslice] = self.OPb(self.workd[xslice])
self.workd[yslice] = self.OPa(self.workd[xslice])
elif self.mode == 5:
Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
Ax = self.A_matvec(self.workd[xslice])
self.workd[yslice] = self.OPa(Ax + (self.sigma *
self.workd[Bxslice]))
else:
Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
self.workd[yslice] = self.OPa(self.workd[Bxslice])
elif self.ido == 2:
self.workd[yslice] = self.B(self.workd[xslice])
elif self.ido == 3:
raise ValueError("ARPACK requested user shifts. Assure ISHIFT==0")
else:
self.converged = True
if self.info == 0:
pass
elif self.info == 1:
self._raise_no_convergence()
else:
raise ArpackError(self.info, infodict=self.iterate_infodict)
def extract(self, return_eigenvectors):
rvec = return_eigenvectors
ierr = 0
howmny = 'A' # return all eigenvectors
sselect = np.zeros(self.ncv, 'int') # unused
d, z, ierr = self._arpack_extract(rvec, howmny, sselect, self.sigma,
self.bmat, self.which, self.k,
self.tol, self.resid, self.v,
self.iparam[0:7], self.ipntr,
self.workd[0:2 * self.n],
self.workl, ierr)
if ierr != 0:
raise ArpackError(ierr, infodict=self.extract_infodict)
k_ok = self.iparam[4]
d = d[:k_ok]
z = z[:, :k_ok]
if return_eigenvectors:
return d, z
else:
return d
class _UnsymmetricArpackParams(_ArpackParams):
def __init__(self, n, k, tp, matvec, mode=1, M_matvec=None,
Minv_matvec=None, sigma=None,
ncv=None, v0=None, maxiter=None, which="LM", tol=0):
# The following modes are supported:
# mode = 1:
# Solve the standard eigenvalue problem:
# A*x = lambda*x
# A - square matrix
# Arguments should be
# matvec = left multiplication by A
# M_matvec = None [not used]
# Minv_matvec = None [not used]
#
# mode = 2:
# Solve the generalized eigenvalue problem:
# A*x = lambda*M*x
# A - square matrix
# M - symmetric, positive semi-definite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = left multiplication by M
# Minv_matvec = left multiplication by M^-1
#
# mode = 3,4:
# Solve the general eigenvalue problem in shift-invert mode:
# A*x = lambda*M*x
# A - square matrix
# M - symmetric, positive semi-definite
# Arguments should be
# matvec = None [not used]
# M_matvec = left multiplication by M
# or None, if M is the identity
# Minv_matvec = left multiplication by [A-sigma*M]^-1
# if A is real and mode==3, use the real part of Minv_matvec
# if A is real and mode==4, use the imag part of Minv_matvec
# if A is complex and mode==3,
# use real and imag parts of Minv_matvec
if mode == 1:
if matvec is None:
raise ValueError("matvec must be specified for mode=1")
if M_matvec is not None:
raise ValueError("M_matvec cannot be specified for mode=1")
if Minv_matvec is not None:
raise ValueError("Minv_matvec cannot be specified for mode=1")
self.OP = matvec
self.B = lambda x: x
self.bmat = 'I'
elif mode == 2:
if matvec is None:
raise ValueError("matvec must be specified for mode=2")
if M_matvec is None:
raise ValueError("M_matvec must be specified for mode=2")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=2")
self.OP = lambda x: Minv_matvec(matvec(x))
self.OPa = Minv_matvec
self.OPb = matvec
self.B = M_matvec
self.bmat = 'G'
elif mode in (3, 4):
if matvec is None:
raise ValueError("matvec must be specified "
"for mode in (3,4)")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified "
"for mode in (3,4)")
self.matvec = matvec
if tp in 'DF': # complex type
if mode == 3:
self.OPa = Minv_matvec
else:
raise ValueError("mode=4 invalid for complex A")
else: # real type
if mode == 3:
self.OPa = lambda x: np.real(Minv_matvec(x))
else:
self.OPa = lambda x: np.imag(Minv_matvec(x))
if M_matvec is None:
self.B = lambda x: x
self.bmat = 'I'
self.OP = self.OPa
else:
self.B = M_matvec
self.bmat = 'G'
self.OP = lambda x: self.OPa(M_matvec(x))
else:
raise ValueError("mode=%i not implemented" % mode)
if which not in _NEUPD_WHICH:
raise ValueError("Parameter which must be one of %s"
% ' '.join(_NEUPD_WHICH))
if k >= n - 1:
raise ValueError("k must be less than rank(A)-1, k=%d" % k)
_ArpackParams.__init__(self, n, k, tp, mode, sigma,
ncv, v0, maxiter, which, tol)
if self.ncv > n or self.ncv <= k + 1:
raise ValueError("ncv must be k+1<ncv<=n, ncv=%s" % self.ncv)
self.workd = np.zeros(3 * n, self.tp)
self.workl = np.zeros(3 * self.ncv * (self.ncv + 2), self.tp)
ltr = _type_conv[self.tp]
self._arpack_solver = _arpack.__dict__[ltr + 'naupd']
self._arpack_extract = _arpack.__dict__[ltr + 'neupd']
self.iterate_infodict = _NAUPD_ERRORS[ltr]
self.extract_infodict = _NEUPD_ERRORS[ltr]
self.ipntr = np.zeros(14, "int")
if self.tp in 'FD':
self.rwork = np.zeros(self.ncv, self.tp.lower())
else:
self.rwork = None
def iterate(self):
if self.tp in 'fd':
self.ido, self.resid, self.v, self.iparam, self.ipntr, self.info =\
self._arpack_solver(self.ido, self.bmat, self.which, self.k,
self.tol, self.resid, self.v, self.iparam,
self.ipntr, self.workd, self.workl,
self.info)
else:
self.ido, self.resid, self.v, self.iparam, self.ipntr, self.info =\
self._arpack_solver(self.ido, self.bmat, self.which, self.k,
self.tol, self.resid, self.v, self.iparam,
self.ipntr, self.workd, self.workl,
self.rwork, self.info)
xslice = slice(self.ipntr[0] - 1, self.ipntr[0] - 1 + self.n)
yslice = slice(self.ipntr[1] - 1, self.ipntr[1] - 1 + self.n)
if self.ido == -1:
# initialization
self.workd[yslice] = self.OP(self.workd[xslice])
elif self.ido == 1:
# compute y = Op*x
if self.mode in (1, 2):
self.workd[yslice] = self.OP(self.workd[xslice])
else:
Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
self.workd[yslice] = self.OPa(self.workd[Bxslice])
elif self.ido == 2:
self.workd[yslice] = self.B(self.workd[xslice])
elif self.ido == 3:
raise ValueError("ARPACK requested user shifts. Assure ISHIFT==0")
else:
self.converged = True
if self.info == 0:
pass
elif self.info == 1:
self._raise_no_convergence()
else:
raise ArpackError(self.info, infodict=self.iterate_infodict)
def extract(self, return_eigenvectors):
k, n = self.k, self.n
ierr = 0
howmny = 'A' # return all eigenvectors
sselect = np.zeros(self.ncv, 'int') # unused
sigmar = np.real(self.sigma)
sigmai = np.imag(self.sigma)
workev = np.zeros(3 * self.ncv, self.tp)
if self.tp in 'fd':
dr = np.zeros(k + 1, self.tp)
di = np.zeros(k + 1, self.tp)
zr = np.zeros((n, k + 1), self.tp)
dr, di, zr, ierr = \
self._arpack_extract(
return_eigenvectors, howmny, sselect, sigmar, sigmai,
workev, self.bmat, self.which, k, self.tol, self.resid,
self.v, self.iparam, self.ipntr, self.workd, self.workl,
self.info)
if ierr != 0:
raise ArpackError(ierr, infodict=self.extract_infodict)
nreturned = self.iparam[4] # number of good eigenvalues returned
# Build complex eigenvalues from real and imaginary parts
d = dr + 1.0j * di
# Arrange the eigenvectors: complex eigenvectors are stored as
# real,imaginary in consecutive columns
z = zr.astype(self.tp.upper())
# The ARPACK nonsymmetric real and double interface (s,d)naupd
# return eigenvalues and eigenvectors in real (float,double)
# arrays.
# Efficiency: this should check that return_eigenvectors == True
# before going through this construction.
if sigmai == 0:
i = 0
while i <= k:
# check if complex
if abs(d[i].imag) != 0:
# this is a complex conjugate pair with eigenvalues
# in consecutive columns
if i < k:
z[:, i] = zr[:, i] + 1.0j * zr[:, i + 1]
z[:, i + 1] = z[:, i].conjugate()
i += 1
else:
#last eigenvalue is complex: the imaginary part of
# the eigenvector has not been returned
#this can only happen if nreturned > k, so we'll
# throw out this case.
nreturned -= 1
i += 1
else:
# real matrix, mode 3 or 4, imag(sigma) is nonzero:
# see remark 3 in <s,d>neupd.f
# Build complex eigenvalues from real and imaginary parts
i = 0
while i <= k:
if abs(d[i].imag) == 0:
d[i] = np.dot(zr[:, i], self.matvec(zr[:, i]))
else:
if i < k:
z[:, i] = zr[:, i] + 1.0j * zr[:, i + 1]
z[:, i + 1] = z[:, i].conjugate()
d[i] = ((np.dot(zr[:, i],
self.matvec(zr[:, i]))
+ np.dot(zr[:, i + 1],
self.matvec(zr[:, i + 1])))
+ 1j * (np.dot(zr[:, i],
self.matvec(zr[:, i + 1]))
- np.dot(zr[:, i + 1],
self.matvec(zr[:, i]))))
d[i + 1] = d[i].conj()
i += 1
else:
#last eigenvalue is complex: the imaginary part of
# the eigenvector has not been returned
#this can only happen if nreturned > k, so we'll
# throw out this case.
nreturned -= 1
i += 1
# Now we have k+1 possible eigenvalues and eigenvectors
# Return the ones specified by the keyword "which"
if nreturned <= k:
# we got as many or fewer eigenvalues than we wanted
d = d[:nreturned]
z = z[:, :nreturned]
else:
# we got one extra eigenvalue (likely a cc pair, but which?)
# cut at approx precision for sorting
rd = np.round(d, decimals=_ndigits[self.tp])
if self.which in ['LR', 'SR']:
ind = np.argsort(rd.real)
elif self.which in ['LI', 'SI']:
# for LI,SI ARPACK returns largest,smallest
# abs(imaginary) why?
ind = np.argsort(abs(rd.imag))
else:
ind = np.argsort(abs(rd))
if self.which in ['LR', 'LM', 'LI']:
d = d[ind[-k:]]
z = z[:, ind[-k:]]
if self.which in ['SR', 'SM', 'SI']:
d = d[ind[:k]]
z = z[:, ind[:k]]
else:
# complex is so much simpler...
d, z, ierr =\
self._arpack_extract(
return_eigenvectors, howmny, sselect, self.sigma, workev,
self.bmat, self.which, k, self.tol, self.resid, self.v,
self.iparam, self.ipntr, self.workd, self.workl,
self.rwork, ierr)
if ierr != 0:
raise ArpackError(ierr, infodict=self.extract_infodict)
k_ok = self.iparam[4]
d = d[:k_ok]
z = z[:, :k_ok]
if return_eigenvectors:
return d, z
else:
return d
def _aslinearoperator_with_dtype(m):
m = aslinearoperator(m)
if not hasattr(m, 'dtype'):
x = np.zeros(m.shape[1])
m.dtype = (m * x).dtype
return m
class SpLuInv(LinearOperator):
"""
SpLuInv:
helper class to repeatedly solve M*x=b
using a sparse LU-decomposition of M
"""
def __init__(self, M):
self.M_lu = splu(M)
LinearOperator.__init__(self, M.shape, self._matvec, dtype=M.dtype)
self.isreal = not np.issubdtype(self.dtype, np.complexfloating)
def _matvec(self, x):
# careful here: splu.solve will throw away imaginary
# part of x if M is real
if self.isreal and np.issubdtype(x.dtype, np.complexfloating):
return (self.M_lu.solve(np.real(x))
+ 1j * self.M_lu.solve(np.imag(x)))
else:
return self.M_lu.solve(x)
class LuInv(LinearOperator):
"""
LuInv:
helper class to repeatedly solve M*x=b
using an LU-decomposition of M
"""
def __init__(self, M):
self.M_lu = lu_factor(M)
LinearOperator.__init__(self, M.shape, self._matvec, dtype=M.dtype)
def _matvec(self, x):
return lu_solve(self.M_lu, x)
class IterInv(LinearOperator):
"""
IterInv:
helper class to repeatedly solve M*x=b
using an iterative method.
"""
def __init__(self, M, ifunc=gmres, tol=0):
if tol <= 0:
# when tol=0, ARPACK uses machine tolerance as calculated
# by LAPACK's _LAMCH function. We should match this
tol = np.finfo(M.dtype).eps
self.M = M
self.ifunc = ifunc
self.tol = tol
if hasattr(M, 'dtype'):
dtype = M.dtype
else:
x = np.zeros(M.shape[1])
dtype = (M * x).dtype
LinearOperator.__init__(self, M.shape, self._matvec, dtype=dtype)
def _matvec(self, x):
b, info = self.ifunc(self.M, x, tol=self.tol)
if info != 0:
raise ValueError("Error in inverting M: function "
"%s did not converge (info = %i)."
% (self.ifunc.__name__, info))
return b
class IterOpInv(LinearOperator):
"""
IterOpInv:
helper class to repeatedly solve [A-sigma*M]*x = b
using an iterative method
"""
def __init__(self, A, M, sigma, ifunc=gmres, tol=0):
if tol <= 0:
# when tol=0, ARPACK uses machine tolerance as calculated
# by LAPACK's _LAMCH function. We should match this
tol = np.finfo(A.dtype).eps
self.A = A
self.M = M
self.sigma = sigma
self.ifunc = ifunc
self.tol = tol
x = np.zeros(A.shape[1])
if M is None:
dtype = self.mult_func_M_None(x).dtype
self.OP = LinearOperator(self.A.shape,
self.mult_func_M_None,
dtype=dtype)
else:
dtype = self.mult_func(x).dtype
self.OP = LinearOperator(self.A.shape,
self.mult_func,
dtype=dtype)
LinearOperator.__init__(self, A.shape, self._matvec, dtype=dtype)
def mult_func(self, x):
return self.A.matvec(x) - self.sigma * self.M.matvec(x)
def mult_func_M_None(self, x):
return self.A.matvec(x) - self.sigma * x
def _matvec(self, x):
b, info = self.ifunc(self.OP, x, tol=self.tol)
if info != 0:
raise ValueError("Error in inverting [A-sigma*M]: function "
"%s did not converge (info = %i)."
% (self.ifunc.__name__, info))
return b
def get_inv_matvec(M, symmetric=False, tol=0):
if isdense(M):
return LuInv(M).matvec
elif isspmatrix(M):
if isspmatrix_csr(M) and symmetric:
M = M.T
return SpLuInv(M).matvec
else:
return IterInv(M, tol=tol).matvec
def get_OPinv_matvec(A, M, sigma, symmetric=False, tol=0):
if sigma == 0:
return get_inv_matvec(A, symmetric=symmetric, tol=tol)
if M is None:
#M is the identity matrix
if isdense(A):
if (np.issubdtype(A.dtype, np.complexfloating)
or np.imag(sigma) == 0):
A = np.copy(A)
else:
A = A + 0j
A.flat[::A.shape[1] + 1] -= sigma
return LuInv(A).matvec
elif isspmatrix(A):
A = A - sigma * identity(A.shape[0])
if symmetric and isspmatrix_csr(A):
A = A.T
return SpLuInv(A.tocsc()).matvec
else:
return IterOpInv(_aslinearoperator_with_dtype(A), M, sigma,
tol=tol).matvec
else:
if ((not isdense(A) and not isspmatrix(A)) or
(not isdense(M) and not isspmatrix(M))):
return IterOpInv(_aslinearoperator_with_dtype(A),
_aslinearoperator_with_dtype(M), sigma,
tol=tol).matvec
elif isdense(A) or isdense(M):
return LuInv(A - sigma * M).matvec
else:
OP = A - sigma * M
if symmetric and isspmatrix_csr(OP):
OP = OP.T
return SpLuInv(OP.tocsc()).matvec
def _eigs(A, k=6, M=None, sigma=None, which='LM', v0=None, ncv=None,
maxiter=None, tol=0, return_eigenvectors=True, Minv=None, OPinv=None,
OPpart=None):
"""
Find k eigenvalues and eigenvectors of the square matrix A.
Solves ``A * x[i] = w[i] * x[i]``, the standard eigenvalue problem
for w[i] eigenvalues with corresponding eigenvectors x[i].
If M is specified, solves ``A * x[i] = w[i] * M * x[i]``, the
generalized eigenvalue problem for w[i] eigenvalues
with corresponding eigenvectors x[i]
Parameters
----------
A : An N x N matrix, array, sparse matrix, or LinearOperator representing \
the operation A * x, where A is a real or complex square matrix.
k : int, default 6
The number of eigenvalues and eigenvectors desired.
`k` must be smaller than N. It is not possible to compute all
eigenvectors of a matrix.
return_eigenvectors : boolean, default True
Whether to return the eigenvectors along with the eigenvalues.
M : An N x N matrix, array, sparse matrix, or LinearOperator representing
the operation M*x for the generalized eigenvalue problem
``A * x = w * M * x``
M must represent a real symmetric matrix. For best results, M should
be of the same type as A. Additionally:
* If sigma==None, M is positive definite
* If sigma is specified, M is positive semi-definite
If sigma==None, eigs requires an operator to compute the solution
of the linear equation `M * x = b`. This is done internally via a
(sparse) LU decomposition for an explicit matrix M, or via an
iterative solver for a general linear operator. Alternatively,
the user can supply the matrix or operator Minv, which gives
x = Minv * b = M^-1 * b
sigma : real or complex
Find eigenvalues near sigma using shift-invert mode. This requires
an operator to compute the solution of the linear system
`[A - sigma * M] * x = b`, where M is the identity matrix if
unspecified. This is computed internally via a (sparse) LU
decomposition for explicit matrices A & M, or via an iterative
solver if either A or M is a general linear operator.
Alternatively, the user can supply the matrix or operator OPinv,
which gives x = OPinv * b = [A - sigma * M]^-1 * b.
For a real matrix A, shift-invert can either be done in imaginary
mode or real mode, specified by the parameter OPpart ('r' or 'i').
Note that when sigma is specified, the keyword 'which' (below)
refers to the shifted eigenvalues w'[i] where:
* If A is real and OPpart == 'r' (default),
w'[i] = 1/2 * [ 1/(w[i]-sigma) + 1/(w[i]-conj(sigma)) ]
* If A is real and OPpart == 'i',
w'[i] = 1/2i * [ 1/(w[i]-sigma) - 1/(w[i]-conj(sigma)) ]
* If A is complex,
w'[i] = 1/(w[i]-sigma)
v0 : array
Starting vector for iteration.
ncv : integer
The number of Lanczos vectors generated
`ncv` must be greater than `k`; it is recommended that ``ncv > 2*k``.
which : string ['LM' | 'SM' | 'LR' | 'SR' | 'LI' | 'SI']
Which `k` eigenvectors and eigenvalues to find:
- 'LM' : largest magnitude
- 'SM' : smallest magnitude
- 'LR' : largest real part
- 'SR' : smallest real part
- 'LI' : largest imaginary part
- 'SI' : smallest imaginary part
When sigma != None, 'which' refers to the shifted eigenvalues w'[i]
(see discussion in 'sigma', above). ARPACK is generally better
at finding large values than small values. If small eigenvalues are
desired, consider using shift-invert mode for better performance.
maxiter : integer
Maximum number of Arnoldi update iterations allowed
tol : float
Relative accuracy for eigenvalues (stopping criterion)
The default value of 0 implies machine precision.
return_eigenvectors : boolean
Return eigenvectors (True) in addition to eigenvalues
Minv : N x N matrix, array, sparse matrix, or linear operator
See notes in M, above.
OPinv : N x N matrix, array, sparse matrix, or linear operator
See notes in sigma, above.
OPpart : 'r' or 'i'.
See notes in sigma, above
Returns
-------
w : array
Array of k eigenvalues.
v : array
An array of `k` eigenvectors.
``v[:, i]`` is the eigenvector corresponding to the eigenvalue w[i].
Raises
------
ArpackNoConvergence
When the requested convergence is not obtained.
The currently converged eigenvalues and eigenvectors can be found
as ``eigenvalues`` and ``eigenvectors`` attributes of the exception
object.
See Also
--------
eigsh : eigenvalues and eigenvectors for symmetric matrix A
svds : singular value decomposition for a matrix A
Examples
--------
Find 6 eigenvectors of the identity matrix:
>>> from sklearn.utils.arpack import eigs
>>> id = np.identity(13)
>>> vals, vecs = eigs(id, k=6)
>>> vals
array([ 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j])
>>> vecs.shape
(13, 6)
Notes
-----
This function is a wrapper to the ARPACK [1]_ SNEUPD, DNEUPD, CNEUPD,
ZNEUPD, functions which use the Implicitly Restarted Arnoldi Method to
find the eigenvalues and eigenvectors [2]_.
References
----------
.. [1] ARPACK Software, http://www.caam.rice.edu/software/ARPACK/
.. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang, ARPACK USERS GUIDE:
Solution of Large Scale Eigenvalue Problems by Implicitly Restarted
Arnoldi Methods. SIAM, Philadelphia, PA, 1998.
"""
if A.shape[0] != A.shape[1]:
raise ValueError('expected square matrix (shape=%s)' % (A.shape,))
if M is not None:
if M.shape != A.shape:
raise ValueError('wrong M dimensions %s, should be %s'
% (M.shape, A.shape))
if np.dtype(M.dtype).char.lower() != np.dtype(A.dtype).char.lower():
warnings.warn('M does not have the same type precision as A. '
'This may adversely affect ARPACK convergence')
n = A.shape[0]
if k <= 0 or k >= n:
raise ValueError("k must be between 1 and rank(A)-1")
if sigma is None:
matvec = _aslinearoperator_with_dtype(A).matvec
if OPinv is not None:
raise ValueError("OPinv should not be specified "
"with sigma = None.")
if OPpart is not None:
raise ValueError("OPpart should not be specified with "
"sigma = None or complex A")
if M is None:
#standard eigenvalue problem
mode = 1
M_matvec = None
Minv_matvec = None
if Minv is not None:
raise ValueError("Minv should not be "
"specified with M = None.")
else:
#general eigenvalue problem
mode = 2
if Minv is None:
Minv_matvec = get_inv_matvec(M, symmetric=True, tol=tol)
else:
Minv = _aslinearoperator_with_dtype(Minv)
Minv_matvec = Minv.matvec
M_matvec = _aslinearoperator_with_dtype(M).matvec
else:
#sigma is not None: shift-invert mode
if np.issubdtype(A.dtype, np.complexfloating):
if OPpart is not None:
raise ValueError("OPpart should not be specified "
"with sigma=None or complex A")
mode = 3
elif OPpart is None or OPpart.lower() == 'r':
mode = 3
elif OPpart.lower() == 'i':
if np.imag(sigma) == 0:
raise ValueError("OPpart cannot be 'i' if sigma is real")
mode = 4
else:
raise ValueError("OPpart must be one of ('r','i')")
matvec = _aslinearoperator_with_dtype(A).matvec
if Minv is not None:
raise ValueError("Minv should not be specified when sigma is")
if OPinv is None:
Minv_matvec = get_OPinv_matvec(A, M, sigma,
symmetric=False, tol=tol)
else:
OPinv = _aslinearoperator_with_dtype(OPinv)
Minv_matvec = OPinv.matvec
if M is None:
M_matvec = None
else:
M_matvec = _aslinearoperator_with_dtype(M).matvec
params = _UnsymmetricArpackParams(n, k, A.dtype.char, matvec, mode,
M_matvec, Minv_matvec, sigma,
ncv, v0, maxiter, which, tol)
while not params.converged:
params.iterate()
return params.extract(return_eigenvectors)
def _eigsh(A, k=6, M=None, sigma=None, which='LM', v0=None, ncv=None,
maxiter=None, tol=0, return_eigenvectors=True, Minv=None,
OPinv=None, mode='normal'):
"""
Find k eigenvalues and eigenvectors of the real symmetric square matrix
or complex hermitian matrix A.
Solves ``A * x[i] = w[i] * x[i]``, the standard eigenvalue problem for
w[i] eigenvalues with corresponding eigenvectors x[i].
If M is specified, solves ``A * x[i] = w[i] * M * x[i]``, the
generalized eigenvalue problem for w[i] eigenvalues
with corresponding eigenvectors x[i]
Parameters
----------
A : An N x N matrix, array, sparse matrix, or LinearOperator representing
the operation A * x, where A is a real symmetric matrix
For buckling mode (see below) A must additionally be positive-definite
k : integer
The number of eigenvalues and eigenvectors desired.
`k` must be smaller than N. It is not possible to compute all
eigenvectors of a matrix.
M : An N x N matrix, array, sparse matrix, or linear operator representing
the operation M * x for the generalized eigenvalue problem
``A * x = w * M * x``.
M must represent a real, symmetric matrix. For best results, M should
be of the same type as A. Additionally:
* If sigma == None, M is symmetric positive definite
* If sigma is specified, M is symmetric positive semi-definite
* In buckling mode, M is symmetric indefinite.
If sigma == None, eigsh requires an operator to compute the solution
of the linear equation `M * x = b`. This is done internally via a
(sparse) LU decomposition for an explicit matrix M, or via an
iterative solver for a general linear operator. Alternatively,
the user can supply the matrix or operator Minv, which gives
x = Minv * b = M^-1 * b
sigma : real
Find eigenvalues near sigma using shift-invert mode. This requires
an operator to compute the solution of the linear system
`[A - sigma * M] x = b`, where M is the identity matrix if
unspecified. This is computed internally via a (sparse) LU
decomposition for explicit matrices A & M, or via an iterative
solver if either A or M is a general linear operator.
Alternatively, the user can supply the matrix or operator OPinv,
which gives x = OPinv * b = [A - sigma * M]^-1 * b.
Note that when sigma is specified, the keyword 'which' refers to
the shifted eigenvalues w'[i] where:
- if mode == 'normal',
w'[i] = 1 / (w[i] - sigma)
- if mode == 'cayley',
w'[i] = (w[i] + sigma) / (w[i] - sigma)
- if mode == 'buckling',
w'[i] = w[i] / (w[i] - sigma)
(see further discussion in 'mode' below)
v0 : array
Starting vector for iteration.
ncv : integer
The number of Lanczos vectors generated
ncv must be greater than k and smaller than n;
it is recommended that ncv > 2*k
which : string ['LM' | 'SM' | 'LA' | 'SA' | 'BE']
If A is a complex hermitian matrix, 'BE' is invalid.
Which `k` eigenvectors and eigenvalues to find
- 'LM' : Largest (in magnitude) eigenvalues
- 'SM' : Smallest (in magnitude) eigenvalues
- 'LA' : Largest (algebraic) eigenvalues
- 'SA' : Smallest (algebraic) eigenvalues
- 'BE' : Half (k/2) from each end of the spectrum
When k is odd, return one more (k/2+1) from the high end
When sigma != None, 'which' refers to the shifted eigenvalues w'[i]
(see discussion in 'sigma', above). ARPACK is generally better
at finding large values than small values. If small eigenvalues are
desired, consider using shift-invert mode for better performance.
maxiter : integer
Maximum number of Arnoldi update iterations allowed
tol : float
Relative accuracy for eigenvalues (stopping criterion).
The default value of 0 implies machine precision.
Minv : N x N matrix, array, sparse matrix, or LinearOperator
See notes in M, above
OPinv : N x N matrix, array, sparse matrix, or LinearOperator
See notes in sigma, above.
return_eigenvectors : boolean
Return eigenvectors (True) in addition to eigenvalues
mode : string ['normal' | 'buckling' | 'cayley']
Specify strategy to use for shift-invert mode. This argument applies
only for real-valued A and sigma != None. For shift-invert mode,
ARPACK internally solves the eigenvalue problem
``OP * x'[i] = w'[i] * B * x'[i]``
and transforms the resulting Ritz vectors x'[i] and Ritz values w'[i]
into the desired eigenvectors and eigenvalues of the problem
``A * x[i] = w[i] * M * x[i]``.
The modes are as follows:
- 'normal' : OP = [A - sigma * M]^-1 * M
B = M
w'[i] = 1 / (w[i] - sigma)
- 'buckling' : OP = [A - sigma * M]^-1 * A
B = A
w'[i] = w[i] / (w[i] - sigma)
- 'cayley' : OP = [A - sigma * M]^-1 * [A + sigma * M]
B = M
w'[i] = (w[i] + sigma) / (w[i] - sigma)
The choice of mode will affect which eigenvalues are selected by
the keyword 'which', and can also impact the stability of
convergence (see [2] for a discussion)
Returns
-------
w : array
Array of k eigenvalues
v : array
An array of k eigenvectors
        The column v[:, i] is the eigenvector corresponding to the eigenvalue w[i]
Raises
------
ArpackNoConvergence
When the requested convergence is not obtained.
The currently converged eigenvalues and eigenvectors can be found
as ``eigenvalues`` and ``eigenvectors`` attributes of the exception
object.
See Also
--------
eigs : eigenvalues and eigenvectors for a general (nonsymmetric) matrix A
svds : singular value decomposition for a matrix A
Notes
-----
This function is a wrapper to the ARPACK [1]_ SSEUPD and DSEUPD
functions which use the Implicitly Restarted Lanczos Method to
find the eigenvalues and eigenvectors [2]_.
Examples
--------
>>> from sklearn.utils.arpack import eigsh
>>> id = np.identity(13)
>>> vals, vecs = eigsh(id, k=6)
>>> vals # doctest: +SKIP
array([ 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j])
>>> print(vecs.shape)
(13, 6)
References
----------
.. [1] ARPACK Software, http://www.caam.rice.edu/software/ARPACK/
.. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang, ARPACK USERS GUIDE:
Solution of Large Scale Eigenvalue Problems by Implicitly Restarted
Arnoldi Methods. SIAM, Philadelphia, PA, 1998.
"""
# complex hermitian matrices should be solved with eigs
if np.issubdtype(A.dtype, np.complexfloating):
if mode != 'normal':
raise ValueError("mode=%s cannot be used with "
"complex matrix A" % mode)
if which == 'BE':
raise ValueError("which='BE' cannot be used with complex matrix A")
elif which == 'LA':
which = 'LR'
elif which == 'SA':
which = 'SR'
ret = eigs(A, k, M=M, sigma=sigma, which=which, v0=v0,
ncv=ncv, maxiter=maxiter, tol=tol,
return_eigenvectors=return_eigenvectors, Minv=Minv,
OPinv=OPinv)
if return_eigenvectors:
return ret[0].real, ret[1]
else:
return ret.real
if A.shape[0] != A.shape[1]:
raise ValueError('expected square matrix (shape=%s)' % (A.shape,))
if M is not None:
if M.shape != A.shape:
raise ValueError('wrong M dimensions %s, should be %s'
% (M.shape, A.shape))
if np.dtype(M.dtype).char.lower() != np.dtype(A.dtype).char.lower():
warnings.warn('M does not have the same type precision as A. '
'This may adversely affect ARPACK convergence')
n = A.shape[0]
if k <= 0 or k >= n:
raise ValueError("k must be between 1 and rank(A)-1")
if sigma is None:
A = _aslinearoperator_with_dtype(A)
matvec = A.matvec
if OPinv is not None:
raise ValueError("OPinv should not be specified "
"with sigma = None.")
if M is None:
#standard eigenvalue problem
mode = 1
M_matvec = None
Minv_matvec = None
if Minv is not None:
raise ValueError("Minv should not be "
"specified with M = None.")
else:
#general eigenvalue problem
mode = 2
if Minv is None:
Minv_matvec = get_inv_matvec(M, symmetric=True, tol=tol)
else:
Minv = _aslinearoperator_with_dtype(Minv)
Minv_matvec = Minv.matvec
M_matvec = _aslinearoperator_with_dtype(M).matvec
else:
# sigma is not None: shift-invert mode
if Minv is not None:
raise ValueError("Minv should not be specified when sigma is")
# normal mode
if mode == 'normal':
mode = 3
matvec = None
if OPinv is None:
Minv_matvec = get_OPinv_matvec(A, M, sigma,
symmetric=True, tol=tol)
else:
OPinv = _aslinearoperator_with_dtype(OPinv)
Minv_matvec = OPinv.matvec
if M is None:
M_matvec = None
else:
M = _aslinearoperator_with_dtype(M)
M_matvec = M.matvec
# buckling mode
elif mode == 'buckling':
mode = 4
if OPinv is None:
Minv_matvec = get_OPinv_matvec(A, M, sigma,
symmetric=True, tol=tol)
else:
Minv_matvec = _aslinearoperator_with_dtype(OPinv).matvec
matvec = _aslinearoperator_with_dtype(A).matvec
M_matvec = None
# cayley-transform mode
elif mode == 'cayley':
mode = 5
matvec = _aslinearoperator_with_dtype(A).matvec
if OPinv is None:
Minv_matvec = get_OPinv_matvec(A, M, sigma,
symmetric=True, tol=tol)
else:
Minv_matvec = _aslinearoperator_with_dtype(OPinv).matvec
if M is None:
M_matvec = None
else:
M_matvec = _aslinearoperator_with_dtype(M).matvec
# unrecognized mode
else:
raise ValueError("unrecognized mode '%s'" % mode)
params = _SymmetricArpackParams(n, k, A.dtype.char, matvec, mode,
M_matvec, Minv_matvec, sigma,
ncv, v0, maxiter, which, tol)
while not params.converged:
params.iterate()
return params.extract(return_eigenvectors)
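# A minimal, hedged usage sketch (added for illustration; not part of the
# original backport). It uses only the public scipy.sparse.linalg.eigsh API
# described in the docstring above; the diagonal test matrix and the shift
# value sigma=1.5 are illustrative assumptions.
def _demo_eigsh_shift_invert():
    """Return the two eigenvalues of a symmetric matrix nearest sigma=1.5."""
    from scipy.sparse.linalg import eigsh as sp_eigsh
    A = np.diag(np.arange(1.0, 11.0))  # symmetric, eigenvalues 1..10
    # In shift-invert ('normal') mode the largest w'[i] = 1 / (w[i] - sigma)
    # correspond to the eigenvalues w[i] closest to sigma.
    vals, _ = sp_eigsh(A, k=2, sigma=1.5, which='LM')
    return np.sort(vals)  # expected: array([1., 2.])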
def _svds(A, k=6, ncv=None, tol=0):
"""Compute k singular values/vectors for a sparse matrix using ARPACK.
Parameters
----------
A : sparse matrix
Array to compute the SVD on
k : int, optional
Number of singular values and vectors to compute.
ncv : integer
The number of Lanczos vectors generated
ncv must be greater than k+1 and smaller than n;
it is recommended that ncv > 2*k
tol : float, optional
Tolerance for singular values. Zero (default) means machine precision.
Notes
-----
This is a naive implementation using an eigensolver on A.H * A or
A * A.H, depending on which one is more efficient.
"""
if not (isinstance(A, np.ndarray) or isspmatrix(A)):
A = np.asarray(A)
n, m = A.shape
if np.issubdtype(A.dtype, np.complexfloating):
herm = lambda x: x.T.conjugate()
eigensolver = eigs
else:
herm = lambda x: x.T
eigensolver = eigsh
if n > m:
X = A
XH = herm(A)
else:
XH = A
X = herm(A)
if hasattr(XH, 'dot'):
def matvec_XH_X(x):
return XH.dot(X.dot(x))
else:
def matvec_XH_X(x):
return np.dot(XH, np.dot(X, x))
XH_X = LinearOperator(matvec=matvec_XH_X, dtype=X.dtype,
shape=(X.shape[1], X.shape[1]))
# Ignore deprecation warnings here: dot on matrices is deprecated,
# but this code is a backport anyhow
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
eigvals, eigvec = eigensolver(XH_X, k=k, tol=tol ** 2)
s = np.sqrt(eigvals)
if n > m:
v = eigvec
if hasattr(X, 'dot'):
u = X.dot(v) / s
else:
u = np.dot(X, v) / s
vh = herm(v)
else:
u = eigvec
if hasattr(X, 'dot'):
vh = herm(X.dot(u) / s)
else:
vh = herm(np.dot(X, u) / s)
return u, s, vh
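# A small, hedged cross-check (added for illustration; not part of the
# original backport): the k singular values returned by svds should match the
# top-k singular values of a dense SVD. The random matrix below is an
# arbitrary assumption.
def _demo_svds_matches_dense_svd(seed=0):
    from scipy.sparse.linalg import svds as sp_svds
    rng = np.random.RandomState(seed)
    A = rng.rand(20, 8)
    u, s, vh = sp_svds(A, k=3)
    top3_dense = np.linalg.svd(A, compute_uv=False)[:3]  # descending order
    # svds does not guarantee an ordering, so sort before comparing
    return np.allclose(np.sort(s)[::-1], top3_dense)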
# check if backport is actually needed:
if scipy.version.version >= LooseVersion('0.10'):
from scipy.sparse.linalg import eigs, eigsh, svds
else:
eigs, eigsh, svds = _eigs, _eigsh, _svds
| bsd-3-clause |
anntzer/scikit-learn | sklearn/ensemble/_hist_gradient_boosting/tests/test_monotonic_contraints.py | 7 | 14424 | import numpy as np
import pytest
from sklearn.ensemble._hist_gradient_boosting.grower import TreeGrower
from sklearn.ensemble._hist_gradient_boosting.common import G_H_DTYPE
from sklearn.ensemble._hist_gradient_boosting.common import X_BINNED_DTYPE
from sklearn.ensemble._hist_gradient_boosting.common import MonotonicConstraint
from sklearn.ensemble._hist_gradient_boosting.splitting import (
Splitter,
compute_node_value
)
from sklearn.ensemble._hist_gradient_boosting.histogram import HistogramBuilder
from sklearn.experimental import enable_hist_gradient_boosting # noqa
from sklearn.ensemble import HistGradientBoostingRegressor
from sklearn.ensemble import HistGradientBoostingClassifier
def is_increasing(a):
return (np.diff(a) >= 0.0).all()
def is_decreasing(a):
return (np.diff(a) <= 0.0).all()
def assert_leaves_values_monotonic(predictor, monotonic_cst):
# make sure leaves values (from left to right) are either all increasing
# or all decreasing (or neither) depending on the monotonic constraint.
nodes = predictor.nodes
def get_leaves_values():
"""get leaves values from left to right"""
values = []
def depth_first_collect_leaf_values(node_idx):
node = nodes[node_idx]
if node['is_leaf']:
values.append(node['value'])
return
depth_first_collect_leaf_values(node['left'])
depth_first_collect_leaf_values(node['right'])
depth_first_collect_leaf_values(0) # start at root (0)
return values
values = get_leaves_values()
if monotonic_cst == MonotonicConstraint.NO_CST:
# some increasing, some decreasing
assert not is_increasing(values) and not is_decreasing(values)
elif monotonic_cst == MonotonicConstraint.POS:
# all increasing
assert is_increasing(values)
else: # NEG
# all decreasing
assert is_decreasing(values)
def assert_children_values_monotonic(predictor, monotonic_cst):
    # Make sure siblings' values respect the monotonic constraints. The left
    # child's value should be lower (resp. greater) than the right child's if
    # the constraint is POS (resp. NEG).
    # Note that this property alone isn't enough to ensure full monotonicity,
    # since we also need to guarantee that all the descendants of the left
    # child won't be greater (resp. lower) than the right child, or its
    # descendants. That's why we need to bound the predicted values (this is
    # tested in assert_children_values_bounded).
nodes = predictor.nodes
left_lower = []
left_greater = []
for node in nodes:
if node['is_leaf']:
continue
left_idx = node['left']
right_idx = node['right']
if nodes[left_idx]['value'] < nodes[right_idx]['value']:
left_lower.append(node)
elif nodes[left_idx]['value'] > nodes[right_idx]['value']:
left_greater.append(node)
if monotonic_cst == MonotonicConstraint.NO_CST:
assert left_lower and left_greater
elif monotonic_cst == MonotonicConstraint.POS:
assert left_lower and not left_greater
else: # NEG
assert not left_lower and left_greater
def assert_children_values_bounded(grower, monotonic_cst):
# Make sure that the values of the children of a node are bounded by the
# middle value between that node and its sibling (if there is a monotonic
# constraint).
    # As a bonus, we also check that the siblings' values are properly ordered,
# which is slightly redundant with assert_children_values_monotonic (but
# this check is done on the grower nodes whereas
# assert_children_values_monotonic is done on the predictor nodes)
if monotonic_cst == MonotonicConstraint.NO_CST:
return
def recursively_check_children_node_values(node, right_sibling=None):
if node.is_leaf:
return
if right_sibling is not None:
middle = (node.value + right_sibling.value) / 2
if monotonic_cst == MonotonicConstraint.POS:
assert (node.left_child.value <=
node.right_child.value <=
middle)
if not right_sibling.is_leaf:
assert (middle <=
right_sibling.left_child.value <=
right_sibling.right_child.value)
else: # NEG
assert (node.left_child.value >=
node.right_child.value >=
middle)
if not right_sibling.is_leaf:
assert (middle >=
right_sibling.left_child.value >=
right_sibling.right_child.value)
recursively_check_children_node_values(node.left_child,
right_sibling=node.right_child)
recursively_check_children_node_values(node.right_child)
recursively_check_children_node_values(grower.root)
@pytest.mark.parametrize('seed', range(3))
@pytest.mark.parametrize('monotonic_cst', (
MonotonicConstraint.NO_CST,
MonotonicConstraint.POS,
MonotonicConstraint.NEG,
))
def test_nodes_values(monotonic_cst, seed):
# Build a single tree with only one feature, and make sure the nodes
# values respect the monotonic constraints.
# Considering the following tree with a monotonic POS constraint, we
# should have:
#
# root
# / \
# 5 10 # middle = 7.5
# / \ / \
# a b c d
#
# a <= b and c <= d (assert_children_values_monotonic)
# a, b <= middle <= c, d (assert_children_values_bounded)
# a <= b <= c <= d (assert_leaves_values_monotonic)
#
# The last one is a consequence of the others, but can't hurt to check
rng = np.random.RandomState(seed)
n_samples = 1000
n_features = 1
X_binned = rng.randint(0, 255, size=(n_samples, n_features),
dtype=np.uint8)
X_binned = np.asfortranarray(X_binned)
gradients = rng.normal(size=n_samples).astype(G_H_DTYPE)
hessians = np.ones(shape=1, dtype=G_H_DTYPE)
grower = TreeGrower(X_binned, gradients, hessians,
monotonic_cst=[monotonic_cst],
shrinkage=.1)
grower.grow()
    # grow() will shrink the leaves' values at the very end. For our comparison
    # tests, we need to revert the shrinkage of the leaves, else we would
    # compare the value of a leaf (shrunk) with a node (not shrunk) and the
    # test would not be correct.
    for leaf in grower.finalized_leaves:
        leaf.value /= grower.shrinkage
# We pass undefined binning_thresholds because we won't use predict anyway
predictor = grower.make_predictor(
binning_thresholds=np.zeros((X_binned.shape[1], X_binned.max() + 1))
)
# The consistency of the bounds can only be checked on the tree grower
# as the node bounds are not copied into the predictor tree. The
# consistency checks on the values of node children and leaves can be
# done either on the grower tree or on the predictor tree. We only
# do those checks on the predictor tree as the latter is derived from
# the former.
assert_children_values_monotonic(predictor, monotonic_cst)
assert_children_values_bounded(grower, monotonic_cst)
assert_leaves_values_monotonic(predictor, monotonic_cst)
@pytest.mark.parametrize('seed', range(3))
def test_predictions(seed):
# Train a model with a POS constraint on the first feature and a NEG
# constraint on the second feature, and make sure the constraints are
# respected by checking the predictions.
# test adapted from lightgbm's test_monotone_constraint(), itself inspired
# by https://xgboost.readthedocs.io/en/latest/tutorials/monotonic.html
rng = np.random.RandomState(seed)
n_samples = 1000
f_0 = rng.rand(n_samples) # positive correlation with y
    f_1 = rng.rand(n_samples)  # negative correlation with y
X = np.c_[f_0, f_1]
noise = rng.normal(loc=0.0, scale=0.01, size=n_samples)
y = (5 * f_0 + np.sin(10 * np.pi * f_0) -
5 * f_1 - np.cos(10 * np.pi * f_1) +
noise)
gbdt = HistGradientBoostingRegressor(monotonic_cst=[1, -1])
gbdt.fit(X, y)
linspace = np.linspace(0, 1, 100)
sin = np.sin(linspace)
constant = np.full_like(linspace, fill_value=.5)
# We now assert the predictions properly respect the constraints, on each
# feature. When testing for a feature we need to set the other one to a
    # constant, because the monotonic constraints are only an "all else being
    # equal" type of constraint:
    # a constraint on the first feature only means that
    # x0 < x0' => f(x0, x1) < f(x0', x1)
    # while x1 stays constant.
    # The constraint does not guarantee that
    # x0 < x0' => f(x0, x1) < f(x0', x1')
# First feature (POS)
# assert pred is all increasing when f_0 is all increasing
X = np.c_[linspace, constant]
pred = gbdt.predict(X)
assert is_increasing(pred)
# assert pred actually follows the variations of f_0
X = np.c_[sin, constant]
pred = gbdt.predict(X)
assert np.all((np.diff(pred) >= 0) == (np.diff(sin) >= 0))
# Second feature (NEG)
# assert pred is all decreasing when f_1 is all increasing
X = np.c_[constant, linspace]
pred = gbdt.predict(X)
assert is_decreasing(pred)
# assert pred actually follows the inverse variations of f_1
X = np.c_[constant, sin]
pred = gbdt.predict(X)
assert ((np.diff(pred) <= 0) == (np.diff(sin) >= 0)).all()
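# Illustrative sketch (added for clarity; not one of the original tests): with
# a single feature and a positive monotonic constraint, predictions on a
# sorted grid should be non-decreasing. The synthetic data below is an
# arbitrary assumption.
def _sketch_monotonic_cst_positive(seed=0):
    rng = np.random.RandomState(seed)
    X = rng.rand(500, 1)
    y = 3 * X[:, 0] + rng.normal(scale=0.1, size=500)
    gbdt = HistGradientBoostingRegressor(monotonic_cst=[1]).fit(X, y)
    grid = np.linspace(0, 1, 50).reshape(-1, 1)
    return is_increasing(gbdt.predict(grid))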
def test_input_error():
X = [[1, 2], [2, 3], [3, 4]]
y = [0, 1, 2]
gbdt = HistGradientBoostingRegressor(monotonic_cst=[1, 0, -1])
with pytest.raises(ValueError,
match='monotonic_cst has shape 3 but the input data'):
gbdt.fit(X, y)
for monotonic_cst in ([1, 3], [1, -3]):
gbdt = HistGradientBoostingRegressor(monotonic_cst=monotonic_cst)
with pytest.raises(ValueError,
match='must be None or an array-like of '
'-1, 0 or 1'):
gbdt.fit(X, y)
gbdt = HistGradientBoostingClassifier(monotonic_cst=[0, 1])
with pytest.raises(
ValueError,
match='monotonic constraints are not supported '
'for multiclass classification'
):
gbdt.fit(X, y)
def test_bounded_value_min_gain_to_split():
# The purpose of this test is to show that when computing the gain at a
# given split, the value of the current node should be properly bounded to
# respect the monotonic constraints, because it strongly interacts with
# min_gain_to_split. We build a simple example where gradients are [1, 1,
# 100, 1, 1] (hessians are all ones). The best split happens on the 3rd
# bin, and depending on whether the value of the node is bounded or not,
# the min_gain_to_split constraint is or isn't satisfied.
l2_regularization = 0
min_hessian_to_split = 0
min_samples_leaf = 1
n_bins = n_samples = 5
X_binned = np.arange(n_samples).reshape(-1, 1).astype(X_BINNED_DTYPE)
sample_indices = np.arange(n_samples, dtype=np.uint32)
all_hessians = np.ones(n_samples, dtype=G_H_DTYPE)
all_gradients = np.array([1, 1, 100, 1, 1], dtype=G_H_DTYPE)
sum_gradients = all_gradients.sum()
sum_hessians = all_hessians.sum()
hessians_are_constant = False
builder = HistogramBuilder(X_binned, n_bins, all_gradients,
all_hessians, hessians_are_constant)
n_bins_non_missing = np.array([n_bins - 1] * X_binned.shape[1],
dtype=np.uint32)
has_missing_values = np.array([False] * X_binned.shape[1], dtype=np.uint8)
monotonic_cst = np.array(
[MonotonicConstraint.NO_CST] * X_binned.shape[1],
dtype=np.int8)
is_categorical = np.zeros_like(monotonic_cst, dtype=np.uint8)
missing_values_bin_idx = n_bins - 1
children_lower_bound, children_upper_bound = -np.inf, np.inf
min_gain_to_split = 2000
splitter = Splitter(X_binned, n_bins_non_missing, missing_values_bin_idx,
has_missing_values, is_categorical, monotonic_cst,
l2_regularization, min_hessian_to_split,
min_samples_leaf, min_gain_to_split,
hessians_are_constant)
histograms = builder.compute_histograms_brute(sample_indices)
# Since the gradient array is [1, 1, 100, 1, 1]
# the max possible gain happens on the 3rd bin (or equivalently in the 2nd)
    # and is equal to about 1307, which is less than min_gain_to_split = 2000, so
# the node is considered unsplittable (gain = -1)
current_lower_bound, current_upper_bound = -np.inf, np.inf
value = compute_node_value(sum_gradients, sum_hessians,
current_lower_bound, current_upper_bound,
l2_regularization)
# the unbounded value is equal to -sum_gradients / sum_hessians
assert value == pytest.approx(-104 / 5)
split_info = splitter.find_node_split(n_samples, histograms,
sum_gradients, sum_hessians, value,
lower_bound=children_lower_bound,
upper_bound=children_upper_bound)
assert split_info.gain == -1 # min_gain_to_split not respected
# here again the max possible gain is on the 3rd bin but we now cap the
# value of the node into [-10, inf].
# This means the gain is now about 2430 which is more than the
# min_gain_to_split constraint.
current_lower_bound, current_upper_bound = -10, np.inf
value = compute_node_value(sum_gradients, sum_hessians,
current_lower_bound, current_upper_bound,
l2_regularization)
assert value == -10
split_info = splitter.find_node_split(n_samples, histograms,
sum_gradients, sum_hessians, value,
lower_bound=children_lower_bound,
upper_bound=children_upper_bound)
assert split_info.gain > min_gain_to_split
| bsd-3-clause |
laurendiperna/data-science-from-scratch | code/recommender_systems.py | 60 | 6291 | from __future__ import division
import math, random
from collections import defaultdict, Counter
from linear_algebra import dot
users_interests = [
["Hadoop", "Big Data", "HBase", "Java", "Spark", "Storm", "Cassandra"],
["NoSQL", "MongoDB", "Cassandra", "HBase", "Postgres"],
["Python", "scikit-learn", "scipy", "numpy", "statsmodels", "pandas"],
["R", "Python", "statistics", "regression", "probability"],
["machine learning", "regression", "decision trees", "libsvm"],
["Python", "R", "Java", "C++", "Haskell", "programming languages"],
["statistics", "probability", "mathematics", "theory"],
["machine learning", "scikit-learn", "Mahout", "neural networks"],
["neural networks", "deep learning", "Big Data", "artificial intelligence"],
["Hadoop", "Java", "MapReduce", "Big Data"],
["statistics", "R", "statsmodels"],
["C++", "deep learning", "artificial intelligence", "probability"],
["pandas", "R", "Python"],
["databases", "HBase", "Postgres", "MySQL", "MongoDB"],
["libsvm", "regression", "support vector machines"]
]
popular_interests = Counter(interest
for user_interests in users_interests
for interest in user_interests).most_common()
def most_popular_new_interests(user_interests, max_results=5):
suggestions = [(interest, frequency)
for interest, frequency in popular_interests
if interest not in user_interests]
return suggestions[:max_results]
#
# user-based filtering
#
def cosine_similarity(v, w):
return dot(v, w) / math.sqrt(dot(v, v) * dot(w, w))
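# Hedged mini-example (added for illustration; not in the original script):
# cosine similarity is 1.0 for identical interest vectors and 0.0 for
# vectors with no interests in common. The vectors below are made up.
def cosine_similarity_examples():
    v = [1, 0, 1]
    w = [0, 1, 0]
    return cosine_similarity(v, v), cosine_similarity(v, w)  # (1.0, 0.0)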
unique_interests = sorted(list({ interest
for user_interests in users_interests
for interest in user_interests }))
def make_user_interest_vector(user_interests):
"""given a list of interests, produce a vector whose i-th element is 1
if unique_interests[i] is in the list, 0 otherwise"""
return [1 if interest in user_interests else 0
for interest in unique_interests]
user_interest_matrix = map(make_user_interest_vector, users_interests)
user_similarities = [[cosine_similarity(interest_vector_i, interest_vector_j)
for interest_vector_j in user_interest_matrix]
for interest_vector_i in user_interest_matrix]
def most_similar_users_to(user_id):
pairs = [(other_user_id, similarity) # find other
for other_user_id, similarity in # users with
enumerate(user_similarities[user_id]) # nonzero
if user_id != other_user_id and similarity > 0] # similarity
return sorted(pairs, # sort them
key=lambda (_, similarity): similarity, # most similar
reverse=True) # first
def user_based_suggestions(user_id, include_current_interests=False):
# sum up the similarities
suggestions = defaultdict(float)
for other_user_id, similarity in most_similar_users_to(user_id):
for interest in users_interests[other_user_id]:
suggestions[interest] += similarity
# convert them to a sorted list
suggestions = sorted(suggestions.items(),
key=lambda (_, weight): weight,
reverse=True)
# and (maybe) exclude already-interests
if include_current_interests:
return suggestions
else:
return [(suggestion, weight)
for suggestion, weight in suggestions
if suggestion not in users_interests[user_id]]
#
# Item-Based Collaborative Filtering
#
interest_user_matrix = [[user_interest_vector[j]
for user_interest_vector in user_interest_matrix]
for j, _ in enumerate(unique_interests)]
interest_similarities = [[cosine_similarity(user_vector_i, user_vector_j)
for user_vector_j in interest_user_matrix]
for user_vector_i in interest_user_matrix]
def most_similar_interests_to(interest_id):
similarities = interest_similarities[interest_id]
pairs = [(unique_interests[other_interest_id], similarity)
for other_interest_id, similarity in enumerate(similarities)
if interest_id != other_interest_id and similarity > 0]
return sorted(pairs,
key=lambda (_, similarity): similarity,
reverse=True)
def item_based_suggestions(user_id, include_current_interests=False):
suggestions = defaultdict(float)
user_interest_vector = user_interest_matrix[user_id]
for interest_id, is_interested in enumerate(user_interest_vector):
if is_interested == 1:
similar_interests = most_similar_interests_to(interest_id)
for interest, similarity in similar_interests:
suggestions[interest] += similarity
suggestions = sorted(suggestions.items(),
key=lambda (_, similarity): similarity,
reverse=True)
if include_current_interests:
return suggestions
else:
return [(suggestion, weight)
for suggestion, weight in suggestions
if suggestion not in users_interests[user_id]]
if __name__ == "__main__":
print "Popular Interests"
print popular_interests
print
print "Most Popular New Interests"
print "already like:", ["NoSQL", "MongoDB", "Cassandra", "HBase", "Postgres"]
print most_popular_new_interests(["NoSQL", "MongoDB", "Cassandra", "HBase", "Postgres"])
print
print "already like:", ["R", "Python", "statistics", "regression", "probability"]
print most_popular_new_interests(["R", "Python", "statistics", "regression", "probability"])
print
print "User based similarity"
print "most similar to 0"
print most_similar_users_to(0)
print "Suggestions for 0"
print user_based_suggestions(0)
print
print "Item based similarity"
print "most similar to 'Big Data'"
print most_similar_interests_to(0)
print
print "suggestions for user 0"
print item_based_suggestions(0)
| unlicense |
Zsailer/zsplot | zsplot/kde2d.py | 1 | 2134 | import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from scipy.stats.kde import gaussian_kde
import numpy as np
def kde2d(x, y, color="C0", linewidths=1, axes=None, axis=None, **kwargs):
"""Make a 2d plot of scattered points and their 1d KDE on the sides of the plot.
Only works with matplotlib 2.0+
Parameters
----------
x : array
x data
y : array
y data
color : str
matplotlib color
linewidths : float
width of KDE lines
axes : 3-element list of axes
preformatted axes for each subplot (order=[scatter, xkde, ykde])
axis : 4 element list
axis for scatter plot
    Additional keyword args are passed through to the scatter plot call.
Returns
-------
fig : Figure object
matplotlib Figure object
axes : 3 element list
axes for each subplot (order=[scatter, xkde, ykde])
"""
    if axes is None:
fig = plt.figure(figsize=(7,7))
gs = gridspec.GridSpec(4, 4)
gs.update(hspace=0.3, wspace=0.3)
base = plt.subplot(gs[1:,0:3])
xkde = plt.subplot(gs[0:1,:3], sharex=base)
ykde = plt.subplot(gs[1:, 3:], sharey=base)
axes=[base, xkde, ykde]
else:
base, xkde, ykde = axes
fig = axes[0].get_figure()
xkd = gaussian_kde(x)
ykd = gaussian_kde(y)
xx = np.linspace(min(x), max(x), 1000)
yy = np.linspace(min(y), max(y), 1000)
px = xkd.pdf(xx)
py = ykd.pdf(yy)
px = px/sum(px)
py = py/sum(py)
# Set up base figure
base.plot(x,y, marker="o", linewidth=0, color=color, **kwargs)
base.spines["right"].set_visible(False)
base.spines["top"].set_visible(False)
if axis is not None:
base.axis(axis)
xkde.plot(xx, px, color=color, linewidth=linewidths)
xkde.spines["right"].set_visible(False)
xkde.spines["top"].set_visible(False)
plt.setp(xkde.get_xticklabels(), visible=False)
ykde.plot(py, yy, color=color, linewidth=linewidths)
ykde.spines["right"].set_visible(False)
ykde.spines["top"].set_visible(False)
plt.setp(ykde.get_yticklabels(), visible=False)
return fig, axes
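# A hedged usage sketch (added for illustration; not part of the original
# module). The synthetic correlated data below is an arbitrary assumption.
def _demo_kde2d(seed=0):
    rng = np.random.RandomState(seed)
    x = rng.normal(size=300)
    y = 0.5 * x + rng.normal(scale=0.5, size=300)
    # scatter plot with marginal KDEs; extra kwargs go to the scatter call
    fig, axes = kde2d(x, y, color="C1", alpha=0.5)
    return fig, axes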
| mit |
grlee77/scipy | scipy/stats/_distn_infrastructure.py | 2 | 135764 | #
# Author: Travis Oliphant 2002-2011 with contributions from
# SciPy Developers 2004-2011
#
from scipy._lib._util import getfullargspec_no_self as _getfullargspec
import sys
import keyword
import re
import types
import warnings
import inspect
from itertools import zip_longest
from scipy._lib import doccer
from scipy._lib._util import _lazywhere
from ._distr_params import distcont, distdiscrete
from scipy._lib._util import check_random_state
from scipy.special import (comb, chndtr, entr, xlogy, ive)
# for root finding for continuous distribution ppf, and max likelihood
# estimation
from scipy import optimize
# for functions of continuous distributions (e.g. moments, entropy, cdf)
from scipy import integrate
# to approximate the pdf of a continuous distribution given its cdf
from scipy.misc import derivative
# for scipy.stats.entropy. Attempts to import just that function or file
# have caused import problems
from scipy import stats
from numpy import (arange, putmask, ravel, ones, shape, ndarray, zeros, floor,
logical_and, log, sqrt, place, argmax, vectorize, asarray,
nan, inf, isinf, NINF, empty)
import numpy as np
from ._constants import _XMAX
# These are the docstring parts used for substitution in specific
# distribution docstrings
docheaders = {'methods': """\nMethods\n-------\n""",
'notes': """\nNotes\n-----\n""",
'examples': """\nExamples\n--------\n"""}
_doc_rvs = """\
rvs(%(shapes)s, loc=0, scale=1, size=1, random_state=None)
Random variates.
"""
_doc_pdf = """\
pdf(x, %(shapes)s, loc=0, scale=1)
Probability density function.
"""
_doc_logpdf = """\
logpdf(x, %(shapes)s, loc=0, scale=1)
Log of the probability density function.
"""
_doc_pmf = """\
pmf(k, %(shapes)s, loc=0, scale=1)
Probability mass function.
"""
_doc_logpmf = """\
logpmf(k, %(shapes)s, loc=0, scale=1)
Log of the probability mass function.
"""
_doc_cdf = """\
cdf(x, %(shapes)s, loc=0, scale=1)
Cumulative distribution function.
"""
_doc_logcdf = """\
logcdf(x, %(shapes)s, loc=0, scale=1)
Log of the cumulative distribution function.
"""
_doc_sf = """\
sf(x, %(shapes)s, loc=0, scale=1)
Survival function (also defined as ``1 - cdf``, but `sf` is sometimes more accurate).
"""
_doc_logsf = """\
logsf(x, %(shapes)s, loc=0, scale=1)
Log of the survival function.
"""
_doc_ppf = """\
ppf(q, %(shapes)s, loc=0, scale=1)
Percent point function (inverse of ``cdf`` --- percentiles).
"""
_doc_isf = """\
isf(q, %(shapes)s, loc=0, scale=1)
Inverse survival function (inverse of ``sf``).
"""
_doc_moment = """\
moment(n, %(shapes)s, loc=0, scale=1)
Non-central moment of order n
"""
_doc_stats = """\
stats(%(shapes)s, loc=0, scale=1, moments='mv')
Mean('m'), variance('v'), skew('s'), and/or kurtosis('k').
"""
_doc_entropy = """\
entropy(%(shapes)s, loc=0, scale=1)
(Differential) entropy of the RV.
"""
_doc_fit = """\
fit(data)
Parameter estimates for generic data.
See `scipy.stats.rv_continuous.fit <https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_continuous.fit.html#scipy.stats.rv_continuous.fit>`__ for detailed documentation of the
keyword arguments.
"""
_doc_expect = """\
expect(func, args=(%(shapes_)s), loc=0, scale=1, lb=None, ub=None, conditional=False, **kwds)
Expected value of a function (of one argument) with respect to the distribution.
"""
_doc_expect_discrete = """\
expect(func, args=(%(shapes_)s), loc=0, lb=None, ub=None, conditional=False)
Expected value of a function (of one argument) with respect to the distribution.
"""
_doc_median = """\
median(%(shapes)s, loc=0, scale=1)
Median of the distribution.
"""
_doc_mean = """\
mean(%(shapes)s, loc=0, scale=1)
Mean of the distribution.
"""
_doc_var = """\
var(%(shapes)s, loc=0, scale=1)
Variance of the distribution.
"""
_doc_std = """\
std(%(shapes)s, loc=0, scale=1)
Standard deviation of the distribution.
"""
_doc_interval = """\
interval(alpha, %(shapes)s, loc=0, scale=1)
Endpoints of the range that contains fraction alpha [0, 1] of the distribution
"""
_doc_allmethods = ''.join([docheaders['methods'], _doc_rvs, _doc_pdf,
_doc_logpdf, _doc_cdf, _doc_logcdf, _doc_sf,
_doc_logsf, _doc_ppf, _doc_isf, _doc_moment,
_doc_stats, _doc_entropy, _doc_fit,
_doc_expect, _doc_median,
_doc_mean, _doc_var, _doc_std, _doc_interval])
_doc_default_longsummary = """\
As an instance of the `rv_continuous` class, `%(name)s` object inherits from it
a collection of generic methods (see below for the full list),
and completes them with details specific for this particular distribution.
"""
_doc_default_frozen_note = """
Alternatively, the object may be called (as a function) to fix the shape,
location, and scale parameters returning a "frozen" continuous RV object:
rv = %(name)s(%(shapes)s, loc=0, scale=1)
- Frozen RV object with the same methods but holding the given shape,
location, and scale fixed.
"""
_doc_default_example = """\
Examples
--------
>>> from scipy.stats import %(name)s
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
Calculate the first four moments:
%(set_vals_stmt)s
>>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk')
Display the probability density function (``pdf``):
>>> x = np.linspace(%(name)s.ppf(0.01, %(shapes)s),
... %(name)s.ppf(0.99, %(shapes)s), 100)
>>> ax.plot(x, %(name)s.pdf(x, %(shapes)s),
... 'r-', lw=5, alpha=0.6, label='%(name)s pdf')
Alternatively, the distribution object can be called (as a function)
to fix the shape, location and scale parameters. This returns a "frozen"
RV object holding the given parameters fixed.
Freeze the distribution and display the frozen ``pdf``:
>>> rv = %(name)s(%(shapes)s)
>>> ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')
Check accuracy of ``cdf`` and ``ppf``:
>>> vals = %(name)s.ppf([0.001, 0.5, 0.999], %(shapes)s)
>>> np.allclose([0.001, 0.5, 0.999], %(name)s.cdf(vals, %(shapes)s))
True
Generate random numbers:
>>> r = %(name)s.rvs(%(shapes)s, size=1000)
And compare the histogram:
>>> ax.hist(r, density=True, histtype='stepfilled', alpha=0.2)
>>> ax.legend(loc='best', frameon=False)
>>> plt.show()
"""
_doc_default_locscale = """\
The probability density above is defined in the "standardized" form. To shift
and/or scale the distribution use the ``loc`` and ``scale`` parameters.
Specifically, ``%(name)s.pdf(x, %(shapes)s, loc, scale)`` is identically
equivalent to ``%(name)s.pdf(y, %(shapes)s) / scale`` with
``y = (x - loc) / scale``. Note that shifting the location of a distribution
does not make it a "noncentral" distribution; noncentral generalizations of
some distributions are available in separate classes.
"""
_doc_default = ''.join([_doc_default_longsummary,
_doc_allmethods,
'\n',
_doc_default_example])
_doc_default_before_notes = ''.join([_doc_default_longsummary,
_doc_allmethods])
docdict = {
'rvs': _doc_rvs,
'pdf': _doc_pdf,
'logpdf': _doc_logpdf,
'cdf': _doc_cdf,
'logcdf': _doc_logcdf,
'sf': _doc_sf,
'logsf': _doc_logsf,
'ppf': _doc_ppf,
'isf': _doc_isf,
'stats': _doc_stats,
'entropy': _doc_entropy,
'fit': _doc_fit,
'moment': _doc_moment,
'expect': _doc_expect,
'interval': _doc_interval,
'mean': _doc_mean,
'std': _doc_std,
'var': _doc_var,
'median': _doc_median,
'allmethods': _doc_allmethods,
'longsummary': _doc_default_longsummary,
'frozennote': _doc_default_frozen_note,
'example': _doc_default_example,
'default': _doc_default,
'before_notes': _doc_default_before_notes,
'after_notes': _doc_default_locscale
}
# Reuse common content between continuous and discrete docs, change some
# minor bits.
docdict_discrete = docdict.copy()
docdict_discrete['pmf'] = _doc_pmf
docdict_discrete['logpmf'] = _doc_logpmf
docdict_discrete['expect'] = _doc_expect_discrete
_doc_disc_methods = ['rvs', 'pmf', 'logpmf', 'cdf', 'logcdf', 'sf', 'logsf',
'ppf', 'isf', 'stats', 'entropy', 'expect', 'median',
'mean', 'var', 'std', 'interval']
for obj in _doc_disc_methods:
docdict_discrete[obj] = docdict_discrete[obj].replace(', scale=1', '')
_doc_disc_methods_err_varname = ['cdf', 'logcdf', 'sf', 'logsf']
for obj in _doc_disc_methods_err_varname:
docdict_discrete[obj] = docdict_discrete[obj].replace('(x, ', '(k, ')
docdict_discrete.pop('pdf')
docdict_discrete.pop('logpdf')
_doc_allmethods = ''.join([docdict_discrete[obj] for obj in _doc_disc_methods])
docdict_discrete['allmethods'] = docheaders['methods'] + _doc_allmethods
docdict_discrete['longsummary'] = _doc_default_longsummary.replace(
'rv_continuous', 'rv_discrete')
_doc_default_frozen_note = """
Alternatively, the object may be called (as a function) to fix the shape and
location parameters returning a "frozen" discrete RV object:
rv = %(name)s(%(shapes)s, loc=0)
- Frozen RV object with the same methods but holding the given shape and
location fixed.
"""
docdict_discrete['frozennote'] = _doc_default_frozen_note
_doc_default_discrete_example = """\
Examples
--------
>>> from scipy.stats import %(name)s
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
Calculate the first four moments:
%(set_vals_stmt)s
>>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk')
Display the probability mass function (``pmf``):
>>> x = np.arange(%(name)s.ppf(0.01, %(shapes)s),
... %(name)s.ppf(0.99, %(shapes)s))
>>> ax.plot(x, %(name)s.pmf(x, %(shapes)s), 'bo', ms=8, label='%(name)s pmf')
>>> ax.vlines(x, 0, %(name)s.pmf(x, %(shapes)s), colors='b', lw=5, alpha=0.5)
Alternatively, the distribution object can be called (as a function)
to fix the shape and location. This returns a "frozen" RV object holding
the given parameters fixed.
Freeze the distribution and display the frozen ``pmf``:
>>> rv = %(name)s(%(shapes)s)
>>> ax.vlines(x, 0, rv.pmf(x), colors='k', linestyles='-', lw=1,
... label='frozen pmf')
>>> ax.legend(loc='best', frameon=False)
>>> plt.show()
Check accuracy of ``cdf`` and ``ppf``:
>>> prob = %(name)s.cdf(x, %(shapes)s)
>>> np.allclose(x, %(name)s.ppf(prob, %(shapes)s))
True
Generate random numbers:
>>> r = %(name)s.rvs(%(shapes)s, size=1000)
"""
_doc_default_discrete_locscale = """\
The probability mass function above is defined in the "standardized" form.
To shift the distribution use the ``loc`` parameter.
Specifically, ``%(name)s.pmf(k, %(shapes)s, loc)`` is identically
equivalent to ``%(name)s.pmf(k - loc, %(shapes)s)``.
"""
docdict_discrete['example'] = _doc_default_discrete_example
docdict_discrete['after_notes'] = _doc_default_discrete_locscale
_doc_default_before_notes = ''.join([docdict_discrete['longsummary'],
docdict_discrete['allmethods']])
docdict_discrete['before_notes'] = _doc_default_before_notes
_doc_default_disc = ''.join([docdict_discrete['longsummary'],
docdict_discrete['allmethods'],
docdict_discrete['frozennote'],
docdict_discrete['example']])
docdict_discrete['default'] = _doc_default_disc
# clean up all the separate docstring elements, we do not need them anymore
for obj in [s for s in dir() if s.startswith('_doc_')]:
exec('del ' + obj)
del obj
def _moment(data, n, mu=None):
if mu is None:
mu = data.mean()
return ((data - mu)**n).mean()
def _moment_from_stats(n, mu, mu2, g1, g2, moment_func, args):
if (n == 0):
return 1.0
elif (n == 1):
if mu is None:
val = moment_func(1, *args)
else:
val = mu
elif (n == 2):
if mu2 is None or mu is None:
val = moment_func(2, *args)
else:
val = mu2 + mu*mu
elif (n == 3):
if g1 is None or mu2 is None or mu is None:
val = moment_func(3, *args)
else:
mu3 = g1 * np.power(mu2, 1.5) # 3rd central moment
val = mu3+3*mu*mu2+mu*mu*mu # 3rd non-central moment
elif (n == 4):
if g1 is None or g2 is None or mu2 is None or mu is None:
val = moment_func(4, *args)
else:
mu4 = (g2+3.0)*(mu2**2.0) # 4th central moment
mu3 = g1*np.power(mu2, 1.5) # 3rd central moment
val = mu4+4*mu*mu3+6*mu*mu*mu2+mu*mu*mu*mu
else:
val = moment_func(n, *args)
return val
def _skew(data):
"""
skew is third central moment / variance**(1.5)
"""
data = np.ravel(data)
mu = data.mean()
m2 = ((data - mu)**2).mean()
m3 = ((data - mu)**3).mean()
return m3 / np.power(m2, 1.5)
def _kurtosis(data):
"""kurtosis is fourth central moment / variance**2 - 3."""
data = np.ravel(data)
mu = data.mean()
m2 = ((data - mu)**2).mean()
m4 = ((data - mu)**4).mean()
return m4 / m2**2 - 3
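# Quick, hedged consistency check (added for illustration; not part of the
# original module): the helpers above use biased (population) moments, which
# matches the defaults of scipy.stats.skew / scipy.stats.kurtosis. The
# exponential sample is an arbitrary assumption.
def _demo_skew_kurtosis(seed=0):
    rng = np.random.RandomState(seed)
    data = rng.exponential(size=1000)
    return (np.isclose(_skew(data), stats.skew(data)),
            np.isclose(_kurtosis(data), stats.kurtosis(data)))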
def _fit_determine_optimizer(optimizer):
if not callable(optimizer) and isinstance(optimizer, str):
if not optimizer.startswith('fmin_'):
optimizer = "fmin_"+optimizer
if optimizer == 'fmin_':
optimizer = 'fmin'
try:
optimizer = getattr(optimize, optimizer)
except AttributeError as e:
raise ValueError("%s is not a valid optimizer" % optimizer) from e
return optimizer
# Frozen RV class
class rv_frozen:
def __init__(self, dist, *args, **kwds):
self.args = args
self.kwds = kwds
# create a new instance
self.dist = dist.__class__(**dist._updated_ctor_param())
shapes, _, _ = self.dist._parse_args(*args, **kwds)
self.a, self.b = self.dist._get_support(*shapes)
@property
def random_state(self):
return self.dist._random_state
@random_state.setter
def random_state(self, seed):
self.dist._random_state = check_random_state(seed)
def pdf(self, x): # raises AttributeError in frozen discrete distribution
return self.dist.pdf(x, *self.args, **self.kwds)
def logpdf(self, x):
return self.dist.logpdf(x, *self.args, **self.kwds)
def cdf(self, x):
return self.dist.cdf(x, *self.args, **self.kwds)
def logcdf(self, x):
return self.dist.logcdf(x, *self.args, **self.kwds)
def ppf(self, q):
return self.dist.ppf(q, *self.args, **self.kwds)
def isf(self, q):
return self.dist.isf(q, *self.args, **self.kwds)
def rvs(self, size=None, random_state=None):
kwds = self.kwds.copy()
kwds.update({'size': size, 'random_state': random_state})
return self.dist.rvs(*self.args, **kwds)
def sf(self, x):
return self.dist.sf(x, *self.args, **self.kwds)
def logsf(self, x):
return self.dist.logsf(x, *self.args, **self.kwds)
def stats(self, moments='mv'):
kwds = self.kwds.copy()
kwds.update({'moments': moments})
return self.dist.stats(*self.args, **kwds)
def median(self):
return self.dist.median(*self.args, **self.kwds)
def mean(self):
return self.dist.mean(*self.args, **self.kwds)
def var(self):
return self.dist.var(*self.args, **self.kwds)
def std(self):
return self.dist.std(*self.args, **self.kwds)
def moment(self, n):
return self.dist.moment(n, *self.args, **self.kwds)
def entropy(self):
return self.dist.entropy(*self.args, **self.kwds)
def pmf(self, k):
return self.dist.pmf(k, *self.args, **self.kwds)
def logpmf(self, k):
return self.dist.logpmf(k, *self.args, **self.kwds)
def interval(self, alpha):
return self.dist.interval(alpha, *self.args, **self.kwds)
def expect(self, func=None, lb=None, ub=None, conditional=False, **kwds):
# expect method only accepts shape parameters as positional args
# hence convert self.args, self.kwds, also loc/scale
# See the .expect method docstrings for the meaning of
# other parameters.
a, loc, scale = self.dist._parse_args(*self.args, **self.kwds)
if isinstance(self.dist, rv_discrete):
return self.dist.expect(func, a, loc, lb, ub, conditional, **kwds)
else:
return self.dist.expect(func, a, loc, scale, lb, ub,
conditional, **kwds)
def support(self):
return self.dist.support(*self.args, **self.kwds)
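# A hedged illustration of the frozen-distribution mechanism (added for
# clarity; not part of the original module): freezing fixes shape/loc/scale
# once, so the methods can then be called without repeating them. The
# normal distribution and its parameters are illustrative choices.
def _demo_rv_frozen():
    from scipy.stats import norm
    rv = norm(loc=2.0, scale=3.0)  # returns an rv_frozen instance
    return rv.mean(), rv.std(), rv.cdf(2.0)  # (2.0, 3.0, 0.5)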
def argsreduce(cond, *args):
"""Clean arguments to:
1. Ensure all arguments are iterable (arrays of dimension at least one
2. If cond != True and size > 1, ravel(args[i]) where ravel(condition) is
True, in 1D.
Return list of processed arguments.
Examples
--------
>>> rng = np.random.default_rng()
>>> A = rng.random((4, 5))
>>> B = 2
>>> C = rng.random((1, 5))
>>> cond = np.ones(A.shape)
>>> [A1, B1, C1] = argsreduce(cond, A, B, C)
>>> A1.shape
(4, 5)
>>> B1.shape
(1,)
>>> C1.shape
(1, 5)
>>> cond[2,:] = 0
>>> [A1, B1, C1] = argsreduce(cond, A, B, C)
>>> A1.shape
(15,)
>>> B1.shape
(1,)
>>> C1.shape
(15,)
"""
# some distributions assume arguments are iterable.
newargs = np.atleast_1d(*args)
# np.atleast_1d returns an array if only one argument, or a list of arrays
# if more than one argument.
if not isinstance(newargs, list):
newargs = [newargs, ]
if np.all(cond):
# Nothing to do
return newargs
s = cond.shape
# np.extract returns flattened arrays, which are not broadcastable together
# unless they are either the same size or size == 1.
return [(arg if np.size(arg) == 1
else np.extract(cond, np.broadcast_to(arg, s)))
for arg in newargs]
parse_arg_template = """
def _parse_args(self, %(shape_arg_str)s %(locscale_in)s):
return (%(shape_arg_str)s), %(locscale_out)s
def _parse_args_rvs(self, %(shape_arg_str)s %(locscale_in)s, size=None):
return self._argcheck_rvs(%(shape_arg_str)s %(locscale_out)s, size=size)
def _parse_args_stats(self, %(shape_arg_str)s %(locscale_in)s, moments='mv'):
return (%(shape_arg_str)s), %(locscale_out)s, moments
"""
# Both the continuous and discrete distributions depend on ncx2.
# The function name ncx2 is an abbreviation for noncentral chi squared.
def _ncx2_log_pdf(x, df, nc):
# We use (xs**2 + ns**2)/2 = (xs - ns)**2/2 + xs*ns, and include the
# factor of exp(-xs*ns) into the ive function to improve numerical
# stability at large values of xs. See also `rice.pdf`.
df2 = df/2.0 - 1.0
xs, ns = np.sqrt(x), np.sqrt(nc)
res = xlogy(df2/2.0, x/nc) - 0.5*(xs - ns)**2
corr = ive(df2, xs*ns) / 2.0
# Return res + np.log(corr) avoiding np.log(0)
return _lazywhere(
corr > 0,
(res, corr),
f=lambda r, c: r + np.log(c),
fillvalue=-np.inf)
def _ncx2_pdf(x, df, nc):
# Copy of _ncx2_log_pdf avoiding np.log(0) when corr = 0
df2 = df/2.0 - 1.0
xs, ns = np.sqrt(x), np.sqrt(nc)
res = xlogy(df2/2.0, x/nc) - 0.5*(xs - ns)**2
corr = ive(df2, xs*ns) / 2.0
return np.exp(res) * corr
def _ncx2_cdf(x, df, nc):
return chndtr(x, df, nc)
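# Hedged cross-check (added for illustration; not part of the original
# module): the private helper above should agree closely with the public
# noncentral chi-squared distribution. The df/nc values are arbitrary
# assumptions.
def _demo_ncx2_pdf_consistency():
    from scipy.stats import ncx2
    x = np.linspace(0.1, 20, 50)
    df, nc = 4.0, 2.5
    return np.allclose(_ncx2_pdf(x, df, nc), ncx2.pdf(x, df, nc))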
class rv_generic:
"""Class which encapsulates common functionality between rv_discrete
and rv_continuous.
"""
def __init__(self, seed=None):
super().__init__()
# figure out if _stats signature has 'moments' keyword
sig = _getfullargspec(self._stats)
self._stats_has_moments = ((sig.varkw is not None) or
('moments' in sig.args) or
('moments' in sig.kwonlyargs))
self._random_state = check_random_state(seed)
        # For historical reasons, `size` was made an attribute that was read
        # inside _rvs(). The code is being changed so that 'size' is an
        # argument to self._rvs(). However, some external (non-SciPy)
        # distributions have not been updated. Maintain backwards
        # compatibility by checking if the self._rvs() signature has the
        # 'size' keyword, or a **kwarg, and if not set self._size inside
        # self.rvs() before calling self._rvs().
argspec = inspect.getfullargspec(self._rvs)
self._rvs_uses_size_attribute = (argspec.varkw is None and
'size' not in argspec.args and
'size' not in argspec.kwonlyargs)
# Warn on first use only
self._rvs_size_warned = False
@property
def random_state(self):
"""Get or set the generator object for generating random variates.
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
"""
return self._random_state
@random_state.setter
def random_state(self, seed):
self._random_state = check_random_state(seed)
def __setstate__(self, state):
try:
self.__dict__.update(state)
# attaches the dynamically created methods on each instance.
# if a subclass overrides rv_generic.__setstate__, or implements
            # its own _attach_methods, then it must make sure that
# _attach_argparser_methods is called.
self._attach_methods()
except ValueError:
# reconstitute an old pickle scipy<1.6, that contains
# (_ctor_param, random_state) as state
self._ctor_param = state[0]
self._random_state = state[1]
self.__init__()
def _attach_methods(self):
"""Attaches dynamically created methods to the rv_* instance.
This method must be overridden by subclasses, and must itself call
_attach_argparser_methods. This method is called in __init__ in
subclasses, and in __setstate__
"""
raise NotImplementedError
def _attach_argparser_methods(self):
"""
Generates the argument-parsing functions dynamically and attaches
them to the instance.
Should be called from `_attach_methods`, typically in __init__ and
during unpickling (__setstate__)
"""
ns = {}
exec(self._parse_arg_template, ns)
# NB: attach to the instance, not class
for name in ['_parse_args', '_parse_args_stats', '_parse_args_rvs']:
setattr(self, name, types.MethodType(ns[name], self))
def _construct_argparser(
self, meths_to_inspect, locscale_in, locscale_out):
"""Construct the parser string for the shape arguments.
This method should be called in __init__ of a class for each
distribution. It creates the `_parse_arg_template` attribute that is
then used by `_attach_argparser_methods` to dynamically create and
attach the `_parse_args`, `_parse_args_stats`, `_parse_args_rvs`
methods to the instance.
If self.shapes is a non-empty string, interprets it as a
comma-separated list of shape parameters.
Otherwise inspects the call signatures of `meths_to_inspect`
and constructs the argument-parsing functions from these.
In this case also sets `shapes` and `numargs`.
"""
if self.shapes:
# sanitize the user-supplied shapes
if not isinstance(self.shapes, str):
raise TypeError('shapes must be a string.')
shapes = self.shapes.replace(',', ' ').split()
for field in shapes:
if keyword.iskeyword(field):
raise SyntaxError('keywords cannot be used as shapes.')
if not re.match('^[_a-zA-Z][_a-zA-Z0-9]*$', field):
raise SyntaxError(
'shapes must be valid python identifiers')
else:
# find out the call signatures (_pdf, _cdf etc), deduce shape
# arguments. Generic methods only have 'self, x', any further args
# are shapes.
shapes_list = []
for meth in meths_to_inspect:
shapes_args = _getfullargspec(meth) # NB does not contain self
args = shapes_args.args[1:] # peel off 'x', too
if args:
shapes_list.append(args)
# *args or **kwargs are not allowed w/automatic shapes
if shapes_args.varargs is not None:
raise TypeError(
'*args are not allowed w/out explicit shapes')
if shapes_args.varkw is not None:
raise TypeError(
'**kwds are not allowed w/out explicit shapes')
if shapes_args.kwonlyargs:
raise TypeError(
'kwonly args are not allowed w/out explicit shapes')
if shapes_args.defaults is not None:
raise TypeError('defaults are not allowed for shapes')
if shapes_list:
shapes = shapes_list[0]
# make sure the signatures are consistent
for item in shapes_list:
if item != shapes:
raise TypeError('Shape arguments are inconsistent.')
else:
shapes = []
# have the arguments, construct the method from template
shapes_str = ', '.join(shapes) + ', ' if shapes else '' # NB: not None
dct = dict(shape_arg_str=shapes_str,
locscale_in=locscale_in,
locscale_out=locscale_out,
)
# this string is used by _attach_argparser_methods
self._parse_arg_template = parse_arg_template % dct
self.shapes = ', '.join(shapes) if shapes else None
if not hasattr(self, 'numargs'):
# allows more general subclassing with *args
self.numargs = len(shapes)
def _construct_doc(self, docdict, shapes_vals=None):
"""Construct the instance docstring with string substitutions."""
tempdict = docdict.copy()
tempdict['name'] = self.name or 'distname'
tempdict['shapes'] = self.shapes or ''
if shapes_vals is None:
shapes_vals = ()
vals = ', '.join('%.3g' % val for val in shapes_vals)
tempdict['vals'] = vals
tempdict['shapes_'] = self.shapes or ''
if self.shapes and self.numargs == 1:
tempdict['shapes_'] += ','
if self.shapes:
tempdict['set_vals_stmt'] = '>>> %s = %s' % (self.shapes, vals)
else:
tempdict['set_vals_stmt'] = ''
if self.shapes is None:
# remove shapes from call parameters if there are none
for item in ['default', 'before_notes']:
tempdict[item] = tempdict[item].replace(
"\n%(shapes)s : array_like\n shape parameters", "")
for i in range(2):
if self.shapes is None:
                # necessary because we use %(shapes)s in two forms (with and without ", ")
self.__doc__ = self.__doc__.replace("%(shapes)s, ", "")
try:
self.__doc__ = doccer.docformat(self.__doc__, tempdict)
except TypeError as e:
raise Exception("Unable to construct docstring for "
"distribution \"%s\": %s" %
(self.name, repr(e))) from e
# correct for empty shapes
self.__doc__ = self.__doc__.replace('(, ', '(').replace(', )', ')')
def _construct_default_doc(self, longname=None, extradoc=None,
docdict=None, discrete='continuous'):
"""Construct instance docstring from the default template."""
if longname is None:
longname = 'A'
if extradoc is None:
extradoc = ''
if extradoc.startswith('\n\n'):
extradoc = extradoc[2:]
self.__doc__ = ''.join(['%s %s random variable.' % (longname, discrete),
'\n\n%(before_notes)s\n', docheaders['notes'],
extradoc, '\n%(example)s'])
self._construct_doc(docdict)
def freeze(self, *args, **kwds):
"""Freeze the distribution for the given arguments.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution. Should include all
the non-optional arguments, may include ``loc`` and ``scale``.
Returns
-------
rv_frozen : rv_frozen instance
The frozen distribution.
"""
return rv_frozen(self, *args, **kwds)
def __call__(self, *args, **kwds):
return self.freeze(*args, **kwds)
__call__.__doc__ = freeze.__doc__
# The actual calculation functions (no basic checking need be done)
# If these are defined, the others won't be looked at.
# Otherwise, the other set can be defined.
def _stats(self, *args, **kwds):
return None, None, None, None
# Noncentral moments (also known as the moment about the origin).
# Expressed in LaTeX, munp would be $\mu'_{n}$, i.e. "mu-sub-n-prime".
# The primed mu is a widely used notation for the noncentral moment.
def _munp(self, n, *args):
# Silence floating point warnings from integration.
with np.errstate(all='ignore'):
vals = self.generic_moment(n, *args)
return vals
def _argcheck_rvs(self, *args, **kwargs):
# Handle broadcasting and size validation of the rvs method.
# Subclasses should not have to override this method.
# The rule is that if `size` is not None, then `size` gives the
# shape of the result (integer values of `size` are treated as
# tuples with length 1; i.e. `size=3` is the same as `size=(3,)`.)
#
# `args` is expected to contain the shape parameters (if any), the
# location and the scale in a flat tuple (e.g. if there are two
# shape parameters `a` and `b`, `args` will be `(a, b, loc, scale)`).
# The only keyword argument expected is 'size'.
size = kwargs.get('size', None)
all_bcast = np.broadcast_arrays(*args)
def squeeze_left(a):
while a.ndim > 0 and a.shape[0] == 1:
a = a[0]
return a
# Eliminate trivial leading dimensions. In the convention
# used by numpy's random variate generators, trivial leading
# dimensions are effectively ignored. In other words, when `size`
# is given, trivial leading dimensions of the broadcast parameters
# in excess of the number of dimensions in size are ignored, e.g.
# >>> np.random.normal([[1, 3, 5]], [[[[0.01]]]], size=3)
# array([ 1.00104267, 3.00422496, 4.99799278])
# If `size` is not given, the exact broadcast shape is preserved:
# >>> np.random.normal([[1, 3, 5]], [[[[0.01]]]])
# array([[[[ 1.00862899, 3.00061431, 4.99867122]]]])
#
all_bcast = [squeeze_left(a) for a in all_bcast]
bcast_shape = all_bcast[0].shape
bcast_ndim = all_bcast[0].ndim
if size is None:
size_ = bcast_shape
else:
size_ = tuple(np.atleast_1d(size))
# Check compatibility of size_ with the broadcast shape of all
# the parameters. This check is intended to be consistent with
# how the numpy random variate generators (e.g. np.random.normal,
# np.random.beta) handle their arguments. The rule is that, if size
# is given, it determines the shape of the output. Broadcasting
# can't change the output size.
# This is the standard broadcasting convention of extending the
# shape with fewer dimensions with enough dimensions of length 1
# so that the two shapes have the same number of dimensions.
ndiff = bcast_ndim - len(size_)
if ndiff < 0:
bcast_shape = (1,)*(-ndiff) + bcast_shape
elif ndiff > 0:
size_ = (1,)*ndiff + size_
# This compatibility test is not standard. In "regular" broadcasting,
# two shapes are compatible if for each dimension, the lengths are the
# same or one of the lengths is 1. Here, the length of a dimension in
# size_ must not be less than the corresponding length in bcast_shape.
ok = all([bcdim == 1 or bcdim == szdim
for (bcdim, szdim) in zip(bcast_shape, size_)])
if not ok:
raise ValueError("size does not match the broadcast shape of "
"the parameters. %s, %s, %s" % (size, size_,
bcast_shape))
param_bcast = all_bcast[:-2]
loc_bcast = all_bcast[-2]
scale_bcast = all_bcast[-1]
return param_bcast, loc_bcast, scale_bcast, size_
# These are the methods you must define (standard form functions)
# NB: generic _pdf, _logpdf, _cdf are different for
# rv_continuous and rv_discrete hence are defined in there
def _argcheck(self, *args):
"""Default check for correct values on args and keywords.
Returns condition array of 1's where arguments are correct and
0's where they are not.
"""
cond = 1
for arg in args:
cond = logical_and(cond, (asarray(arg) > 0))
return cond
def _get_support(self, *args, **kwargs):
"""Return the support of the (unscaled, unshifted) distribution.
*Must* be overridden by distributions which have support dependent
upon the shape parameters of the distribution. Any such override
*must not* set or change any of the class members, as these members
are shared amongst all instances of the distribution.
Parameters
----------
arg1, arg2, ... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
Returns
-------
a, b : numeric (float, or int or +/-np.inf)
end-points of the distribution's support for the specified
shape parameters.
"""
return self.a, self.b
def _support_mask(self, x, *args):
a, b = self._get_support(*args)
with np.errstate(invalid='ignore'):
return (a <= x) & (x <= b)
def _open_support_mask(self, x, *args):
a, b = self._get_support(*args)
with np.errstate(invalid='ignore'):
return (a < x) & (x < b)
def _rvs(self, *args, size=None, random_state=None):
# This method must handle size being a tuple, and it must
# properly broadcast *args and size. size might be
# an empty tuple, which means a scalar random variate is to be
# generated.
# Use basic inverse cdf algorithm for RV generation as default.
U = random_state.uniform(size=size)
Y = self._ppf(U, *args)
return Y
def _logcdf(self, x, *args):
with np.errstate(divide='ignore'):
return log(self._cdf(x, *args))
def _sf(self, x, *args):
return 1.0-self._cdf(x, *args)
def _logsf(self, x, *args):
with np.errstate(divide='ignore'):
return log(self._sf(x, *args))
def _ppf(self, q, *args):
return self._ppfvec(q, *args)
def _isf(self, q, *args):
return self._ppf(1.0-q, *args) # use correct _ppf for subclasses
# These are actually called, and should not be overwritten if you
# want to keep error checking.
def rvs(self, *args, **kwds):
"""Random variates of given type.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
scale : array_like, optional
Scale parameter (default=1).
size : int or tuple of ints, optional
Defining number of random variates (default is 1).
random_state : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance
then that instance is used.
Returns
-------
rvs : ndarray or scalar
Random variates of given `size`.
"""
discrete = kwds.pop('discrete', None)
rndm = kwds.pop('random_state', None)
args, loc, scale, size = self._parse_args_rvs(*args, **kwds)
cond = logical_and(self._argcheck(*args), (scale >= 0))
if not np.all(cond):
raise ValueError("Domain error in arguments.")
if np.all(scale == 0):
return loc*ones(size, 'd')
# extra gymnastics needed for a custom random_state
if rndm is not None:
random_state_saved = self._random_state
random_state = check_random_state(rndm)
else:
random_state = self._random_state
# Maintain backwards compatibility by setting self._size
# for distributions that still need it.
if self._rvs_uses_size_attribute:
if not self._rvs_size_warned:
warnings.warn(
f'The signature of {self._rvs} does not contain '
f'a "size" keyword. Such signatures are deprecated.',
np.VisibleDeprecationWarning)
self._rvs_size_warned = True
self._size = size
self._random_state = random_state
vals = self._rvs(*args)
else:
vals = self._rvs(*args, size=size, random_state=random_state)
vals = vals * scale + loc
# do not forget to restore the _random_state
if rndm is not None:
self._random_state = random_state_saved
# Cast to int if discrete
if discrete:
if size == ():
vals = int(vals)
else:
vals = vals.astype(int)
return vals
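# Typical calls (standard scipy.stats usage; values omitted because they
# depend on the generator state):
# >>> from scipy.stats import norm, bernoulli
# >>> norm.rvs(loc=10, scale=2, size=5, random_state=123).shape
# (5,)
# >>> bernoulli.rvs(0.3, size=4)       # discrete=True path: cast to int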
def stats(self, *args, **kwds):
"""Some statistics of the given RV.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional (continuous RVs only)
scale parameter (default=1)
moments : str, optional
composed of letters ['mvsk'] defining which moments to compute:
'm' = mean,
'v' = variance,
's' = (Fisher's) skew,
'k' = (Fisher's) kurtosis.
(default is 'mv')
Returns
-------
stats : sequence
of requested moments.
"""
args, loc, scale, moments = self._parse_args_stats(*args, **kwds)
# scale = 1 by construction for discrete RVs
loc, scale = map(asarray, (loc, scale))
args = tuple(map(asarray, args))
cond = self._argcheck(*args) & (scale > 0) & (loc == loc)
output = []
default = np.full(shape(cond), fill_value=self.badvalue)
# Use only entries that are valid in calculation
if np.any(cond):
goodargs = argsreduce(cond, *(args+(scale, loc)))
scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
if self._stats_has_moments:
mu, mu2, g1, g2 = self._stats(*goodargs,
**{'moments': moments})
else:
mu, mu2, g1, g2 = self._stats(*goodargs)
if g1 is None:
mu3 = None
else:
if mu2 is None:
mu2 = self._munp(2, *goodargs)
if g2 is None:
# (mu2**1.5) breaks down for nan and inf
mu3 = g1 * np.power(mu2, 1.5)
if 'm' in moments:
if mu is None:
mu = self._munp(1, *goodargs)
out0 = default.copy()
place(out0, cond, mu * scale + loc)
output.append(out0)
if 'v' in moments:
if mu2 is None:
mu2p = self._munp(2, *goodargs)
if mu is None:
mu = self._munp(1, *goodargs)
# if mean is inf then var is also inf
with np.errstate(invalid='ignore'):
mu2 = np.where(np.isfinite(mu), mu2p - mu**2, np.inf)
out0 = default.copy()
place(out0, cond, mu2 * scale * scale)
output.append(out0)
if 's' in moments:
if g1 is None:
mu3p = self._munp(3, *goodargs)
if mu is None:
mu = self._munp(1, *goodargs)
if mu2 is None:
mu2p = self._munp(2, *goodargs)
mu2 = mu2p - mu * mu
with np.errstate(invalid='ignore'):
mu3 = (-mu*mu - 3*mu2)*mu + mu3p
g1 = mu3 / np.power(mu2, 1.5)
out0 = default.copy()
place(out0, cond, g1)
output.append(out0)
if 'k' in moments:
if g2 is None:
mu4p = self._munp(4, *goodargs)
if mu is None:
mu = self._munp(1, *goodargs)
if mu2 is None:
mu2p = self._munp(2, *goodargs)
mu2 = mu2p - mu * mu
if mu3 is None:
mu3p = self._munp(3, *goodargs)
with np.errstate(invalid='ignore'):
mu3 = (-mu * mu - 3 * mu2) * mu + mu3p
with np.errstate(invalid='ignore'):
mu4 = ((-mu**2 - 6*mu2) * mu - 4*mu3)*mu + mu4p
g2 = mu4 / mu2**2.0 - 3.0
out0 = default.copy()
place(out0, cond, g2)
output.append(out0)
else: # no valid args
output = [default.copy() for _ in moments]
if len(output) == 1:
return output[0]
else:
return tuple(output)
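# A small check with the standard normal, whose four moments are exact:
# >>> from scipy.stats import norm
# >>> norm.stats(moments='mvsk')       # mean 0, var 1, skew 0, kurtosis 0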
def entropy(self, *args, **kwds):
"""Differential entropy of the RV.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
scale : array_like, optional (continuous distributions only).
Scale parameter (default=1).
Notes
-----
Entropy is defined base `e`:
>>> drv = rv_discrete(values=((0, 1), (0.5, 0.5)))
>>> np.allclose(drv.entropy(), np.log(2.0))
True
"""
args, loc, scale = self._parse_args(*args, **kwds)
# NB: for discrete distributions scale=1 by construction in _parse_args
loc, scale = map(asarray, (loc, scale))
args = tuple(map(asarray, args))
cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
output = zeros(shape(cond0), 'd')
place(output, (1-cond0), self.badvalue)
goodargs = argsreduce(cond0, scale, *args)
goodscale = goodargs[0]
goodargs = goodargs[1:]
place(output, cond0, self.vecentropy(*goodargs) + log(goodscale))
return output
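# For continuous RVs the scale enters additively via log(scale), as in the
# line above (values rounded):
# >>> from scipy.stats import norm
# >>> norm.entropy()                   # 0.5*log(2*pi*e) ~= 1.4189
# >>> norm.entropy(scale=2.0)          # ~= 1.4189 + log(2)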
def moment(self, n, *args, **kwds):
"""n-th order non-central moment of distribution.
Parameters
----------
n : int, n >= 1
Order of moment.
arg1, arg2, arg3,... : float
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
"""
args, loc, scale = self._parse_args(*args, **kwds)
if not (self._argcheck(*args) and (scale > 0)):
return nan
if (floor(n) != n):
raise ValueError("Moment must be an integer.")
if (n < 0):
raise ValueError("Moment must be positive.")
mu, mu2, g1, g2 = None, None, None, None
if (n > 0) and (n < 5):
if self._stats_has_moments:
mdict = {'moments': {1: 'm', 2: 'v', 3: 'vs', 4: 'vk'}[n]}
else:
mdict = {}
mu, mu2, g1, g2 = self._stats(*args, **mdict)
val = _moment_from_stats(n, mu, mu2, g1, g2, self._munp, args)
# Convert to transformed X = L + S*Y
# E[X^n] = E[(L+S*Y)^n] = L^n sum(comb(n, k)*(S/L)^k E[Y^k], k=0...n)
if loc == 0:
return scale**n * val
else:
result = 0
fac = float(scale) / float(loc)
for k in range(n):
valk = _moment_from_stats(k, mu, mu2, g1, g2, self._munp, args)
result += comb(n, k, exact=True)*(fac**k) * valk
result += fac**n * val
return result * loc**n
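# The loc/scale expansion above in action (exact for the normal):
# >>> from scipy.stats import norm
# >>> norm.moment(2)                   # E[Z**2] = 1.0
# >>> norm.moment(2, loc=3, scale=2)   # E[(3 + 2*Z)**2] = 9 + 4 = 13.0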
def median(self, *args, **kwds):
"""Median of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
Location parameter, Default is 0.
scale : array_like, optional
Scale parameter, Default is 1.
Returns
-------
median : float
The median of the distribution.
See Also
--------
rv_discrete.ppf
Inverse of the CDF
"""
return self.ppf(0.5, *args, **kwds)
def mean(self, *args, **kwds):
"""Mean of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
mean : float
the mean of the distribution
"""
kwds['moments'] = 'm'
res = self.stats(*args, **kwds)
if isinstance(res, ndarray) and res.ndim == 0:
return res[()]
return res
def var(self, *args, **kwds):
"""Variance of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
var : float
the variance of the distribution
"""
kwds['moments'] = 'v'
res = self.stats(*args, **kwds)
if isinstance(res, ndarray) and res.ndim == 0:
return res[()]
return res
def std(self, *args, **kwds):
"""Standard deviation of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
std : float
standard deviation of the distribution
"""
kwds['moments'] = 'v'
res = sqrt(self.stats(*args, **kwds))
return res
def interval(self, alpha, *args, **kwds):
"""Confidence interval with equal areas around the median.
Parameters
----------
alpha : array_like of float
Probability that an rv will be drawn from the returned range.
Each value should be in the range [0, 1].
arg1, arg2, ... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
location parameter, Default is 0.
scale : array_like, optional
scale parameter, Default is 1.
Returns
-------
a, b : ndarray of float
end-points of range that contain ``100 * alpha %`` of the rv's
possible values.
"""
alpha = asarray(alpha)
if np.any((alpha > 1) | (alpha < 0)):
raise ValueError("alpha must be between 0 and 1 inclusive")
q1 = (1.0-alpha)/2
q2 = (1.0+alpha)/2
a = self.ppf(q1, *args, **kwds)
b = self.ppf(q2, *args, **kwds)
return a, b
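# A familiar special case (values rounded):
# >>> from scipy.stats import norm
# >>> norm.interval(0.95)              # approximately (-1.96, 1.96)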
def support(self, *args, **kwargs):
"""Support of the distribution.
Parameters
----------
arg1, arg2, ... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
location parameter, Default is 0.
scale : array_like, optional
scale parameter, Default is 1.
Returns
-------
a, b : array_like
end-points of the distribution's support.
"""
args, loc, scale = self._parse_args(*args, **kwargs)
arrs = np.broadcast_arrays(*args, loc, scale)
args, loc, scale = arrs[:-2], arrs[-2], arrs[-1]
cond = self._argcheck(*args) & (scale > 0)
_a, _b = self._get_support(*args)
if cond.all():
return _a * scale + loc, _b * scale + loc
elif cond.ndim == 0:
return self.badvalue, self.badvalue
# promote bounds to at least float to fill in the badvalue
_a, _b = np.asarray(_a).astype('d'), np.asarray(_b).astype('d')
out_a, out_b = _a * scale + loc, _b * scale + loc
place(out_a, 1-cond, self.badvalue)
place(out_b, 1-cond, self.badvalue)
return out_a, out_b
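# loc and scale shift the reported end-points, e.g. for the unit uniform:
# >>> from scipy.stats import uniform
# >>> uniform.support()
# (0.0, 1.0)
# >>> uniform.support(loc=2, scale=3)
# (2.0, 5.0)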
def _get_fixed_fit_value(kwds, names):
"""
Given names such as `['f0', 'fa', 'fix_a']`, check that there is
at most one non-None value in `kwds` associated with those names.
Return that value, or None if none of the names occur in `kwds`.
As a side effect, all occurrences of those names in `kwds` are
removed.
"""
vals = [(name, kwds.pop(name)) for name in names if name in kwds]
if len(vals) > 1:
repeated = [name for name, val in vals]
raise ValueError("fit method got multiple keyword arguments to "
"specify the same fixed parameter: " +
', '.join(repeated))
return vals[0][1] if vals else None
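# The aliases this helper resolves, sketched with a hypothetical `data`
# array of beta-distributed samples -- the three calls are equivalent:
# >>> from scipy.stats import beta
# >>> beta.fit(data, f0=1)       # positional-style name
# >>> beta.fit(data, fa=1)       # by shape-parameter name
# >>> beta.fit(data, fix_a=1)    # long form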
# continuous random variables: implement maybe later
#
# hf --- Hazard Function (PDF / SF)
# chf --- Cumulative hazard function (-log(SF))
# psf --- Probability sparsity function (reciprocal of the pdf) in
# units of percent-point-function (as a function of q).
# Also, the derivative of the percent-point function.
class rv_continuous(rv_generic):
"""A generic continuous random variable class meant for subclassing.
`rv_continuous` is a base class to construct specific distribution classes
and instances for continuous random variables. It cannot be used
directly as a distribution.
Parameters
----------
momtype : int, optional
The type of generic moment calculation to use: 0 for pdf, 1 (default)
for ppf.
a : float, optional
Lower bound of the support of the distribution, default is minus
infinity.
b : float, optional
Upper bound of the support of the distribution, default is plus
infinity.
xtol : float, optional
The tolerance for fixed point calculation for generic ppf.
badvalue : float, optional
The value in a result array that indicates a value for which
some argument restriction is violated, default is np.nan.
name : str, optional
The name of the instance. This string is used to construct the default
example for distributions.
longname : str, optional
This string is used as part of the first line of the docstring returned
when a subclass has no docstring of its own. Note: `longname` exists
for backwards compatibility, do not use for new subclasses.
shapes : str, optional
The shape of the distribution. For example ``"m, n"`` for a
distribution that takes two integers as the two shape arguments for all
its methods. If not provided, shape parameters will be inferred from
the signature of the private methods, ``_pdf`` and ``_cdf`` of the
instance.
extradoc : str, optional, deprecated
This string is used as the last part of the docstring returned when a
subclass has no docstring of its own. Note: `extradoc` exists for
backwards compatibility, do not use for new subclasses.
seed : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
Methods
-------
rvs
pdf
logpdf
cdf
logcdf
sf
logsf
ppf
isf
moment
stats
entropy
expect
median
mean
std
var
interval
__call__
fit
fit_loc_scale
nnlf
support
Notes
-----
Public methods of an instance of a distribution class (e.g., ``pdf``,
``cdf``) check their arguments and pass valid arguments to private,
computational methods (``_pdf``, ``_cdf``). For ``pdf(x)``, ``x`` is valid
if it is within the support of the distribution.
Whether a shape parameter is valid is decided by an ``_argcheck`` method
(which defaults to checking that its arguments are strictly positive.)
**Subclassing**
New random variables can be defined by subclassing the `rv_continuous` class
and re-defining at least the ``_pdf`` or the ``_cdf`` method (normalized
to location 0 and scale 1).
If positive argument checking is not correct for your RV
then you will also need to re-define the ``_argcheck`` method.
For most of the scipy.stats distributions, the support interval doesn't
depend on the shape parameters. ``x`` being in the support interval is
equivalent to ``self.a <= x <= self.b``. If either of the endpoints of
the support do depend on the shape parameters, then
i) the distribution must implement the ``_get_support`` method; and
ii) those dependent endpoints must be omitted from the distribution's
call to the ``rv_continuous`` initializer.
Correct, but potentially slow defaults exist for the remaining
methods but for speed and/or accuracy you can over-ride::
_logpdf, _cdf, _logcdf, _ppf, _rvs, _isf, _sf, _logsf
The default method ``_rvs`` relies on the inverse of the cdf, ``_ppf``,
applied to a uniform random variate. In order to generate random variates
efficiently, either the default ``_ppf`` needs to be overwritten (e.g.
if the inverse cdf can be expressed in an explicit form) or a sampling
method needs to be implemented in a custom ``_rvs`` method.
If possible, you should override ``_isf``, ``_sf`` or ``_logsf``.
The main reason would be to improve numerical accuracy: for example,
the survival function ``_sf`` is computed as ``1 - _cdf`` which can
result in loss of precision if ``_cdf(x)`` is close to one.
**Methods that can be overwritten by subclasses**
::
_rvs
_pdf
_cdf
_sf
_ppf
_isf
_stats
_munp
_entropy
_argcheck
_get_support
There are additional (internal and private) generic methods that can
be useful for cross-checking and for debugging, but might not work in
all cases when directly called.
A note on ``shapes``: subclasses need not specify them explicitly. In this
case, `shapes` will be automatically deduced from the signatures of the
overridden methods (`pdf`, `cdf` etc).
If, for some reason, you prefer to avoid relying on introspection, you can
specify ``shapes`` explicitly as an argument to the instance constructor.
**Frozen Distributions**
Normally, you must provide shape parameters (and, optionally, location and
scale parameters) to each call of a method of a distribution.
Alternatively, the object may be called (as a function) to fix the shape,
location, and scale parameters returning a "frozen" continuous RV object:
rv = generic(<shape(s)>, loc=0, scale=1)
Here ``rv`` is an `rv_frozen` object with the same methods but holding the
given shape, location, and scale fixed.
**Statistics**
Statistics are computed using numerical integration by default.
For speed you can redefine this using ``_stats``:
- take shape parameters and return mu, mu2, g1, g2
- If you can't compute one of these, return it as None
- Can also be defined with a keyword argument ``moments``, which is a
string composed of "m", "v", "s", and/or "k".
Only the components appearing in the string should be computed and
returned in the order "m", "v", "s", or "k" with missing values
returned as None.
Alternatively, you can override ``_munp``, which takes ``n`` and shape
parameters and returns the n-th non-central moment of the distribution.
Examples
--------
To create a new Gaussian distribution, we would do the following:
>>> from scipy.stats import rv_continuous
>>> class gaussian_gen(rv_continuous):
... "Gaussian distribution"
... def _pdf(self, x):
... return np.exp(-x**2 / 2.) / np.sqrt(2.0 * np.pi)
>>> gaussian = gaussian_gen(name='gaussian')
``scipy.stats`` distributions are *instances*, so here we subclass
`rv_continuous` and create an instance. With this, we now have
a fully functional distribution with all relevant methods automagically
generated by the framework.
Note that above we defined a standard normal distribution, with zero mean
and unit variance. Shifting and scaling of the distribution can be done
by using ``loc`` and ``scale`` parameters: ``gaussian.pdf(x, loc, scale)``
essentially computes ``y = (x - loc) / scale`` and
``gaussian._pdf(y) / scale``.
"""
def __init__(self, momtype=1, a=None, b=None, xtol=1e-14,
badvalue=None, name=None, longname=None,
shapes=None, extradoc=None, seed=None):
super().__init__(seed)
# save the ctor parameters, cf generic freeze
self._ctor_param = dict(
momtype=momtype, a=a, b=b, xtol=xtol,
badvalue=badvalue, name=name, longname=longname,
shapes=shapes, extradoc=extradoc, seed=seed)
if badvalue is None:
badvalue = nan
if name is None:
name = 'Distribution'
self.badvalue = badvalue
self.name = name
self.a = a
self.b = b
if a is None:
self.a = -inf
if b is None:
self.b = inf
self.xtol = xtol
self.moment_type = momtype
self.shapes = shapes
self.extradoc = extradoc
self._construct_argparser(meths_to_inspect=[self._pdf, self._cdf],
locscale_in='loc=0, scale=1',
locscale_out='loc, scale')
self._attach_methods()
if longname is None:
if name[0] in 'aeiouAEIOU':
hstr = "An "
else:
hstr = "A "
longname = hstr + name
if sys.flags.optimize < 2:
# Skip adding docstrings if interpreter is run with -OO
if self.__doc__ is None:
self._construct_default_doc(longname=longname,
extradoc=extradoc,
docdict=docdict,
discrete='continuous')
else:
dct = dict(distcont)
self._construct_doc(docdict, dct.get(self.name))
def __getstate__(self):
dct = self.__dict__.copy()
# these methods will be remade in __setstate__
# _random_state attribute is taken care of by rv_generic
attrs = ["_parse_args", "_parse_args_stats", "_parse_args_rvs",
"_cdfvec", "_ppfvec", "vecentropy", "generic_moment"]
[dct.pop(attr, None) for attr in attrs]
return dct
def _attach_methods(self):
"""
Attaches dynamically created methods to the rv_continuous instance.
"""
# _attach_methods is responsible for calling _attach_argparser_methods
self._attach_argparser_methods()
# nin correction
self._ppfvec = vectorize(self._ppf_single, otypes='d')
self._ppfvec.nin = self.numargs + 1
self.vecentropy = vectorize(self._entropy, otypes='d')
self._cdfvec = vectorize(self._cdf_single, otypes='d')
self._cdfvec.nin = self.numargs + 1
if self.moment_type == 0:
self.generic_moment = vectorize(self._mom0_sc, otypes='d')
else:
self.generic_moment = vectorize(self._mom1_sc, otypes='d')
# Because of the *args argument of _mom0_sc, vectorize cannot count the
# number of arguments correctly.
self.generic_moment.nin = self.numargs + 1
def _updated_ctor_param(self):
"""Return the current version of _ctor_param, possibly updated by user.
Used by freezing.
Keep this in sync with the signature of __init__.
"""
dct = self._ctor_param.copy()
dct['a'] = self.a
dct['b'] = self.b
dct['xtol'] = self.xtol
dct['badvalue'] = self.badvalue
dct['name'] = self.name
dct['shapes'] = self.shapes
dct['extradoc'] = self.extradoc
return dct
def _ppf_to_solve(self, x, q, *args):
return self.cdf(*(x, )+args)-q
def _ppf_single(self, q, *args):
factor = 10.
left, right = self._get_support(*args)
if np.isinf(left):
left = min(-factor, right)
while self._ppf_to_solve(left, q, *args) > 0.:
left, right = left * factor, left
# left is now such that cdf(left) <= q
# if right has changed, then cdf(right) > q
if np.isinf(right):
right = max(factor, left)
while self._ppf_to_solve(right, q, *args) < 0.:
left, right = right, right * factor
# right is now such that cdf(right) >= q
return optimize.brentq(self._ppf_to_solve,
left, right, args=(q,)+args, xtol=self.xtol)
# moment from definition
def _mom_integ0(self, x, m, *args):
return x**m * self.pdf(x, *args)
def _mom0_sc(self, m, *args):
_a, _b = self._get_support(*args)
return integrate.quad(self._mom_integ0, _a, _b,
args=(m,)+args)[0]
# moment calculated using ppf
def _mom_integ1(self, q, m, *args):
return (self.ppf(q, *args))**m
def _mom1_sc(self, m, *args):
return integrate.quad(self._mom_integ1, 0, 1, args=(m,)+args)[0]
def _pdf(self, x, *args):
return derivative(self._cdf, x, dx=1e-5, args=args, order=5)
# Could also define any of these
def _logpdf(self, x, *args):
return log(self._pdf(x, *args))
def _cdf_single(self, x, *args):
_a, _b = self._get_support(*args)
return integrate.quad(self._pdf, _a, x, args=args)[0]
def _cdf(self, x, *args):
return self._cdfvec(x, *args)
# generic _argcheck, _logcdf, _sf, _logsf, _ppf, _isf, _rvs are defined
# in rv_generic
def pdf(self, x, *args, **kwds):
"""Probability density function at x of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
pdf : ndarray
Probability density function evaluated at x
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
dtyp = np.find_common_type([x.dtype, np.float64], [])
x = np.asarray((x - loc)/scale, dtype=dtyp)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = self._support_mask(x, *args) & (scale > 0)
cond = cond0 & cond1
output = zeros(shape(cond), dtyp)
putmask(output, (1-cond0)+np.isnan(x), self.badvalue)
if np.any(cond):
goodargs = argsreduce(cond, *((x,)+args+(scale,)))
scale, goodargs = goodargs[-1], goodargs[:-1]
place(output, cond, self._pdf(*goodargs) / scale)
if output.ndim == 0:
return output[()]
return output
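# The wrapper evaluates the standardized density and divides by scale:
# >>> from scipy.stats import norm
# >>> norm.pdf(0.0)                    # 1/sqrt(2*pi) ~= 0.3989
# >>> norm.pdf(0.0, loc=1, scale=2)    # norm._pdf((0 - 1)/2) / 2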
def logpdf(self, x, *args, **kwds):
"""Log of the probability density function at x of the given RV.
This uses a more numerically accurate calculation if available.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
logpdf : array_like
Log of the probability density function evaluated at x
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
dtyp = np.find_common_type([x.dtype, np.float64], [])
x = np.asarray((x - loc)/scale, dtype=dtyp)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = self._support_mask(x, *args) & (scale > 0)
cond = cond0 & cond1
output = empty(shape(cond), dtyp)
output.fill(NINF)
putmask(output, (1-cond0)+np.isnan(x), self.badvalue)
if np.any(cond):
goodargs = argsreduce(cond, *((x,)+args+(scale,)))
scale, goodargs = goodargs[-1], goodargs[:-1]
place(output, cond, self._logpdf(*goodargs) - log(scale))
if output.ndim == 0:
return output[()]
return output
def cdf(self, x, *args, **kwds):
"""
Cumulative distribution function of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
cdf : ndarray
Cumulative distribution function evaluated at `x`
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
_a, _b = self._get_support(*args)
dtyp = np.find_common_type([x.dtype, np.float64], [])
x = np.asarray((x - loc)/scale, dtype=dtyp)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = self._open_support_mask(x, *args) & (scale > 0)
cond2 = (x >= np.asarray(_b)) & cond0
cond = cond0 & cond1
output = zeros(shape(cond), dtyp)
place(output, (1-cond0)+np.isnan(x), self.badvalue)
place(output, cond2, 1.0)
if np.any(cond): # call only if at least 1 entry
goodargs = argsreduce(cond, *((x,)+args))
place(output, cond, self._cdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
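# Values outside the support resolve to exact 0.0 or 1.0, as handled by
# cond2 above; e.g. for the unit uniform:
# >>> from scipy.stats import uniform
# >>> uniform.cdf([-1.0, 0.5, 2.0])
# array([0. , 0.5, 1. ])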
def logcdf(self, x, *args, **kwds):
"""Log of the cumulative distribution function at x of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
logcdf : array_like
Log of the cumulative distribution function evaluated at x
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
_a, _b = self._get_support(*args)
dtyp = np.find_common_type([x.dtype, np.float64], [])
x = np.asarray((x - loc)/scale, dtype=dtyp)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = self._open_support_mask(x, *args) & (scale > 0)
cond2 = (x >= _b) & cond0
cond = cond0 & cond1
output = empty(shape(cond), dtyp)
output.fill(NINF)
place(output, (1-cond0)*(cond1 == cond1)+np.isnan(x), self.badvalue)
place(output, cond2, 0.0)
if np.any(cond): # call only if at least 1 entry
goodargs = argsreduce(cond, *((x,)+args))
place(output, cond, self._logcdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def sf(self, x, *args, **kwds):
"""Survival function (1 - `cdf`) at x of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
sf : array_like
Survival function evaluated at x
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
_a, _b = self._get_support(*args)
dtyp = np.find_common_type([x.dtype, np.float64], [])
x = np.asarray((x - loc)/scale, dtype=dtyp)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = self._open_support_mask(x, *args) & (scale > 0)
cond2 = cond0 & (x <= _a)
cond = cond0 & cond1
output = zeros(shape(cond), dtyp)
place(output, (1-cond0)+np.isnan(x), self.badvalue)
place(output, cond2, 1.0)
if np.any(cond):
goodargs = argsreduce(cond, *((x,)+args))
place(output, cond, self._sf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def logsf(self, x, *args, **kwds):
"""Log of the survival function of the given RV.
Returns the log of the "survival function," defined as (1 - `cdf`),
evaluated at `x`.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
logsf : ndarray
Log of the survival function evaluated at `x`.
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
_a, _b = self._get_support(*args)
dtyp = np.find_common_type([x.dtype, np.float64], [])
x = np.asarray((x - loc)/scale, dtype=dtyp)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = self._open_support_mask(x, *args) & (scale > 0)
cond2 = cond0 & (x <= _a)
cond = cond0 & cond1
output = empty(shape(cond), dtyp)
output.fill(NINF)
place(output, (1-cond0)+np.isnan(x), self.badvalue)
place(output, cond2, 0.0)
if np.any(cond):
goodargs = argsreduce(cond, *((x,)+args))
place(output, cond, self._logsf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def ppf(self, q, *args, **kwds):
"""Percent point function (inverse of `cdf`) at q of the given RV.
Parameters
----------
q : array_like
lower tail probability
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
x : array_like
quantile corresponding to the lower tail probability q.
"""
args, loc, scale = self._parse_args(*args, **kwds)
q, loc, scale = map(asarray, (q, loc, scale))
args = tuple(map(asarray, args))
_a, _b = self._get_support(*args)
cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
cond1 = (0 < q) & (q < 1)
cond2 = cond0 & (q == 0)
cond3 = cond0 & (q == 1)
cond = cond0 & cond1
output = np.full(shape(cond), fill_value=self.badvalue)
lower_bound = _a * scale + loc
upper_bound = _b * scale + loc
place(output, cond2, argsreduce(cond2, lower_bound)[0])
place(output, cond3, argsreduce(cond3, upper_bound)[0])
if np.any(cond): # call only if at least 1 entry
goodargs = argsreduce(cond, *((q,)+args+(scale, loc)))
scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
place(output, cond, self._ppf(*goodargs) * scale + loc)
if output.ndim == 0:
return output[()]
return output
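# The cond2/cond3 branches above map q == 0 and q == 1 straight to the
# support end-points:
# >>> from scipy.stats import norm
# >>> norm.ppf(0.975)                  # ~= 1.96
# >>> norm.ppf([0.0, 1.0])             # array([-inf, inf])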
def isf(self, q, *args, **kwds):
"""Inverse survival function (inverse of `sf`) at q of the given RV.
Parameters
----------
q : array_like
upper tail probability
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
x : ndarray or scalar
Quantile corresponding to the upper tail probability q.
"""
args, loc, scale = self._parse_args(*args, **kwds)
q, loc, scale = map(asarray, (q, loc, scale))
args = tuple(map(asarray, args))
_a, _b = self._get_support(*args)
cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
cond1 = (0 < q) & (q < 1)
cond2 = cond0 & (q == 1)
cond3 = cond0 & (q == 0)
cond = cond0 & cond1
output = np.full(shape(cond), fill_value=self.badvalue)
lower_bound = _a * scale + loc
upper_bound = _b * scale + loc
place(output, cond2, argsreduce(cond2, lower_bound)[0])
place(output, cond3, argsreduce(cond3, upper_bound)[0])
if np.any(cond):
goodargs = argsreduce(cond, *((q,)+args+(scale, loc)))
scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
place(output, cond, self._isf(*goodargs) * scale + loc)
if output.ndim == 0:
return output[()]
return output
def _nnlf(self, x, *args):
return -np.sum(self._logpdf(x, *args), axis=0)
def _unpack_loc_scale(self, theta):
try:
loc = theta[-2]
scale = theta[-1]
args = tuple(theta[:-2])
except IndexError as e:
raise ValueError("Not enough input arguments.") from e
return loc, scale, args
def nnlf(self, theta, x):
"""Negative loglikelihood function.
Notes
-----
This is ``-sum(log pdf(x, theta), axis=0)`` where `theta` are the
parameters (including loc and scale).
"""
loc, scale, args = self._unpack_loc_scale(theta)
if not self._argcheck(*args) or scale <= 0:
return inf
x = asarray((x-loc) / scale)
n_log_scale = len(x) * log(scale)
if np.any(~self._support_mask(x, *args)):
return inf
return self._nnlf(x, *args) + n_log_scale
def _nnlf_and_penalty(self, x, args):
cond0 = ~self._support_mask(x, *args)
n_bad = np.count_nonzero(cond0, axis=0)
if n_bad > 0:
x = argsreduce(~cond0, x)[0]
logpdf = self._logpdf(x, *args)
finite_logpdf = np.isfinite(logpdf)
n_bad += np.sum(~finite_logpdf, axis=0)
if n_bad > 0:
penalty = n_bad * log(_XMAX) * 100
return -np.sum(logpdf[finite_logpdf], axis=0) + penalty
return -np.sum(logpdf, axis=0)
def _penalized_nnlf(self, theta, x):
"""Penalized negative loglikelihood function.
i.e., - sum (log pdf(x, theta), axis=0) + penalty
where theta are the parameters (including loc and scale)
"""
loc, scale, args = self._unpack_loc_scale(theta)
if not self._argcheck(*args) or scale <= 0:
return inf
x = asarray((x-loc) / scale)
n_log_scale = len(x) * log(scale)
return self._nnlf_and_penalty(x, args) + n_log_scale
def _fitstart(self, data, args=None):
"""Starting point for fit (shape arguments + loc + scale)."""
if args is None:
args = (1.0,)*self.numargs
loc, scale = self._fit_loc_scale_support(data, *args)
return args + (loc, scale)
def _reduce_func(self, args, kwds, data=None):
"""
Return the (possibly reduced) function to optimize in order to find MLE
estimates for the .fit method.
"""
# Convert fixed shape parameters to the standard numeric form: e.g. for
# stats.beta, shapes='a, b'. To fix `a`, the caller can give a value
# for `f0`, `fa` or 'fix_a'. The following converts the latter two
# into the first (numeric) form.
shapes = []
if self.shapes:
shapes = self.shapes.replace(',', ' ').split()
for j, s in enumerate(shapes):
key = 'f' + str(j)
names = [key, 'f' + s, 'fix_' + s]
val = _get_fixed_fit_value(kwds, names)
if val is not None:
kwds[key] = val
args = list(args)
Nargs = len(args)
fixedn = []
names = ['f%d' % n for n in range(Nargs - 2)] + ['floc', 'fscale']
x0 = []
for n, key in enumerate(names):
if key in kwds:
fixedn.append(n)
args[n] = kwds.pop(key)
else:
x0.append(args[n])
methods = {"mle", "mm"}
method = kwds.pop('method', "mle").lower()
if method == "mm":
n_params = len(shapes) + 2 - len(fixedn)
exponents = (np.arange(1, n_params+1))[:, np.newaxis]
data_moments = np.sum(data[None, :]**exponents/len(data), axis=1)
def objective(theta, x):
return self._moment_error(theta, x, data_moments)
elif method == "mle":
objective = self._penalized_nnlf
else:
raise ValueError("Method '{0}' not available; must be one of {1}"
.format(method, methods))
if len(fixedn) == 0:
func = objective
restore = None
else:
if len(fixedn) == Nargs:
raise ValueError(
"All parameters fixed. There is nothing to optimize.")
def restore(args, theta):
# Replace with theta for all numbers not in fixedn
# This allows the non-fixed values to vary, but
# we still call self.nnlf with all parameters.
i = 0
for n in range(Nargs):
if n not in fixedn:
args[n] = theta[i]
i += 1
return args
def func(theta, x):
newtheta = restore(args[:], theta)
return objective(newtheta, x)
return x0, func, restore, args
def _moment_error(self, theta, x, data_moments):
loc, scale, args = self._unpack_loc_scale(theta)
if not self._argcheck(*args) or scale <= 0:
return inf
dist_moments = np.array([self.moment(i+1, *args, loc=loc, scale=scale)
for i in range(len(data_moments))])
if np.any(np.isnan(dist_moments)):
raise ValueError("Method of moments encountered a non-finite "
"distribution moment and cannot continue. "
"Consider trying method='MLE'.")
return (((data_moments - dist_moments) /
np.maximum(np.abs(data_moments), 1e-8))**2).sum()
def fit(self, data, *args, **kwds):
"""
Return estimates of shape (if applicable), location, and scale
parameters from data. The default estimation method is Maximum
Likelihood Estimation (MLE), but Method of Moments (MM)
is also available.
Starting estimates for
the fit are given by input arguments; for any arguments not provided
with starting estimates, ``self._fitstart(data)`` is called to generate
such.
One can hold some parameters fixed to specific values by passing in
keyword arguments ``f0``, ``f1``, ..., ``fn`` (for shape parameters)
and ``floc`` and ``fscale`` (for location and scale parameters,
respectively).
Parameters
----------
data : array_like
Data to use in estimating the distribution parameters.
arg1, arg2, arg3,... : floats, optional
Starting value(s) for any shape-characterizing arguments (those not
provided will be determined by a call to ``_fitstart(data)``).
No default value.
kwds : floats, optional
- `loc`: initial guess of the distribution's location parameter.
- `scale`: initial guess of the distribution's scale parameter.
Special keyword arguments are recognized as holding certain
parameters fixed:
- f0...fn : hold respective shape parameters fixed.
Alternatively, shape parameters to fix can be specified by name.
For example, if ``self.shapes == "a, b"``, ``fa`` and ``fix_a``
are equivalent to ``f0``, and ``fb`` and ``fix_b`` are
equivalent to ``f1``.
- floc : hold location parameter fixed to specified value.
- fscale : hold scale parameter fixed to specified value.
- optimizer : The optimizer to use.
The optimizer must take ``func``,
and starting position as the first two arguments,
plus ``args`` (for extra arguments to pass to the
function to be optimized) and ``disp=0`` to suppress
output as keyword arguments.
- method : The method to use. The default is "MLE" (Maximum
Likelihood Estimate); "MM" (Method of Moments)
is also available.
Returns
-------
parameter_tuple : tuple of floats
Estimates for any shape parameters (if applicable),
followed by those for location and scale.
For most random variables, shape statistics
will be returned, but there are exceptions (e.g. ``norm``).
Notes
-----
With ``method="MLE"`` (default), the fit is computed by minimizing
the negative log-likelihood function. A large, finite penalty
(rather than infinite negative log-likelihood) is applied for
observations beyond the support of the distribution.
With ``method="MM"``, the fit is computed by minimizing the L2 norm
of the relative errors between the first *k* raw (about zero) data
moments and the corresponding distribution moments, where *k* is the
number of non-fixed parameters.
More precisely, the objective function is::
(((data_moments - dist_moments)
/ np.maximum(np.abs(data_moments), 1e-8))**2).sum()
where the constant ``1e-8`` avoids division by zero in case of
vanishing data moments. Typically, this error norm can be reduced to
zero.
Note that the standard method of moments can produce parameters for
which some data are outside the support of the fitted distribution;
this implementation does nothing to prevent this.
For either method,
the returned answer is not guaranteed to be globally optimal; it
may only be locally optimal, or the optimization may fail altogether.
If the data contain any of ``np.nan``, ``np.inf``, or ``-np.inf``,
the `fit` method will raise a ``RuntimeError``.
Examples
--------
Generate some data to fit: draw random variates from the `beta`
distribution
>>> from scipy.stats import beta
>>> a, b = 1., 2.
>>> x = beta.rvs(a, b, size=1000)
Now we can fit all four parameters (``a``, ``b``, ``loc``
and ``scale``):
>>> a1, b1, loc1, scale1 = beta.fit(x)
We can also use some prior knowledge about the dataset: let's keep
``loc`` and ``scale`` fixed:
>>> a1, b1, loc1, scale1 = beta.fit(x, floc=0, fscale=1)
>>> loc1, scale1
(0, 1)
We can also keep shape parameters fixed by using ``f``-keywords. To
keep the zero-th shape parameter ``a`` equal 1, use ``f0=1`` or,
equivalently, ``fa=1``:
>>> a1, b1, loc1, scale1 = beta.fit(x, fa=1, floc=0, fscale=1)
>>> a1
1
Not all distributions return estimates for the shape parameters.
``norm`` for example just returns estimates for location and scale:
>>> from scipy.stats import norm
>>> x = norm.rvs(a, b, size=1000, random_state=123)
>>> loc1, scale1 = norm.fit(x)
>>> loc1, scale1
(0.92087172783841631, 2.0015750750324668)
"""
data = np.asarray(data)
method = kwds.get('method', "mle").lower()
# memory for method of moments
Narg = len(args)
if Narg > self.numargs:
raise TypeError("Too many input arguments.")
if not np.isfinite(data).all():
raise RuntimeError("The data contains non-finite values.")
start = [None]*2
if (Narg < self.numargs) or not ('loc' in kwds and
'scale' in kwds):
# get distribution specific starting locations
start = self._fitstart(data)
args += start[Narg:-2]
loc = kwds.pop('loc', start[-2])
scale = kwds.pop('scale', start[-1])
args += (loc, scale)
x0, func, restore, args = self._reduce_func(args, kwds, data=data)
optimizer = kwds.pop('optimizer', optimize.fmin)
# convert string to function in scipy.optimize
optimizer = _fit_determine_optimizer(optimizer)
# by now kwds must be empty, since everybody took what they needed
if kwds:
raise TypeError("Unknown arguments: %s." % kwds)
# In some cases, method of moments can be done with fsolve/root
# instead of an optimizer, but sometimes no solution exists,
# especially when the user fixes parameters. Minimizing the sum
# of squares of the error generalizes to these cases.
vals = optimizer(func, x0, args=(ravel(data),), disp=0)
obj = func(vals, data)
if restore is not None:
vals = restore(args, vals)
vals = tuple(vals)
loc, scale, shapes = self._unpack_loc_scale(vals)
if not (np.all(self._argcheck(*shapes)) and scale > 0):
raise Exception("Optimization converged to parameters that are "
"outside the range allowed by the distribution.")
if method == 'mm':
if not np.isfinite(obj):
raise Exception("Optimization failed: either a data moment "
"or fitted distribution moment is "
"non-finite.")
return vals
def _fit_loc_scale_support(self, data, *args):
"""Estimate loc and scale parameters from data accounting for support.
Parameters
----------
data : array_like
Data to fit.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
Returns
-------
Lhat : float
Estimated location parameter for the data.
Shat : float
Estimated scale parameter for the data.
"""
data = np.asarray(data)
# Estimate location and scale according to the method of moments.
loc_hat, scale_hat = self.fit_loc_scale(data, *args)
# Compute the support according to the shape parameters.
self._argcheck(*args)
_a, _b = self._get_support(*args)
a, b = _a, _b
support_width = b - a
# If the support is empty then return the moment-based estimates.
if support_width <= 0:
return loc_hat, scale_hat
# Compute the proposed support according to the loc and scale
# estimates.
a_hat = loc_hat + a * scale_hat
b_hat = loc_hat + b * scale_hat
# Use the moment-based estimates if they are compatible with the data.
data_a = np.min(data)
data_b = np.max(data)
if a_hat < data_a and data_b < b_hat:
return loc_hat, scale_hat
# Otherwise find other estimates that are compatible with the data.
data_width = data_b - data_a
rel_margin = 0.1
margin = data_width * rel_margin
# For a finite interval, both the location and scale
# should have interesting values.
if support_width < np.inf:
loc_hat = (data_a - a) - margin
scale_hat = (data_width + 2 * margin) / support_width
return loc_hat, scale_hat
# For a one-sided interval, use only an interesting location parameter.
if a > -np.inf:
return (data_a - a) - margin, 1
elif b < np.inf:
return (data_b - b) + margin, 1
else:
raise RuntimeError
def fit_loc_scale(self, data, *args):
"""
Estimate loc and scale parameters from data using 1st and 2nd moments.
Parameters
----------
data : array_like
Data to fit.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
Returns
-------
Lhat : float
Estimated location parameter for the data.
Shat : float
Estimated scale parameter for the data.
"""
mu, mu2 = self.stats(*args, **{'moments': 'mv'})
tmp = asarray(data)
muhat = tmp.mean()
mu2hat = tmp.var()
Shat = sqrt(mu2hat / mu2)
Lhat = muhat - Shat*mu
if not np.isfinite(Lhat):
Lhat = 0
if not (np.isfinite(Shat) and (0 < Shat)):
Shat = 1
return Lhat, Shat
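# Because norm has mu = 0 and mu2 = 1, this reduces to the sample mean and
# standard deviation (a sketch with synthetic data):
# >>> import numpy as np
# >>> from scipy.stats import norm
# >>> x = np.random.default_rng(0).normal(5.0, 2.0, size=1000)
# >>> norm.fit_loc_scale(x)            # roughly (5.0, 2.0)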
def _entropy(self, *args):
def integ(x):
val = self._pdf(x, *args)
return entr(val)
# upper limit is often inf, so suppress warnings when integrating
_a, _b = self._get_support(*args)
with np.errstate(over='ignore'):
h = integrate.quad(integ, _a, _b)[0]
if not np.isnan(h):
return h
else:
# try with different limits if integration problems
low, upp = self.ppf([1e-10, 1. - 1e-10], *args)
if np.isinf(_b):
upper = upp
else:
upper = _b
if np.isinf(_a):
lower = low
else:
lower = _a
return integrate.quad(integ, lower, upper)[0]
def expect(self, func=None, args=(), loc=0, scale=1, lb=None, ub=None,
conditional=False, **kwds):
"""Calculate expected value of a function with respect to the
distribution by numerical integration.
The expected value of a function ``f(x)`` with respect to a
distribution ``dist`` is defined as::
E[f(x)] = Integral(f(x) * dist.pdf(x)) from ``lb`` to ``ub``,
where ``ub`` and ``lb`` are arguments and ``x`` has the ``dist.pdf(x)``
distribution. If the bounds ``lb`` and ``ub`` correspond to the
support of the distribution, e.g. ``[-inf, inf]`` in the default
case, then the integral is the unrestricted expectation of ``f(x)``.
Also, the function ``f(x)`` may be defined such that ``f(x)`` is ``0``
outside a finite interval in which case the expectation is
calculated within the finite range ``[lb, ub]``.
Parameters
----------
func : callable, optional
Function for which integral is calculated. Takes only one argument.
The default is the identity mapping f(x) = x.
args : tuple, optional
Shape parameters of the distribution.
loc : float, optional
Location parameter (default=0).
scale : float, optional
Scale parameter (default=1).
lb, ub : scalar, optional
Lower and upper bound for integration. Default is set to the
support of the distribution.
conditional : bool, optional
If True, the integral is corrected by the conditional probability
of the integration interval. The return value is the expectation
of the function, conditional on being in the given interval.
Default is False.
Additional keyword arguments are passed to the integration routine.
Returns
-------
expect : float
The calculated expected value.
Notes
-----
The integration behavior of this function is inherited from
`scipy.integrate.quad`. Neither this function nor
`scipy.integrate.quad` can verify whether the integral exists or is
finite. For example ``cauchy(0).mean()`` returns ``np.nan`` and
``cauchy(0).expect()`` returns ``0.0``.
The function is not vectorized.
Examples
--------
To understand the effect of the bounds of integration consider
>>> from scipy.stats import expon
>>> expon(1).expect(lambda x: 1, lb=0.0, ub=2.0)
0.6321205588285578
This is close to
>>> expon(1).cdf(2.0) - expon(1).cdf(0.0)
0.6321205588285577
If ``conditional=True``
>>> expon(1).expect(lambda x: 1, lb=0.0, ub=2.0, conditional=True)
1.0000000000000002
The slight deviation from 1 is due to numerical integration.
"""
lockwds = {'loc': loc,
'scale': scale}
self._argcheck(*args)
_a, _b = self._get_support(*args)
if func is None:
def fun(x, *args):
return x * self.pdf(x, *args, **lockwds)
else:
def fun(x, *args):
return func(x) * self.pdf(x, *args, **lockwds)
if lb is None:
lb = loc + _a * scale
if ub is None:
ub = loc + _b * scale
if conditional:
invfac = (self.sf(lb, *args, **lockwds)
- self.sf(ub, *args, **lockwds))
else:
invfac = 1.0
kwds['args'] = args
# Silence floating point warnings from integration.
with np.errstate(all='ignore'):
vals = integrate.quad(fun, lb, ub, **kwds)[0] / invfac
return vals
# Helpers for the discrete distributions
def _drv2_moment(self, n, *args):
"""Non-central moment of discrete distribution."""
def fun(x):
return np.power(x, n) * self._pmf(x, *args)
_a, _b = self._get_support(*args)
return _expect(fun, _a, _b, self.ppf(0.5, *args), self.inc)
def _drv2_ppfsingle(self, q, *args): # Use basic bisection algorithm
_a, _b = self._get_support(*args)
b = _b
a = _a
if isinf(b): # Be sure ending point is > q
b = int(max(100*q, 10))
while 1:
if b >= _b:
qb = 1.0
break
qb = self._cdf(b, *args)
if (qb < q):
b += 10
else:
break
else:
qb = 1.0
if isinf(a): # be sure starting point < q
a = int(min(-100*q, -10))
while 1:
if a <= _a:
qb = 0.0
break
qa = self._cdf(a, *args)
if (qa > q):
a -= 10
else:
break
else:
qa = self._cdf(a, *args)
while 1:
if (qa == q):
return a
if (qb == q):
return b
if b <= a+1:
if qa > q:
return a
else:
return b
c = int((a+b)/2.0)
qc = self._cdf(c, *args)
if (qc < q):
if a != c:
a = c
else:
raise RuntimeError('updating stopped, endless loop')
qa = qc
elif (qc > q):
if b != c:
b = c
else:
raise RuntimeError('updating stopped, endless loop')
qb = qc
else:
return c
# Must over-ride one of _pmf or _cdf or pass in
# x_k, p(x_k) lists in initialization
class rv_discrete(rv_generic):
"""A generic discrete random variable class meant for subclassing.
`rv_discrete` is a base class to construct specific distribution classes
and instances for discrete random variables. It can also be used
to construct an arbitrary distribution defined by a list of support
points and corresponding probabilities.
Parameters
----------
a : float, optional
Lower bound of the support of the distribution, default: 0
b : float, optional
Upper bound of the support of the distribution, default: plus infinity
moment_tol : float, optional
The tolerance for the generic calculation of moments.
values : tuple of two array_like, optional
``(xk, pk)`` where ``xk`` are integers and ``pk`` are the non-zero
probabilities between 0 and 1 with ``sum(pk) = 1``. ``xk``
and ``pk`` must have the same shape.
inc : integer, optional
Increment for the support of the distribution.
Default is 1. (other values have not been tested)
badvalue : float, optional
The value in a result array that indicates a value for which
some argument restriction is violated, default is np.nan.
name : str, optional
The name of the instance. This string is used to construct the default
example for distributions.
longname : str, optional
This string is used as part of the first line of the docstring returned
when a subclass has no docstring of its own. Note: `longname` exists
for backwards compatibility, do not use for new subclasses.
shapes : str, optional
The shape of the distribution. For example "m, n" for a distribution
that takes two integers as the two shape arguments for all its methods.
If not provided, shape parameters will be inferred from
the signatures of the private methods, ``_pmf`` and ``_cdf`` of
the instance.
extradoc : str, optional
This string is used as the last part of the docstring returned when a
subclass has no docstring of its own. Note: `extradoc` exists for
backwards compatibility, do not use for new subclasses.
seed : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
Methods
-------
rvs
pmf
logpmf
cdf
logcdf
sf
logsf
ppf
isf
moment
stats
entropy
expect
median
mean
std
var
interval
__call__
support
Notes
-----
This class is similar to `rv_continuous`. Whether a shape parameter is
valid is decided by an ``_argcheck`` method (which defaults to checking
that its arguments are strictly positive.)
The main differences are:
- the support of the distribution is a set of integers
- instead of the probability density function, ``pdf`` (and the
corresponding private ``_pdf``), this class defines the
*probability mass function*, `pmf` (and the corresponding
private ``_pmf``.)
- scale parameter is not defined.
To create a new discrete distribution, we would do the following:
>>> from scipy.stats import rv_discrete
>>> class poisson_gen(rv_discrete):
... "Poisson distribution"
... def _pmf(self, k, mu):
... return exp(-mu) * mu**k / factorial(k)
and create an instance::
>>> poisson = poisson_gen(name="poisson")
Note that above we defined the Poisson distribution in the standard form.
Shifting the distribution can be done by providing the ``loc`` parameter
to the methods of the instance. For example, ``poisson.pmf(x, mu, loc)``
delegates the work to ``poisson._pmf(x-loc, mu)``.
**Discrete distributions from a list of probabilities**
Alternatively, you can construct an arbitrary discrete rv defined
on a finite set of values ``xk`` with ``Prob{X=xk} = pk`` by using the
``values`` keyword argument to the `rv_discrete` constructor.
Examples
--------
Custom made discrete distribution:
>>> from scipy import stats
>>> xk = np.arange(7)
>>> pk = (0.1, 0.2, 0.3, 0.1, 0.1, 0.0, 0.2)
>>> custm = stats.rv_discrete(name='custm', values=(xk, pk))
>>>
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
>>> ax.plot(xk, custm.pmf(xk), 'ro', ms=12, mec='r')
>>> ax.vlines(xk, 0, custm.pmf(xk), colors='r', lw=4)
>>> plt.show()
Random number generation:
>>> R = custm.rvs(size=100)
"""
def __new__(cls, a=0, b=inf, name=None, badvalue=None,
moment_tol=1e-8, values=None, inc=1, longname=None,
shapes=None, extradoc=None, seed=None):
if values is not None:
# dispatch to a subclass
return super(rv_discrete, cls).__new__(rv_sample)
else:
# business as usual
return super(rv_discrete, cls).__new__(cls)
def __init__(self, a=0, b=inf, name=None, badvalue=None,
moment_tol=1e-8, values=None, inc=1, longname=None,
shapes=None, extradoc=None, seed=None):
super().__init__(seed)
# cf generic freeze
self._ctor_param = dict(
a=a, b=b, name=name, badvalue=badvalue,
moment_tol=moment_tol, values=values, inc=inc,
longname=longname, shapes=shapes, extradoc=extradoc, seed=seed)
if badvalue is None:
badvalue = nan
self.badvalue = badvalue
self.a = a
self.b = b
self.moment_tol = moment_tol
self.inc = inc
self.shapes = shapes
if values is not None:
raise ValueError("rv_discrete.__init__(..., values != None, ...)")
self._construct_argparser(meths_to_inspect=[self._pmf, self._cdf],
locscale_in='loc=0',
# scale=1 for discrete RVs
locscale_out='loc, 1')
self._attach_methods()
self._construct_docstrings(name, longname, extradoc)
def __getstate__(self):
dct = self.__dict__.copy()
# these methods will be remade in __setstate__
attrs = ["_parse_args", "_parse_args_stats", "_parse_args_rvs",
"_cdfvec", "_ppfvec", "generic_moment"]
[dct.pop(attr, None) for attr in attrs]
return dct
def _attach_methods(self):
"""Attaches dynamically created methods to the rv_discrete instance."""
self._cdfvec = vectorize(self._cdf_single, otypes='d')
self.vecentropy = vectorize(self._entropy)
# _attach_methods is responsible for calling _attach_argparser_methods
self._attach_argparser_methods()
# nin correction needs to be after we know numargs
# correct nin for generic moment vectorization
_vec_generic_moment = vectorize(_drv2_moment, otypes='d')
_vec_generic_moment.nin = self.numargs + 2
self.generic_moment = types.MethodType(_vec_generic_moment, self)
# correct nin for ppf vectorization
_vppf = vectorize(_drv2_ppfsingle, otypes='d')
_vppf.nin = self.numargs + 2
self._ppfvec = types.MethodType(_vppf, self)
# now that self.numargs is defined, we can adjust nin
self._cdfvec.nin = self.numargs + 1
def _construct_docstrings(self, name, longname, extradoc):
if name is None:
name = 'Distribution'
self.name = name
self.extradoc = extradoc
# generate docstring for subclass instances
if longname is None:
if name[0] in 'aeiouAEIOU':
hstr = "An "
else:
hstr = "A "
longname = hstr + name
if sys.flags.optimize < 2:
# Skip adding docstrings if interpreter is run with -OO
if self.__doc__ is None:
self._construct_default_doc(longname=longname,
extradoc=extradoc,
docdict=docdict_discrete,
discrete='discrete')
else:
dct = dict(distdiscrete)
self._construct_doc(docdict_discrete, dct.get(self.name))
# discrete RV do not have the scale parameter, remove it
self.__doc__ = self.__doc__.replace(
'\n scale : array_like, '
'optional\n scale parameter (default=1)', '')
def _updated_ctor_param(self):
"""Return the current version of _ctor_param, possibly updated by user.
Used by freezing.
Keep this in sync with the signature of __init__.
"""
dct = self._ctor_param.copy()
dct['a'] = self.a
dct['b'] = self.b
dct['badvalue'] = self.badvalue
dct['moment_tol'] = self.moment_tol
dct['inc'] = self.inc
dct['name'] = self.name
dct['shapes'] = self.shapes
dct['extradoc'] = self.extradoc
return dct
def _nonzero(self, k, *args):
return floor(k) == k
def _pmf(self, k, *args):
return self._cdf(k, *args) - self._cdf(k-1, *args)
def _logpmf(self, k, *args):
return log(self._pmf(k, *args))
def _cdf_single(self, k, *args):
_a, _b = self._get_support(*args)
m = arange(int(_a), k+1)
return np.sum(self._pmf(m, *args), axis=0)
def _cdf(self, x, *args):
k = floor(x)
return self._cdfvec(k, *args)
# generic _logcdf, _sf, _logsf, _ppf, _isf, _rvs defined in rv_generic
def rvs(self, *args, **kwargs):
"""Random variates of given type.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
size : int or tuple of ints, optional
Defining number of random variates (Default is 1). Note that `size`
has to be given as keyword, not as positional argument.
random_state : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance
then that instance is used.
Returns
-------
rvs : ndarray or scalar
Random variates of given `size`.
"""
kwargs['discrete'] = True
return super().rvs(*args, **kwargs)
def pmf(self, k, *args, **kwds):
"""Probability mass function at k of the given RV.
Parameters
----------
k : array_like
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
Location parameter (default=0).
Returns
-------
pmf : array_like
Probability mass function evaluated at k
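        Examples
        --------
        A small, illustrative check (assumes ``stats.poisson`` is available;
        the value equals ``exp(-1)``):
        >>> from scipy import stats
        >>> round(float(stats.poisson.pmf(0, 1.0)), 4)
        0.3679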
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
_a, _b = self._get_support(*args)
k = asarray((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= _a) & (k <= _b) & self._nonzero(k, *args)
cond = cond0 & cond1
output = zeros(shape(cond), 'd')
place(output, (1-cond0) + np.isnan(k), self.badvalue)
if np.any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, np.clip(self._pmf(*goodargs), 0, 1))
if output.ndim == 0:
return output[()]
return output
def logpmf(self, k, *args, **kwds):
"""Log of the probability mass function at k of the given RV.
Parameters
----------
k : array_like
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter. Default is 0.
Returns
-------
logpmf : array_like
Log of the probability mass function evaluated at k.
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
_a, _b = self._get_support(*args)
k = asarray((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= _a) & (k <= _b) & self._nonzero(k, *args)
cond = cond0 & cond1
output = empty(shape(cond), 'd')
output.fill(NINF)
place(output, (1-cond0) + np.isnan(k), self.badvalue)
if np.any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, self._logpmf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def cdf(self, k, *args, **kwds):
"""Cumulative distribution function of the given RV.
Parameters
----------
k : array_like, int
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
cdf : ndarray
Cumulative distribution function evaluated at `k`.
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
_a, _b = self._get_support(*args)
k = asarray((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= _a) & (k < _b)
cond2 = (k >= _b)
cond = cond0 & cond1
output = zeros(shape(cond), 'd')
place(output, cond2*(cond0 == cond0), 1.0)
place(output, (1-cond0) + np.isnan(k), self.badvalue)
if np.any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, np.clip(self._cdf(*goodargs), 0, 1))
if output.ndim == 0:
return output[()]
return output
def logcdf(self, k, *args, **kwds):
"""Log of the cumulative distribution function at k of the given RV.
Parameters
----------
k : array_like, int
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
logcdf : array_like
Log of the cumulative distribution function evaluated at k.
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
_a, _b = self._get_support(*args)
k = asarray((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= _a) & (k < _b)
cond2 = (k >= _b)
cond = cond0 & cond1
output = empty(shape(cond), 'd')
output.fill(NINF)
place(output, (1-cond0) + np.isnan(k), self.badvalue)
place(output, cond2*(cond0 == cond0), 0.0)
if np.any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, self._logcdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def sf(self, k, *args, **kwds):
"""Survival function (1 - `cdf`) at k of the given RV.
Parameters
----------
k : array_like
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
sf : array_like
Survival function evaluated at k.
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
_a, _b = self._get_support(*args)
k = asarray(k-loc)
cond0 = self._argcheck(*args)
cond1 = (k >= _a) & (k < _b)
cond2 = (k < _a) & cond0
cond = cond0 & cond1
output = zeros(shape(cond), 'd')
place(output, (1-cond0) + np.isnan(k), self.badvalue)
place(output, cond2, 1.0)
if np.any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, np.clip(self._sf(*goodargs), 0, 1))
if output.ndim == 0:
return output[()]
return output
def logsf(self, k, *args, **kwds):
"""Log of the survival function of the given RV.
Returns the log of the "survival function," defined as 1 - `cdf`,
evaluated at `k`.
Parameters
----------
k : array_like
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
logsf : ndarray
Log of the survival function evaluated at `k`.
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
_a, _b = self._get_support(*args)
k = asarray(k-loc)
cond0 = self._argcheck(*args)
cond1 = (k >= _a) & (k < _b)
cond2 = (k < _a) & cond0
cond = cond0 & cond1
output = empty(shape(cond), 'd')
output.fill(NINF)
place(output, (1-cond0) + np.isnan(k), self.badvalue)
place(output, cond2, 0.0)
if np.any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, self._logsf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def ppf(self, q, *args, **kwds):
"""Percent point function (inverse of `cdf`) at q of the given RV.
Parameters
----------
q : array_like
Lower tail probability.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
k : array_like
Quantile corresponding to the lower tail probability, q.
"""
args, loc, _ = self._parse_args(*args, **kwds)
q, loc = map(asarray, (q, loc))
args = tuple(map(asarray, args))
_a, _b = self._get_support(*args)
cond0 = self._argcheck(*args) & (loc == loc)
cond1 = (q > 0) & (q < 1)
cond2 = (q == 1) & cond0
cond = cond0 & cond1
output = np.full(shape(cond), fill_value=self.badvalue, dtype='d')
        # output type 'd' to handle nan and inf
place(output, (q == 0)*(cond == cond), _a-1 + loc)
place(output, cond2, _b + loc)
if np.any(cond):
goodargs = argsreduce(cond, *((q,)+args+(loc,)))
loc, goodargs = goodargs[-1], goodargs[:-1]
place(output, cond, self._ppf(*goodargs) + loc)
if output.ndim == 0:
return output[()]
return output
def isf(self, q, *args, **kwds):
"""Inverse survival function (inverse of `sf`) at q of the given RV.
Parameters
----------
q : array_like
Upper tail probability.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
k : ndarray or scalar
Quantile corresponding to the upper tail probability, q.
"""
args, loc, _ = self._parse_args(*args, **kwds)
q, loc = map(asarray, (q, loc))
args = tuple(map(asarray, args))
_a, _b = self._get_support(*args)
cond0 = self._argcheck(*args) & (loc == loc)
cond1 = (q > 0) & (q < 1)
cond2 = (q == 1) & cond0
cond = cond0 & cond1
# same problem as with ppf; copied from ppf and changed
output = np.full(shape(cond), fill_value=self.badvalue, dtype='d')
        # output type 'd' to handle nan and inf
place(output, (q == 0)*(cond == cond), _b)
place(output, cond2, _a-1)
# call place only if at least 1 valid argument
if np.any(cond):
goodargs = argsreduce(cond, *((q,)+args+(loc,)))
loc, goodargs = goodargs[-1], goodargs[:-1]
# PB same as ticket 766
place(output, cond, self._isf(*goodargs) + loc)
if output.ndim == 0:
return output[()]
return output
def _entropy(self, *args):
if hasattr(self, 'pk'):
return stats.entropy(self.pk)
else:
_a, _b = self._get_support(*args)
return _expect(lambda x: entr(self.pmf(x, *args)),
_a, _b, self.ppf(0.5, *args), self.inc)
def expect(self, func=None, args=(), loc=0, lb=None, ub=None,
conditional=False, maxcount=1000, tolerance=1e-10, chunksize=32):
"""
Calculate expected value of a function with respect to the distribution
for discrete distribution by numerical summation.
Parameters
----------
func : callable, optional
Function for which the expectation value is calculated.
Takes only one argument.
The default is the identity mapping f(k) = k.
args : tuple, optional
Shape parameters of the distribution.
loc : float, optional
Location parameter.
Default is 0.
lb, ub : int, optional
Lower and upper bound for the summation, default is set to the
support of the distribution, inclusive (``lb <= k <= ub``).
conditional : bool, optional
If true then the expectation is corrected by the conditional
probability of the summation interval. The return value is the
expectation of the function, `func`, conditional on being in
the given interval (k such that ``lb <= k <= ub``).
Default is False.
maxcount : int, optional
Maximal number of terms to evaluate (to avoid an endless loop for
an infinite sum). Default is 1000.
tolerance : float, optional
Absolute tolerance for the summation. Default is 1e-10.
chunksize : int, optional
Iterate over the support of a distributions in chunks of this size.
Default is 32.
Returns
-------
expect : float
Expected value.
Notes
-----
        For heavy-tailed distributions, the expected value may or may not exist,
        depending on the function, `func`. If it does exist, but the sum converges
        slowly, the accuracy of the result may be rather low. For instance, for
        ``zipf(4)``, the accuracy of the mean and variance estimates in the example
        is only about 1e-5. Increasing `maxcount` and/or `chunksize` may improve
        the result, but may also make zipf very slow.
The function is not vectorized.
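        Examples
        --------
        A brief, illustrative check: the factorial moment ``E[X(X-1)]`` of a
        Poisson(mu) variable equals ``mu**2``.
        >>> from scipy import stats
        >>> round(float(stats.poisson.expect(lambda x: x*(x-1), args=(2.5,))), 6)
        6.25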
"""
if func is None:
def fun(x):
# loc and args from outer scope
return (x+loc)*self._pmf(x, *args)
else:
def fun(x):
# loc and args from outer scope
return func(x+loc)*self._pmf(x, *args)
# used pmf because _pmf does not check support in randint and there
# might be problems(?) with correct self.a, self.b at this stage maybe
# not anymore, seems to work now with _pmf
_a, _b = self._get_support(*args)
if lb is None:
lb = _a
else:
lb = lb - loc # convert bound for standardized distribution
if ub is None:
ub = _b
else:
ub = ub - loc # convert bound for standardized distribution
if conditional:
invfac = self.sf(lb-1, *args) - self.sf(ub, *args)
else:
invfac = 1.0
if isinstance(self, rv_sample):
res = self._expect(fun, lb, ub)
return res / invfac
# iterate over the support, starting from the median
x0 = self.ppf(0.5, *args)
res = _expect(fun, lb, ub, x0, self.inc, maxcount, tolerance, chunksize)
return res / invfac
def _expect(fun, lb, ub, x0, inc, maxcount=1000, tolerance=1e-10,
chunksize=32):
"""Helper for computing the expectation value of `fun`."""
# short-circuit if the support size is small enough
if (ub - lb) <= chunksize:
supp = np.arange(lb, ub+1, inc)
vals = fun(supp)
return np.sum(vals)
# otherwise, iterate starting from x0
if x0 < lb:
x0 = lb
if x0 > ub:
x0 = ub
count, tot = 0, 0.
# iterate over [x0, ub] inclusive
for x in _iter_chunked(x0, ub+1, chunksize=chunksize, inc=inc):
count += x.size
delta = np.sum(fun(x))
tot += delta
if abs(delta) < tolerance * x.size:
break
if count > maxcount:
warnings.warn('expect(): sum did not converge', RuntimeWarning)
return tot
# iterate over [lb, x0)
for x in _iter_chunked(x0-1, lb-1, chunksize=chunksize, inc=-inc):
count += x.size
delta = np.sum(fun(x))
tot += delta
if abs(delta) < tolerance * x.size:
break
if count > maxcount:
warnings.warn('expect(): sum did not converge', RuntimeWarning)
break
return tot
def _iter_chunked(x0, x1, chunksize=4, inc=1):
"""Iterate from x0 to x1 in chunks of chunksize and steps inc.
x0 must be finite, x1 need not be. In the latter case, the iterator is
infinite.
Handles both x0 < x1 and x0 > x1. In the latter case, iterates downwards
(make sure to set inc < 0.)
>>> [x for x in _iter_chunked(2, 5, inc=2)]
[array([2, 4])]
>>> [x for x in _iter_chunked(2, 11, inc=2)]
[array([2, 4, 6, 8]), array([10])]
>>> [x for x in _iter_chunked(2, -5, inc=-2)]
[array([ 2, 0, -2, -4])]
>>> [x for x in _iter_chunked(2, -9, inc=-2)]
[array([ 2, 0, -2, -4]), array([-6, -8])]
"""
if inc == 0:
raise ValueError('Cannot increment by zero.')
if chunksize <= 0:
raise ValueError('Chunk size must be positive; got %s.' % chunksize)
s = 1 if inc > 0 else -1
stepsize = abs(chunksize * inc)
x = x0
while (x - x1) * inc < 0:
delta = min(stepsize, abs(x - x1))
step = delta * s
supp = np.arange(x, x + step, inc)
x += step
yield supp
class rv_sample(rv_discrete):
"""A 'sample' discrete distribution defined by the support and values.
    The constructor ignores most of the arguments; it only needs the `values` argument.
"""
def __init__(self, a=0, b=inf, name=None, badvalue=None,
moment_tol=1e-8, values=None, inc=1, longname=None,
shapes=None, extradoc=None, seed=None):
super(rv_discrete, self).__init__(seed)
if values is None:
raise ValueError("rv_sample.__init__(..., values=None,...)")
# cf generic freeze
self._ctor_param = dict(
a=a, b=b, name=name, badvalue=badvalue,
moment_tol=moment_tol, values=values, inc=inc,
longname=longname, shapes=shapes, extradoc=extradoc, seed=seed)
if badvalue is None:
badvalue = nan
self.badvalue = badvalue
self.moment_tol = moment_tol
self.inc = inc
self.shapes = shapes
self.vecentropy = self._entropy
xk, pk = values
if np.shape(xk) != np.shape(pk):
raise ValueError("xk and pk must have the same shape.")
if np.less(pk, 0.0).any():
raise ValueError("All elements of pk must be non-negative.")
if not np.allclose(np.sum(pk), 1):
raise ValueError("The sum of provided pk is not 1.")
indx = np.argsort(np.ravel(xk))
self.xk = np.take(np.ravel(xk), indx, 0)
self.pk = np.take(np.ravel(pk), indx, 0)
self.a = self.xk[0]
self.b = self.xk[-1]
self.qvals = np.cumsum(self.pk, axis=0)
self.shapes = ' ' # bypass inspection
self._construct_argparser(meths_to_inspect=[self._pmf],
locscale_in='loc=0',
# scale=1 for discrete RVs
locscale_out='loc, 1')
self._attach_methods()
self._construct_docstrings(name, longname, extradoc)
def __getstate__(self):
dct = self.__dict__.copy()
# these methods will be remade in rv_generic.__setstate__,
# which calls rv_generic._attach_methods
attrs = ["_parse_args", "_parse_args_stats", "_parse_args_rvs"]
[dct.pop(attr, None) for attr in attrs]
return dct
def _attach_methods(self):
"""Attaches dynamically created argparser methods."""
self._attach_argparser_methods()
def _get_support(self, *args):
"""Return the support of the (unscaled, unshifted) distribution.
Parameters
----------
arg1, arg2, ... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
Returns
-------
a, b : numeric (float, or int or +/-np.inf)
end-points of the distribution's support.
"""
return self.a, self.b
def _pmf(self, x):
return np.select([x == k for k in self.xk],
[np.broadcast_arrays(p, x)[0] for p in self.pk], 0)
def _cdf(self, x):
xx, xxk = np.broadcast_arrays(x[:, None], self.xk)
indx = np.argmax(xxk > xx, axis=-1) - 1
return self.qvals[indx]
def _ppf(self, q):
qq, sqq = np.broadcast_arrays(q[..., None], self.qvals)
indx = argmax(sqq >= qq, axis=-1)
return self.xk[indx]
def _rvs(self, size=None, random_state=None):
# Need to define it explicitly, otherwise .rvs() with size=None
# fails due to explicit broadcasting in _ppf
U = random_state.uniform(size=size)
if size is None:
U = np.array(U, ndmin=1)
Y = self._ppf(U)[0]
else:
Y = self._ppf(U)
return Y
def _entropy(self):
return stats.entropy(self.pk)
def generic_moment(self, n):
n = asarray(n)
return np.sum(self.xk**n[np.newaxis, ...] * self.pk, axis=0)
def _expect(self, fun, lb, ub, *args, **kwds):
# ignore all args, just do a brute force summation
supp = self.xk[(lb <= self.xk) & (self.xk <= ub)]
vals = fun(supp)
return np.sum(vals)
def _check_shape(argshape, size):
"""
This is a utility function used by `_rvs()` in the class geninvgauss_gen.
It compares the tuple argshape to the tuple size.
Parameters
----------
argshape : tuple of integers
Shape of the arguments.
size : tuple of integers or integer
Size argument of rvs().
Returns
-------
The function returns two tuples, scalar_shape and bc.
scalar_shape : tuple
Shape to which the 1-d array of random variates returned by
_rvs_scalar() is converted when it is copied into the
output array of _rvs().
bc : tuple of booleans
        bc is a tuple of the same length as size. bc[j] is True if the data
associated with that index is generated in one call of _rvs_scalar().
"""
scalar_shape = []
bc = []
for argdim, sizedim in zip_longest(argshape[::-1], size[::-1],
fillvalue=1):
if sizedim > argdim or (argdim == sizedim == 1):
scalar_shape.append(sizedim)
bc.append(True)
else:
bc.append(False)
return tuple(scalar_shape[::-1]), tuple(bc[::-1])
def get_distribution_names(namespace_pairs, rv_base_class):
"""Collect names of statistical distributions and their generators.
Parameters
----------
namespace_pairs : sequence
A snapshot of (name, value) pairs in the namespace of a module.
rv_base_class : class
The base class of random variable generator classes in a module.
Returns
-------
distn_names : list of strings
Names of the statistical distributions.
distn_gen_names : list of strings
Names of the generators of the statistical distributions.
Note that these are not simply the names of the statistical
distributions, with a _gen suffix added.
"""
distn_names = []
distn_gen_names = []
for name, value in namespace_pairs:
if name.startswith('_'):
continue
if name.endswith('_gen') and issubclass(value, rv_base_class):
distn_gen_names.append(name)
if isinstance(value, rv_base_class):
distn_names.append(name)
return distn_names, distn_gen_names
| bsd-3-clause |
Barmaley-exe/scikit-learn | sklearn/metrics/setup.py | 299 | 1024 | import os
import os.path
import numpy
from numpy.distutils.misc_util import Configuration
from sklearn._build_utils import get_blas_info
def configuration(parent_package="", top_path=None):
config = Configuration("metrics", parent_package, top_path)
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
config.add_extension("pairwise_fast",
sources=["pairwise_fast.c"],
include_dirs=[os.path.join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
libraries=cblas_libs,
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
**blas_info)
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(**configuration().todict())
| bsd-3-clause |
odinaryk/Gungnir | advance.py | 1 | 2826 | #encoding=utf-8
#save function: persist the average prices for each search
#`basket` is the list of scraped goods
#`name` is the text entered in the search box
import json
import os
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
import time
#the order of sources is tmall, amazon, jingdong, one
num=3
#save the average prices of each search
#if there are already records for this search name,
#append a new price record to its list
#otherwise
#add a new key-value pair to the dictionary
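#assumed structure of saves.json written below (illustrative):
#  {"<search name>": [["06-18 12:00", avg_tmall, avg_amazon, avg_jd, avg_one], ...]}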
def saveprice(basket,tname):
cur_time=time.strftime("%m-%d %H:%M",time.localtime(time.time()))
summary=0
if os.path.exists(r'saves.json'):
file_object = open("saves.json",'r+')
dic=json.load(file_object)
else:
dic={}
prices=[]
prices.append(cur_time)
oldprices=[]
pricelist=[]
names=['tmall','amazon','jd','one']
identify=['tmall','amazon','jd','yhd']
tmp=[[],[],[],[]]
print tname
for item in basket:
for i in range(len(identify)):
if item['source'].find(identify[i])!=-1:
tmp[i].append(float(item['price']))
for lst in tmp:
if len(lst)!=0:
prices.append(sum(lst)/len(lst))
else:
prices.append(-1)
if(dic.has_key(tname)):
oldprices=dic[tname]
oldprices.append(prices)
dic[tname]=oldprices
else:
print 1
pricelist.append(prices)
dic[tname]=pricelist
#write back
if os.path.exists(r'saves.json'):
        file_object.close()
    f=open("saves.json",'w')
    json.dump(dic,f, encoding = "gb2312")
    f.close()
def drawpic(name):
file_object = open('saves.json','r')
dic=json.load(file_object)
file_object.close()
x_time=[]
y_tmall=[]
y_amazon=[]
y_jingdong=[]
y_one=[]
x=[]
if dic.has_key(name):
for i in range(0,len(dic[name])):
x_time.append(dic[name][i][0])
y_tmall.append(dic[name][i][1])
y_amazon.append(dic[name][i][2])
y_jingdong.append(dic[name][i][3])
y_one.append(dic[name][i][4])
x.append(0.3*(i+1))
for i in range(0,len(y_tmall)):
print y_tmall[i]
fig = plt.figure()
ax=plt.gca()
plt.xlabel('time')
plt.ylabel('average price')
plt.plot(x, y_tmall,"-or",label="tmall")
plt.plot(x, y_amazon,"-og",label='amazon' )
plt.plot(x, y_jingdong,"-ob" ,label='jd')
plt.plot(x, y_one,"-oc" ,label='one')
ax.legend(loc='best')
#ax.xaxis.set_major_locator(x_time)
ax.set_title('history price')
ticks = ax.set_xticks(x)
ax.set_xticklabels(x_time,rotation=30, fontsize='small')
plt.show()
def creatdic():
    f=open('oneq.json','r')
    dic=json.load(f)
    f.close()
    return dic
def main():
name="饼干"
basket=creatdic()
saveprice(basket,name,num)
drawpic(name)
if __name__ == "__main__":
main()
| gpl-3.0 |
3manuek/scikit-learn | examples/model_selection/plot_confusion_matrix.py | 244 | 2496 | """
================
Confusion matrix
================
Example of confusion matrix usage to evaluate the quality
of the output of a classifier on the iris data set. The
diagonal elements represent the number of points for which
the predicted label is equal to the true label, while
off-diagonal elements are those that are mislabeled by the
classifier. The higher the diagonal values of the confusion
matrix the better, indicating many correct predictions.
The figures show the confusion matrix with and without
normalization by class support size (number of elements
in each class). This kind of normalization can be
interesting in case of class imbalance to have a more
visual interpretation of which class is being misclassified.
Here the results are not as good as they could be as our
choice for the regularization parameter C was not the best.
In real life applications this parameter is usually chosen
using :ref:`grid_search`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.cross_validation import train_test_split
from sklearn.metrics import confusion_matrix
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Split the data into a training set and a test set
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Run classifier, using a model that is too regularized (C too low) to see
# the impact on the results
classifier = svm.SVC(kernel='linear', C=0.01)
y_pred = classifier.fit(X_train, y_train).predict(X_test)
def plot_confusion_matrix(cm, title='Confusion matrix', cmap=plt.cm.Blues):
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(iris.target_names))
plt.xticks(tick_marks, iris.target_names, rotation=45)
plt.yticks(tick_marks, iris.target_names)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
# Compute confusion matrix
cm = confusion_matrix(y_test, y_pred)
np.set_printoptions(precision=2)
print('Confusion matrix, without normalization')
print(cm)
plt.figure()
plot_confusion_matrix(cm)
# Normalize the confusion matrix by row (i.e by the number of samples
# in each class)
cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print('Normalized confusion matrix')
print(cm_normalized)
plt.figure()
plot_confusion_matrix(cm_normalized, title='Normalized confusion matrix')
plt.show()
| bsd-3-clause |
procoder317/scikit-learn | examples/cluster/plot_mini_batch_kmeans.py | 265 | 4081 | """
====================================================================
Comparison of the K-Means and MiniBatchKMeans clustering algorithms
====================================================================
We want to compare the performance of the MiniBatchKMeans and KMeans:
the MiniBatchKMeans is faster, but gives slightly different results (see
:ref:`mini_batch_kmeans`).
We will cluster a set of data, first with KMeans and then with
MiniBatchKMeans, and plot the results.
We will also plot the points that are labelled differently between the two
algorithms.
"""
print(__doc__)
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import MiniBatchKMeans, KMeans
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.datasets.samples_generator import make_blobs
##############################################################################
# Generate sample data
np.random.seed(0)
batch_size = 45
centers = [[1, 1], [-1, -1], [1, -1]]
n_clusters = len(centers)
X, labels_true = make_blobs(n_samples=3000, centers=centers, cluster_std=0.7)
##############################################################################
# Compute clustering with Means
k_means = KMeans(init='k-means++', n_clusters=3, n_init=10)
t0 = time.time()
k_means.fit(X)
t_batch = time.time() - t0
k_means_labels = k_means.labels_
k_means_cluster_centers = k_means.cluster_centers_
k_means_labels_unique = np.unique(k_means_labels)
##############################################################################
# Compute clustering with MiniBatchKMeans
mbk = MiniBatchKMeans(init='k-means++', n_clusters=3, batch_size=batch_size,
n_init=10, max_no_improvement=10, verbose=0)
t0 = time.time()
mbk.fit(X)
t_mini_batch = time.time() - t0
mbk_means_labels = mbk.labels_
mbk_means_cluster_centers = mbk.cluster_centers_
mbk_means_labels_unique = np.unique(mbk_means_labels)
##############################################################################
# Plot result
fig = plt.figure(figsize=(8, 3))
fig.subplots_adjust(left=0.02, right=0.98, bottom=0.05, top=0.9)
colors = ['#4EACC5', '#FF9C34', '#4E9A06']
# We want to have the same colors for the same cluster from the
# MiniBatchKMeans and the KMeans algorithm. Let's pair the cluster centers per
# closest one.
order = pairwise_distances_argmin(k_means_cluster_centers,
mbk_means_cluster_centers)
# KMeans
ax = fig.add_subplot(1, 3, 1)
for k, col in zip(range(n_clusters), colors):
my_members = k_means_labels == k
cluster_center = k_means_cluster_centers[k]
ax.plot(X[my_members, 0], X[my_members, 1], 'w',
markerfacecolor=col, marker='.')
ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
ax.set_title('KMeans')
ax.set_xticks(())
ax.set_yticks(())
plt.text(-3.5, 1.8, 'train time: %.2fs\ninertia: %f' % (
t_batch, k_means.inertia_))
# MiniBatchKMeans
ax = fig.add_subplot(1, 3, 2)
for k, col in zip(range(n_clusters), colors):
my_members = mbk_means_labels == order[k]
cluster_center = mbk_means_cluster_centers[order[k]]
ax.plot(X[my_members, 0], X[my_members, 1], 'w',
markerfacecolor=col, marker='.')
ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
ax.set_title('MiniBatchKMeans')
ax.set_xticks(())
ax.set_yticks(())
plt.text(-3.5, 1.8, 'train time: %.2fs\ninertia: %f' %
(t_mini_batch, mbk.inertia_))
# Initialise the different array to all False
different = (mbk_means_labels == 4)
ax = fig.add_subplot(1, 3, 3)
for k in range(n_clusters):
different += ((k_means_labels == k) != (mbk_means_labels == order[k]))
identic = np.logical_not(different)
ax.plot(X[identic, 0], X[identic, 1], 'w',
markerfacecolor='#bbbbbb', marker='.')
ax.plot(X[different, 0], X[different, 1], 'w',
markerfacecolor='m', marker='.')
ax.set_title('Difference')
ax.set_xticks(())
ax.set_yticks(())
plt.show()
| bsd-3-clause |
kristoforcarlson/nest-simulator-fork | pynest/nest/tests/test_connect_helpers.py | 2 | 17422 | # -*- coding: utf-8 -*-
#
# test_connect_helpers.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import scipy.stats
import nest
import matplotlib.pylab as plt
from scipy.stats import truncexpon
try:
from mpi4py import MPI
haveMPI4Py = True
except ImportError:
haveMPI4Py = False
def gather_data(data_array):
'''
    Gathers data from all MPI processes: if data is a list, all elements are
    collected into a single list; if data is a numpy array, all arrays are
    summed element-wise into one numpy array.
    Returns the gathered data if the rank of the current MPI node is zero,
    and None otherwise.
'''
if haveMPI4Py:
data_array_list = MPI.COMM_WORLD.gather(data_array, root=0)
if MPI.COMM_WORLD.Get_rank() == 0:
if isinstance(data_array, list):
gathered_data = [item for sublist in data_array_list for item in sublist]
else:
gathered_data = sum(data_array_list)
return gathered_data
else:
return None
else:
return data_array
def is_array(data):
'''
Returns True if data is a list or numpy-array and False otherwise.
'''
return isinstance(data, (list, np.ndarray, np.generic))
def mpi_barrier():
if haveMPI4Py:
MPI.COMM_WORLD.Barrier()
def mpi_assert(data_original, data_test, TestCase):
'''
Compares data_original and data_test using assertTrue from the TestCase.
'''
data_original = gather_data(data_original)
# only test if on rank 0
if data_original is not None:
if isinstance(data_original, (np.ndarray, np.generic)) and isinstance(data_test, (np.ndarray, np.generic)):
TestCase.assertTrue(np.allclose(data_original, data_test))
else:
TestCase.assertTrue(data_original == data_test)
def all_equal(x):
'''
Tests if all elements in a list are equal.
Returns True or False
'''
return x.count(x[0]) == len(x)
def get_connectivity_matrix(pop1,pop2):
'''
Returns a connectivity matrix describing all connections from pop1 to pop2
such that M_ij describes the connection between the jth neuron in pop1 to the
ith neuron in pop2.
'''
M = np.zeros((len(pop2),len(pop1)))
connections = nest.GetConnections(pop1,pop2)
index_dic = {}
pop1 = np.asarray(pop1)
pop2 = np.asarray(pop2)
for node in pop1:
index_dic[node] = np.where(pop1==node)[0][0]
for node in pop2:
index_dic[node] = np.where(pop2==node)[0][0]
for conn in connections:
source_id = conn[0]
target_id = conn[1]
M[index_dic[target_id]][index_dic[source_id]] += 1
return M
def get_weighted_connectivity_matrix(pop1,pop2,label):
'''
Returns a weighted connectivity matrix describing all connections from pop1 to pop2
such that M_ij describes the connection between the jth neuron in pop1 to the ith
neuron in pop2.
Only works without multapses.
'''
M = np.zeros((len(pop2),len(pop1)))
connections = nest.GetConnections(pop1,pop2)
index_dic = {}
pop1 = np.asarray(pop1)
pop2 = np.asarray(pop2)
for node in pop1:
index_dic[node] = np.where(pop1==node)[0][0]
for node in pop2:
index_dic[node] = np.where(pop2==node)[0][0]
for conn in connections:
source_id = conn[0]
target_id = conn[1]
weight = nest.GetStatus(nest.GetConnections([source_id],[target_id]))[0][label]
M[index_dic[target_id]][index_dic[source_id]] += weight
return M
def check_synapse(params, values, syn_params, TestCase):
for i, param in enumerate(params):
syn_params[param] = values[i]
TestCase.setUpNetwork(TestCase.conn_dict, syn_params)
for i, param in enumerate(params):
conns = nest.GetStatus(nest.GetConnections(TestCase.pop1, TestCase.pop2))
conn_params = [conn[param] for conn in conns]
TestCase.assertTrue(all_equal(conn_params))
TestCase.assertTrue(conn_params[0] == values[i])
# copied from Masterthesis, Daniel Hjertholm
def counter(x, fan, source_pop, target_pop):
'''
Count similar elements in list.
Parameters
----------
x: Any list.
Return values
-------------
list containing counts of similar elements.
'''
N_p = len(source_pop) if fan == 'in' else len(target_pop) # of pool nodes.
start = min(x)
counts = [0] * N_p
for elem in x:
counts[elem - start] += 1
return counts
def get_degrees(fan, pop1, pop2):
M = get_connectivity_matrix(pop1, pop2)
if fan == 'in':
degrees = np.sum(M,axis=1)
elif fan == 'out':
degrees = np.sum(M,axis=0)
return degrees
# adapted from Masterthesis, Daniel Hjertholm
def get_expected_degrees_fixedDegrees(N, fan, len_source_pop, len_target_pop):
N_d = len_target_pop if fan == 'in' else len_source_pop # of driver nodes.
N_p = len_source_pop if fan == 'in' else len_target_pop # of pool nodes.
expected_degree = N_d * N / float(N_p)
expected = [expected_degree] * N_p
return expected
# adapted from Masterthesis, Daniel Hjertholm
def get_expected_degrees_totalNumber(N, fan, len_source_pop, len_target_pop):
expected_indegree = [N / float(len_target_pop)] * len_target_pop
expected_outdegree = [N / float(len_source_pop)] * len_source_pop
if fan == 'in':
return expected_indegree
elif fan == 'out':
return expected_outdegree
# copied from Masterthesis, Daniel Hjertholm
def get_expected_degrees_bernoulli(p, fan, len_source_pop, len_target_pop):
'''
Calculate expected degree distribution.
Degrees with expected number of observations below e_min are combined
into larger bins.
Return values
-------------
2D array. The four columns contain degree,
expected number of observation, actual number observations, and
the number of bins combined.
'''
n = len_source_pop if fan == 'in' else len_target_pop
n_p = len_target_pop if fan == 'in' else len_source_pop
mid = int(round(n * p))
e_min = 5
# Combine from front.
data_front = []
cumexp = 0.0
bins_combined = 0
for degree in range(mid):
cumexp += scipy.stats.binom.pmf(degree, n, p) * n_p
bins_combined += 1
if cumexp < e_min:
if degree == mid - 1:
if len(data_front) == 0:
raise RuntimeWarning('Not enough data')
deg, exp, obs, num = data_front[-1]
data_front[-1] = (deg, exp + cumexp, obs,
num + bins_combined)
else:
continue
else:
data_front.append((degree - bins_combined + 1, cumexp, 0,
bins_combined))
cumexp = 0.0
bins_combined = 0
# Combine from back.
data_back = []
cumexp = 0.0
bins_combined = 0
for degree in reversed(range(mid, n + 1)):
cumexp += scipy.stats.binom.pmf(degree, n, p) * n_p
bins_combined += 1
if cumexp < e_min:
if degree == mid:
if len(data_back) == 0:
raise RuntimeWarning('Not enough data')
deg, exp, obs, num = data_back[-1]
data_back[-1] = (degree, exp + cumexp, obs,
num + bins_combined)
else:
continue
else:
data_back.append((degree, cumexp, 0, bins_combined))
cumexp = 0.0
bins_combined = 0
data_back.reverse()
expected = np.array(data_front + data_back)
if fan == 'out':
        assert (sum(expected[:, 3]) == len_target_pop + 1), 'Something is wrong'
    else:
        assert (sum(expected[:, 3]) == len_source_pop + 1), 'Something is wrong'
    return expected
# adapted from Masterthesis, Daniel Hjertholm
def reset_seed(seed, nr_threads):
'''
Reset the simulator and seed the PRNGs.
Parameters
----------
seed: PRNG seed value.
'''
nest.ResetKernel()
nest.SetKernelStatus({'local_num_threads': nr_threads})
nr_procs = nest.GetStatus([0])[0]['total_num_virtual_procs']
seeds = [((nr_procs+1)*seed + k) for k in range(nr_procs)]
nest.SetKernelStatus({'rng_seeds': seeds})
nest.SetKernelStatus({'grng_seed': nr_procs*(seed+1) + seed})
# copied from Masterthesis, Daniel Hjertholm
def chi_squared_check(degrees, expected, distribution=None):
'''
Create a single network and compare the resulting degree distribution
with the expected distribution using Pearson's chi-squared GOF test.
Parameters
----------
    degrees : Observed degrees of the generated network.
    expected : Expected degree distribution.
    distribution : Optional name of the connection rule; 'pairwise_bernoulli'
                   triggers the binned comparison.
Return values
-------------
chi-squared statistic.
p-value from chi-squared test.
'''
if distribution == 'pairwise_bernoulli':
observed = {}
for degree in degrees:
if not degree in observed:
observed[degree] = 1
else:
observed[degree] += 1
# Add observations to data structure, combining multiple observations
# where necessary.
expected[:, 2] = 0.0
for row in expected:
for i in range(int(row[3])):
deg = int(row[0]) + i
if deg in observed:
row[2] += observed[deg]
# ddof: adjustment to the degrees of freedom. df = k-1-ddof
return scipy.stats.chisquare(np.array(expected[:, 2]),
np.array(expected[:, 1]))
else:
# ddof: adjustment to the degrees of freedom. df = k-1-ddof
return scipy.stats.chisquare(np.array(degrees), np.array(expected))
# copied from Masterthesis, Daniel Hjertholm
def two_level_check(n_runs, degrees, expected, verbose=True):
'''
Create a network and run chi-squared GOF test n_runs times.
Test whether resulting p-values are uniformly distributed
on [0, 1] using the Kolmogorov-Smirnov GOF test.
Parameters
----------
n_runs : Number of times to repeat chi-squared test.
    degrees : Observed degrees passed on to the chi-squared test.
    expected : Expected degree distribution.
verbose : Boolean value, determining whether to print progress.
Return values
-------------
KS statistic.
p-value from KS test.
'''
pvalues = []
for i in range(n_runs):
if verbose: print("Running test %d of %d." % (i + 1, n_runs))
chi, p = chi_squared_check(degrees, expected)
pvalues.append(p)
ks, p = scipy.stats.kstest(pvalues, 'uniform')
return ks, p
# copied from Masterthesis, Daniel Hjertholm
def adaptive_check(stat_dict, degrees, expected):
'''
Create a single network using Random{Con/Di}vergentConnect
and run a chi-squared GOF test on the connection distribution.
If the result is extreme (high or low), run a two-level test.
Parameters
----------
    stat_dict : Dictionary with keys 'alpha1_low', 'alpha1_up', 'alpha2' and
                'n_runs' controlling the two-level test.
    degrees : Observed degrees of the generated network.
    expected : Expected degree distribution.
Return values
-------------
boolean value. True if test was passed, False otherwise.
'''
chi, p = chi_squared_check(degrees, expected)
if stat_dict['alpha1_low'] < p < stat_dict['alpha1_up']:
return True
else:
ks, p = two_level_check(stat_dict['n_runs'], degrees, expected)
return True if p > stat_dict['alpha2'] else False
def get_clipped_cdf(params):
def clipped_cdf(x):
if params['distribution'] == 'lognormal_clipped':
cdf_low = scipy.stats.lognorm.cdf(params['low'],params['sigma'],0,np.exp(params['mu']))
cdf_x = scipy.stats.lognorm.cdf(x,params['sigma'],0,np.exp(params['mu']))
cdf_high = scipy.stats.lognorm.cdf(params['high'],params['sigma'],0,np.exp(params['mu']))
elif params['distribution'] == 'exponential_clipped':
cdf_low = scipy.stats.expon.cdf(params['low'],0,1./params['lambda'])
cdf_x = scipy.stats.expon.cdf(x,0,1./params['lambda'])
cdf_high = scipy.stats.expon.cdf(params['high'],0,1./params['lambda'])
elif params['distribution'] == 'gamma_clipped':
cdf_low = scipy.stats.gamma.cdf(params['low'],params['order'],0,params['scale'])
cdf_x = scipy.stats.gamma.cdf(x,params['order'],0,params['scale'])
cdf_high = scipy.stats.gamma.cdf(params['high'],params['order'],0,params['scale'])
else:
raise ValueError("Clipped {} distribution not supported.".format(params['distribution']))
cdf = (cdf_x - cdf_low) / (cdf_high - cdf_low)
cdf[cdf < 0] = 0
cdf[cdf > 1] = 1
return cdf
return clipped_cdf
def get_expected_freqs(params,x,N):
if params['distribution'] == 'uniform_int':
expected = scipy.stats.randint.pmf(x,params['low'],params['high']+1)*N
elif params['distribution'] == 'binomial' or params['distribution'] == 'binomial_clipped':
expected = scipy.stats.binom.pmf(x,params['n'],params['p'])*N
elif params['distribution'] == 'poisson':
expected = scipy.stats.poisson.pmf(x,params['lambda'])*N
elif params['distribution'] == 'poisson_clipped':
x = np.arange(scipy.stats.poisson.ppf(1e-8, params['lambda']),scipy.stats.poisson.ppf(0.9999, params['lambda']))
expected = scipy.stats.poisson.pmf(x,params['lambda'])
expected = expected[np.where(x<=params['high'])]
x = x[np.where(x<=params['high'])]
expected = expected[np.where(x>=params['low'])]
expected = expected/np.sum(expected)*N
return expected
def check_ks(pop1, pop2, label, alpha, params):
clipped_dists = ['exponential_clipped', 'gamma_clipped', 'lognormal_clipped']
discrete_dists = ['binomial', 'poisson', 'uniform_int','binomial_clipped', 'poisson_clipped']
M = get_weighted_connectivity_matrix(pop1,pop2,label)
M = M.flatten()
if params['distribution'] in discrete_dists:
frequencies = scipy.stats.itemfreq(M)
expected = get_expected_freqs(params, frequencies[:, 0], len(M))
chi, p = scipy.stats.chisquare(frequencies[:, 1],expected)
elif params['distribution'] in clipped_dists:
D, p = scipy.stats.kstest(M, get_clipped_cdf(params), alternative='two-sided')
else:
if params['distribution'] == 'normal':
distrib = scipy.stats.norm
args = params['mu'],params['sigma']
elif params['distribution'] == 'normal_clipped':
distrib = scipy.stats.truncnorm
args = ((params['low']-params['mu'])/params['sigma'] if 'low' in params else -np.inf,
                    (params['high']-params['mu'])/params['sigma'] if 'high' in params else np.inf,
params['mu'], params['sigma'])
elif params['distribution'] == 'exponential':
distrib = scipy.stats.expon
args = 0, 1./params['lambda']
elif params['distribution'] == 'gamma':
distrib = scipy.stats.gamma
args = params['order'], 0, params['scale']
elif params['distribution'] == 'lognormal':
distrib = scipy.stats.lognorm
args = params['sigma'], 0, np.exp(params['mu'])
elif params['distribution'] == 'uniform':
distrib = scipy.stats.uniform
args = params['low'], params['high']-params['low']
else:
raise ValueError("{} distribution not supported.".format(params['distribution']))
D, p = scipy.stats.kstest(M, distrib.cdf, args=args, alternative='two-sided')
return p > alpha
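# Example `params` dictionaries accepted by check_ks (illustrative values):
#   {'distribution': 'normal', 'mu': 0.5, 'sigma': 0.1}
#   {'distribution': 'uniform', 'low': 0.5, 'high': 1.5}
#   {'distribution': 'lognormal_clipped', 'mu': 0., 'sigma': 1., 'low': 0.5, 'high': 2.}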
| gpl-2.0 |
cavestruz/L500analysis | plotting/profiles/T_Vr_evolution/Ttot_Vr_evolution/plot_Ttot_Vr_nu_binned_r200m.py | 1 | 3609 | from L500analysis.data_io.get_cluster_data import GetClusterData
from L500analysis.utils.utils import aexp2redshift
from L500analysis.plotting.tools.figure_formatting import *
from L500analysis.plotting.profiles.tools.profiles_percentile \
import *
from L500analysis.plotting.profiles.tools.select_profiles \
import nu_cut, prune_dict
from L500analysis.utils.constants import rbins, linear_rbins
from derived_field_functions import *
color = matplotlib.cm.afmhot_r
matplotlib.rcParams['legend.handlelength'] = 0
matplotlib.rcParams['legend.numpoints'] = 1
matplotlib.rcParams['legend.fontsize'] = 12
aexps = [1.0,0.9,0.8,0.7,0.6,0.5,0.45,0.4,0.35]
nu_threshold = {0:[1.3,2.],1:[2.,2.5],2:[2.5,3.6]}# 1.3, 2, 2.5, 3.6
nu_threshold_key = 0
nu_label = r"%0.1f$\leq\nu_{200m}\leq$%0.1f"%(nu_threshold[nu_threshold_key][0],
nu_threshold[nu_threshold_key][1])
db_name = 'L500_NR_0'
db_dir = '/home/babyostrich/Documents/Repos/L500analysis/'
profiles_list = ['T_mw', 'r_mid',
'vel_gas_rad_std', 'vel_gas_tan_std',
'vel_gas_rad_avg', 'vel_gas_tan_avg',
'Ttot_cm_per_s_2_r200m',
'Vr2_cm_per_s_2_r200m',
'R/R200m']
halo_properties_list=['r200m','M_total_200m','nu_200m']
Ttot_Vr2_ratio=r"$\Xi=T_{tot}/V^2_{r}$"
fXz1=r"$\Xi/\Xi(z=1)$"
pa = PlotAxes(figname='Ttot_Vr2_ratio_200m_nu%01d'%nu_threshold_key,
axes=[[0.15,0.4,0.80,0.55],[0.15,0.15,0.80,0.24]],
axes_labels=[Ttot_Vr2_ratio,fXz1],
xlabel=r"$R/R_{200m}$",
ylog=[True,False],
xlim=(0.2,2),
ylims=[(1e-1,1e2),(0.4,1.6)])
TratioV2={}
plots=[TratioV2]
clkeys=['Ttot_Vr2_ratio_200m']
for aexp in aexps :
cldata = GetClusterData(aexp=aexp,db_name=db_name,
db_dir=db_dir,
profiles_list=profiles_list,
halo_properties_list=halo_properties_list)
nu_cut_hids = nu_cut(nu=cldata['nu_200m'], threshold=nu_threshold[nu_threshold_key])
Ttot = calculate_profiles_mean_variance(prune_dict(d=cldata['Ttot_cm_per_s_2_r200m'],
k=nu_cut_hids))
Vr2 = calculate_profiles_mean_variance(prune_dict(d=cldata['Vr2_cm_per_s_2_r200m'],
k=nu_cut_hids))
TratioV2[aexp] = get_profiles_division_mean_variance(
mean_profile1=Ttot['mean'], var_profile1=Ttot['var'],
mean_profile2=Vr2['mean'], var_profile2=Vr2['var'])
pa.axes[Ttot_Vr2_ratio].plot( rbins, TratioV2[aexp]['mean'],
color=color(aexp),ls='-',
label="$z=%3.1f$" % aexp2redshift(aexp))
for aexp in aexps :
fractional_evolution = get_profiles_division_mean_variance(
mean_profile1=TratioV2[aexp]['mean'],
var_profile1=TratioV2[aexp]['var'],
mean_profile2=TratioV2[0.5]['mean'],
var_profile2=TratioV2[0.5]['var'],
)
pa.axes[fXz1].plot( rbins, fractional_evolution['mean'],
color=color(aexp),ls='-')
pa.axes[Ttot_Vr2_ratio].text(0.2,50.,nu_label)
pa.axes[Ttot_Vr2_ratio].tick_params(labelsize=12)
pa.axes[fXz1].set_yticks(arange(0.6,1.4,0.2))
pa.set_legend(axes_label=Ttot_Vr2_ratio,ncol=3,loc='best', frameon=False)
pa.color_legend_texts(axes_label=Ttot_Vr2_ratio)
pa.savefig()
| mit |
aarchiba/numpy | numpy/doc/creation.py | 118 | 5507 | """
==============
Array Creation
==============
Introduction
============
There are 5 general mechanisms for creating arrays:
1) Conversion from other Python structures (e.g., lists, tuples)
2) Intrinsic numpy array creation objects (e.g., arange, ones, zeros,
etc.)
3) Reading arrays from disk, either from standard or custom formats
4) Creating arrays from raw bytes through the use of strings or buffers
5) Use of special library functions (e.g., random)
This section will not cover means of replicating, joining, or otherwise
expanding or mutating existing arrays. Nor will it cover creating object
arrays or structured arrays. Both of those are covered in their own sections.
Converting Python array_like Objects to Numpy Arrays
====================================================
In general, numerical data arranged in an array-like structure in Python can
be converted to arrays through the use of the array() function. The most
obvious examples are lists and tuples. See the documentation for array() for
details for its use. Some objects may support the array-protocol and allow
conversion to arrays this way. A simple way to find out if the object can be
converted to a numpy array using array() is simply to try it interactively and
see if it works! (The Python Way).
Examples: ::
 >>> x = np.array([2, 3, 1, 0])
 >>> x = np.array([[1, 2.0], [0, 0], (1+1j, 3.)])  # note mix of tuple and lists, and types
 >>> x
 array([[ 1.+0.j,  2.+0.j],
        [ 0.+0.j,  0.+0.j],
        [ 1.+1.j,  3.+0.j]])
Intrinsic Numpy Array Creation
==============================
Numpy has built-in functions for creating arrays from scratch:
zeros(shape) will create an array filled with 0 values with the specified
shape. The default dtype is float64. ::
 >>> np.zeros((2, 3))
 array([[ 0.,  0.,  0.],
        [ 0.,  0.,  0.]])
ones(shape) will create an array filled with 1 values. It is identical to
zeros in all other respects.
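For example (a brief illustration of the same call pattern): ::
 >>> np.ones((2, 3))
 array([[ 1.,  1.,  1.],
        [ 1.,  1.,  1.]])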
arange() will create arrays with regularly incrementing values. Check the
docstring for complete information on the various ways it can be used. A few
examples will be given here: ::
>>> np.arange(10)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> np.arange(2, 10, dtype=np.float)
array([ 2., 3., 4., 5., 6., 7., 8., 9.])
>>> np.arange(2, 3, 0.1)
array([ 2. , 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8, 2.9])
Note that there are some subtleties regarding the last usage that the user
should be aware of that are described in the arange docstring.
linspace() will create arrays with a specified number of elements, and
spaced equally between the specified beginning and end values. For
example: ::
>>> np.linspace(1., 4., 6)
array([ 1. , 1.6, 2.2, 2.8, 3.4, 4. ])
The advantage of this creation function is that one can guarantee the
number of elements and the starting and end point, which arange()
generally will not do for arbitrary start, stop, and step values.
indices() will create a set of arrays (stacked as a one-higher dimensioned
array), one per dimension with each representing variation in that dimension.
An example illustrates much better than a verbal description: ::
>>> np.indices((3,3))
array([[[0, 0, 0], [1, 1, 1], [2, 2, 2]], [[0, 1, 2], [0, 1, 2], [0, 1, 2]]])
This is particularly useful for evaluating functions of multiple dimensions on
a regular grid.
Reading Arrays From Disk
========================
This is presumably the most common case of large array creation. The details,
of course, depend greatly on the format of data on disk and so this section
can only give general pointers on how to handle various formats.
Standard Binary Formats
-----------------------
Various fields have standard formats for array data. The following lists the
ones with known python libraries to read them and return numpy arrays (there
may be others for which it is possible to read and convert to numpy arrays so
check the last section as well)
::
HDF5: PyTables
FITS: PyFITS
Examples of formats that cannot be read directly but for which it is not hard to
convert are those formats supported by libraries like PIL (able to read and
write many image formats such as jpg, png, etc).
Common ASCII Formats
------------------------
Comma Separated Value files (CSV) are widely used (and an export and import
option for programs like Excel). There are a number of ways of reading these
files in Python. There are CSV functions in Python and functions in pylab
(part of matplotlib).
More generic ascii files can be read using the io package in scipy.
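For example, a file of comma-separated numbers could be loaded with
genfromtxt (illustrative snippet; 'data.csv' is a placeholder filename): ::
 >>> data = np.genfromtxt('data.csv', delimiter=',')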
Custom Binary Formats
---------------------
There are a variety of approaches one can use. If the file has a relatively
simple format then one can write a simple I/O library and use the numpy
fromfile() function and .tofile() method to read and write numpy arrays
directly (mind your byteorder though!) If a good C or C++ library exists that
read the data, one can wrap that library with a variety of techniques though
that certainly is much more work and requires significantly more advanced
knowledge to interface with C or C++.
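A minimal sketch of such a round trip with fromfile()/tofile() (illustrative;
'data.bin' is a placeholder filename written to the current directory): ::
 >>> a = np.arange(5, dtype=np.int32)
 >>> a.tofile('data.bin')
 >>> np.fromfile('data.bin', dtype=np.int32)
 array([0, 1, 2, 3, 4], dtype=int32)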
Use of Special Libraries
------------------------
There are libraries that can be used to generate arrays for special purposes
and it isn't possible to enumerate all of them. The most common uses are use
of the many array generation functions in random that can generate arrays of
random values, and some utility functions to generate special matrices (e.g.
diagonal).
"""
from __future__ import division, absolute_import, print_function
| bsd-3-clause |
lixun910/pysal | pysal/lib/cg/ops/tests/test_tabular.py | 1 | 2367 | import unittest as ut
from .. import tabular as ta
from ....common import RTOL, ATOL, pandas, requires as _requires
from ....examples import get_path
from ...shapes import Polygon
from ....io import geotable as pdio
from ... import ops as GIS
import numpy as np
try:
import shapely as shp
except ImportError:
shp = None
PANDAS_EXTINCT = pandas is None
SHAPELY_EXTINCT = shp is None
@ut.skipIf(PANDAS_EXTINCT or SHAPELY_EXTINCT, 'missing pandas or shapely')
class Test_Tabular(ut.TestCase):
def setUp(self):
import pandas as pd
self.columbus = pdio.read_files(get_path('columbus.shp'))
grid = [Polygon([(0,0),(0,1),(1,1),(1,0)]),
Polygon([(0,1),(0,2),(1,2),(1,1)]),
Polygon([(1,2),(2,2),(2,1),(1,1)]),
Polygon([(1,1),(2,1),(2,0),(1,0)])]
regime = [0,0,1,1]
ids = list(range(4))
data = np.array((regime, ids)).T
self.exdf = pd.DataFrame(data, columns=['regime', 'ids'])
self.exdf['geometry'] = grid
@_requires('geopandas')
def test_round_trip(self):
import geopandas as gpd
import pandas as pd
geodf = GIS.tabular.to_gdf(self.columbus)
self.assertIsInstance(geodf, gpd.GeoDataFrame)
new_df = GIS.tabular.to_df(geodf)
self.assertIsInstance(new_df, pd.DataFrame)
for new, old in zip(new_df.geometry, self.columbus.geometry):
self.assertEqual(new, old)
def test_spatial_join(self):
pass
def test_spatial_overlay(self):
pass
def test_dissolve(self):
out = GIS.tabular.dissolve(self.exdf, by='regime')
self.assertEqual(out[0].area, 2.0)
self.assertEqual(out[1].area, 2.0)
answer_vertices0 = [(0,0), (0,1), (0,2), (1,2), (1,1), (1,0), (0,0)]
answer_vertices1 = [(2,1), (2,0), (1,0), (1,1), (1,2), (2,2), (2,1)]
np.testing.assert_allclose(out[0].vertices, answer_vertices0)
np.testing.assert_allclose(out[1].vertices, answer_vertices1)
def test_clip(self):
pass
def test_erase(self):
pass
def test_union(self):
new_geom = GIS.tabular.union(self.exdf)
self.assertEqual(new_geom.area, 4)
def test_intersection(self):
pass
def test_symmetric_difference(self):
pass
def test_difference(self):
pass
| bsd-3-clause |
chengsoonong/mclass-sky | projects/david/lab/logistic_regression.py | 2 | 3101 | import numpy as np
import accpm
import pandas as pd
import scipy.optimize as opt
from scipy.special import expit # The logistic sigmoid function
def cost(w, X, y, c=0):
"""
Returns the cross-entropy error function with (optional) sum-of-squares regularization term.
w -- parameters
X -- dataset of features where each row corresponds to a single sample
y -- dataset of labels where each row corresponds to a single sample
c -- regularization coefficient (default = 0)
"""
outputs = expit(X.dot(w)) # Vector of outputs (or predictions)
return -( y.transpose().dot(np.log(outputs)) + (1-y).transpose().dot(np.log(1-outputs)) ) + c*0.5*w.dot(w)
def grad(w, X, y, c=0):
"""
Returns the gradient of the cross-entropy error function with (optional) sum-of-squares regularization term.
"""
outputs = expit(X.dot(w))
return X.transpose().dot(outputs-y) + c*w
def train(X, y,c=0):
"""
Returns the vector of parameters which minimizes the error function via the BFGS algorithm.
"""
    initial_values = np.zeros(X.shape[1]) # Error occurs if initial_values is set too high
return opt.minimize(cost, initial_values, jac=grad, args=(X,y,c),
method='BFGS', options={'disp' : False, 'gtol' : 1e-03}).x
def predict(w, X):
"""
Returns a vector of predictions.
"""
return expit(X.dot(w))
def compute_accuracy(predictions, y):
"""
predictions -- dataset of predictions (or outputs) from a model
y -- dataset of labels where each row corresponds to a single sample
"""
predictions = predictions.round()
size = predictions.shape[0]
results = predictions == y
correct = np.count_nonzero(results)
accuracy = correct/size
return accuracy
def compute_weights(X_training, Y_training, iterations):
weights = []
size = X_training.shape[0]
index = np.arange(size)
np.random.shuffle(index)
for i in range(1, iterations + 1):
X_i, Y_i = X_training[index[:i]], Y_training[index[:i]]
weight = train(X_i, Y_i)
weights.append(weight)
return weights
def weights_matrix(n, iterations, X_training, Y_training):
matrix_of_weights = []
np.random.seed(1)
for i in range(n):
weights = compute_weights(X_training, Y_training, iterations)
matrix_of_weights.append(weights)
return np.array(matrix_of_weights)
def experiment(n, iterations, X_testing, Y_testing, X_training, Y_training):
matrix_of_weights = weights_matrix(n, iterations, X_training, Y_training)
matrix_of_accuracies = []
for weights in matrix_of_weights:
accuracies = []
for weight in weights:
predictions = predict(weight, X_testing)
accuracy = compute_accuracy(predictions, Y_testing)
accuracies.append(accuracy)
matrix_of_accuracies.append(accuracies)
matrix_of_accuracies = np.array(matrix_of_accuracies)
sum_of_accuracies = matrix_of_accuracies.sum(axis=0)
average_accuracies = sum_of_accuracies/n
return average_accuracies
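# Illustrative usage sketch (not part of the original module; shapes and data are
# hypothetical). X rows are feature vectors with a leading bias column, y holds 0/1 labels:
#   X = np.hstack([np.ones((100, 1)), np.random.randn(100, 2)])
#   y = (X[:, 1] + X[:, 2] > 0).astype(float)
#   w = train(X, y, c=0.1)                      # fit with sum-of-squares regularization
#   acc = compute_accuracy(predict(w, X), y)    # training accuracy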
| bsd-3-clause |
mwoc/pydna | dna/test/test_DoublePinchHex.py | 1 | 2089 | import components as comp
from model import DnaModel
# For plotting:
from numpy import linspace
import matplotlib.pyplot as plt
def round_down(num, divisor):
return num - (num%divisor)
def round_up(num, divisor):
    return num if num % divisor == 0 else num + divisor - (num % divisor)
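# Quick sanity examples for the rounding helpers (illustrative only):
#   round_down(27, 10) -> 20
#   round_up(23, 10)   -> 30
#   round_up(30, 10)   -> 30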
# Actual test:
class DoublePinchHexTest(DnaModel):
def run(self):
heatex = self.addComponent(comp.PinchHex, 'heatex').nodes(1, 2, 3, 4)
self.nodes[1].update({
'media': 'hitec',
't': 430,
'p': 0.857
})
self.nodes[3].update({
'media': 'kalina',
'y': 0.7,
't': 85,
'p': 100,
'mdot': 1
})
heatex.calc(Nseg = 11, dTmin = 5)
return self
def plot(self):
print('Plotting...')
result = self.result['heatex']
_title = '{0} - Pinch: {1:.2f}, eff: {2:.2%}, Q: {3:.2f} [kW]'.format('heatex'.capitalize(), result['dTmin'], result['eff'], result['Q'])
# Plot
x = linspace(0, 1, len(result['Th']))
miny = round_down(min(min(result['Tc']), min(result['Th']))-1, 10)
maxy = round_up(max(max(result['Tc']), max(result['Th']))+1, 10)
plt.plot(x, result['Th'], 'r->', label = 'Hot')
plt.plot(x, result['Tc'], 'b-<', label = 'Cold')
plt.xlabel('Location in HEX')
plt.ylabel(r'Temperature [$^\circ$C]')
plt.title(_title)
plt.ylim(miny, maxy)
plt.grid(True)
plt.savefig('../output/dblPinchHexTest.png')
plt.close()
return self
def analyse(self):
n = self.nodes
print('Hot inlet: ',n[1])
print('Hot outlet: ',n[2])
print('Hot mdot:', n[1]['mdot'], '(expected ~5.8)')
print('Energy difference: ', n[1]['mdot'] * (n[2]['h'] - n[1]['h']),' (expected -2245.094)')
print('Cold inlet: ',n[3])
print('Cold outlet: ',n[4])
print('Cold mdot:', n[3]['mdot'], '(expected ~1)')
print('Energy difference: ', n[3]['mdot'] * (n[4]['h'] - n[3]['h']),' (expected 2245.094)')
return self | bsd-3-clause |
joergdietrich/astropy | astropy/visualization/tests/test_lupton_rgb.py | 2 | 9343 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for RGB Images
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
import os
import tempfile
import numpy as np
from numpy.testing import assert_equal
from ...convolution import convolve, Gaussian2DKernel
from ...tests.helper import pytest
from .. import lupton_rgb
try:
import matplotlib
HAS_MATPLOTLIB = True
except ImportError:
HAS_MATPLOTLIB = False
# Set display=True to get matplotlib imshow windows to help with debugging.
display = False
def display_rgb(rgb, title=None):
"""Display an rgb image using matplotlib (useful for debugging)"""
import matplotlib.pyplot as plt
plt.imshow(rgb, interpolation='nearest', origin='lower')
if title:
plt.title(title)
plt.show()
return plt
def saturate(image, satValue):
"""
Return image with all points above satValue set to NaN.
Simulates saturation on an image, so we can test 'replace_saturated_pixels'
"""
result = image.copy()
saturated = image > satValue
result[saturated] = np.nan
return result
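# Illustrative example (hypothetical values): pixels above satValue become NaN.
#   saturate(np.array([10., 2000.]), 1000.)  ->  array([10., nan])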
def random_array(dtype, N=100):
    return np.array(np.random.random(N)*100, dtype=dtype)
def test_compute_intensity_1_float():
image_r = random_array(np.float64)
intensity = lupton_rgb.compute_intensity(image_r)
assert image_r.dtype == intensity.dtype
assert_equal(image_r, intensity)
def test_compute_intensity_1_uint():
image_r = random_array(np.uint8)
intensity = lupton_rgb.compute_intensity(image_r)
assert image_r.dtype == intensity.dtype
assert_equal(image_r, intensity)
def test_compute_intensity_3_float():
image_r = random_array(np.float64)
image_g = random_array(np.float64)
image_b = random_array(np.float64)
intensity = lupton_rgb.compute_intensity(image_r, image_g, image_b)
assert image_r.dtype == intensity.dtype
assert_equal(intensity, (image_r+image_g+image_b)/3.0)
def test_compute_intensity_3_uint():
image_r = random_array(np.uint8)
image_g = random_array(np.uint8)
image_b = random_array(np.uint8)
intensity = lupton_rgb.compute_intensity(image_r, image_g, image_b)
assert image_r.dtype == intensity.dtype
assert_equal(intensity, (image_r+image_g+image_b)//3)
class TestLuptonRgb(object):
"""A test case for Rgb"""
def setup_method(self, method):
np.random.seed(1000) # so we always get the same images.
self.min_, self.stretch_, self.Q = 0, 5, 20 # asinh
width, height = 85, 75
self.width = width
self.height = height
shape = (width, height)
image_r = np.zeros(shape)
image_g = np.zeros(shape)
image_b = np.zeros(shape)
# pixel locations, values and colors
points = [[15, 15], [50, 45], [30, 30], [45, 15]]
values = [1000, 5500, 600, 20000]
g_r = [1.0, -1.0, 1.0, 1.0]
r_i = [2.0, -0.5, 2.5, 1.0]
# Put pixels in the images.
for p, v, gr, ri in zip(points, values, g_r, r_i):
image_r[p[0], p[1]] = v*pow(10, 0.4*ri)
image_g[p[0], p[1]] = v*pow(10, 0.4*gr)
image_b[p[0], p[1]] = v
# convolve the image with a reasonable PSF, and add Gaussian background noise
def convolve_with_noise(image, psf):
convolvedImage = convolve(image, psf, boundary='extend', normalize_kernel=True)
randomImage = np.random.normal(0, 2, image.shape)
return randomImage + convolvedImage
psf = Gaussian2DKernel(2.5)
self.image_r = convolve_with_noise(image_r, psf)
self.image_g = convolve_with_noise(image_g, psf)
self.image_b = convolve_with_noise(image_b, psf)
def test_Asinh(self):
"""Test creating an RGB image using an asinh stretch"""
asinhMap = lupton_rgb.AsinhMapping(self.min_, self.stretch_, self.Q)
rgbImage = asinhMap.make_rgb_image(self.image_r, self.image_g, self.image_b)
if display:
display_rgb(rgbImage, title=sys._getframe().f_code.co_name)
def test_AsinhZscale(self):
"""Test creating an RGB image using an asinh stretch estimated using zscale"""
map = lupton_rgb.AsinhZScaleMapping(self.image_r, self.image_g, self.image_b)
rgbImage = map.make_rgb_image(self.image_r, self.image_g, self.image_b)
if display:
display_rgb(rgbImage, title=sys._getframe().f_code.co_name)
def test_AsinhZscaleIntensity(self):
"""Test creating an RGB image using an asinh stretch estimated using zscale on the intensity"""
map = lupton_rgb.AsinhZScaleMapping(self.image_r, self.image_g, self.image_b)
rgbImage = map.make_rgb_image(self.image_r, self.image_g, self.image_b)
if display:
display_rgb(rgbImage, title=sys._getframe().f_code.co_name)
def test_AsinhZscaleIntensityPedestal(self):
"""Test creating an RGB image using an asinh stretch estimated using zscale on the intensity
where the images each have a pedestal added"""
pedestal = [100, 400, -400]
self.image_r += pedestal[0]
self.image_g += pedestal[1]
self.image_b += pedestal[2]
map = lupton_rgb.AsinhZScaleMapping(self.image_r, self.image_g, self.image_b, pedestal=pedestal)
rgbImage = map.make_rgb_image(self.image_r, self.image_g, self.image_b)
if display:
display_rgb(rgbImage, title=sys._getframe().f_code.co_name)
def test_AsinhZscaleIntensityBW(self):
"""Test creating a black-and-white image using an asinh stretch estimated
using zscale on the intensity"""
map = lupton_rgb.AsinhZScaleMapping(self.image_r)
rgbImage = map.make_rgb_image(self.image_r, self.image_r, self.image_r)
if display:
display_rgb(rgbImage, title=sys._getframe().f_code.co_name)
@pytest.mark.skipif('not HAS_MATPLOTLIB')
def test_make_rgb(self):
"""Test the function that does it all"""
satValue = 1000.0
with tempfile.NamedTemporaryFile(suffix=".png") as temp:
red = saturate(self.image_r, satValue)
green = saturate(self.image_g, satValue)
blue = saturate(self.image_b, satValue)
lupton_rgb.make_lupton_rgb(red, green, blue, self.min_, self.stretch_, self.Q, filename=temp)
assert os.path.exists(temp.name)
def test_make_rgb_saturated_fix(self):
pytest.skip('saturation correction is not implemented')
satValue = 1000.0
# TODO: Cannot test with these options yet, as that part of the code is not implemented.
with tempfile.NamedTemporaryFile(suffix=".png") as temp:
red = saturate(self.image_r, satValue)
green = saturate(self.image_g, satValue)
blue = saturate(self.image_b, satValue)
lupton_rgb.make_lupton_rgb(red, green, blue, self.min_, self.stretch_, self.Q,
saturated_border_width=1, saturated_pixel_value=2000,
filename=temp)
def test_linear(self):
"""Test using a specified linear stretch"""
map = lupton_rgb.LinearMapping(-8.45, 13.44)
rgbImage = map.make_rgb_image(self.image_r, self.image_g, self.image_b)
if display:
display_rgb(rgbImage, title=sys._getframe().f_code.co_name)
def test_linear_min_max(self):
"""Test using a min/max linear stretch determined from one image"""
map = lupton_rgb.LinearMapping(image=self.image_b)
rgbImage = map.make_rgb_image(self.image_r, self.image_g, self.image_b)
if display:
display_rgb(rgbImage, title=sys._getframe().f_code.co_name)
def test_saturated(self):
"""Test interpolationolating saturated pixels"""
pytest.skip('replaceSaturatedPixels is not implemented in astropy yet')
satValue = 1000.0
self.image_r = saturate(self.image_r, satValue)
self.image_g = saturate(self.image_g, satValue)
self.image_b = saturate(self.image_b, satValue)
lupton_rgb.replaceSaturatedPixels(self.image_r, self.image_g, self.image_b, 1, 2000)
# Check that we replaced those NaNs with some reasonable value
assert np.isfinite(self.image_r.getImage().getArray()).all()
assert np.isfinite(self.image_g.getImage().getArray()).all()
assert np.isfinite(self.image_b.getImage().getArray()).all()
# Prepare for generating an output file
        self.image_r = self.image_r.getImage()
        self.image_g = self.image_g.getImage()
        self.image_b = self.image_b.getImage()
asinhMap = lupton_rgb.AsinhMapping(self.min_, self.stretch_, self.Q)
rgbImage = asinhMap.make_rgb_image(self.image_r, self.image_g, self.image_b)
if display:
display_rgb(rgbImage, title=sys._getframe().f_code.co_name)
def test_different_shapes_asserts(self):
with pytest.raises(ValueError) as excinfo:
# just swap the dimensions to get a differently-shaped 'r'
image_r = self.image_r.reshape(self.height, self.width)
lupton_rgb.make_lupton_rgb(image_r, self.image_g, self.image_b)
assert "shapes must match" in str(excinfo.value)
| bsd-3-clause |
bede/kindel | kindel/kindel.py | 1 | 27863 | # Author: Bede Constantinides - b|at|bede|dot|im, @beconstant
# License: GPL V3
import io
import os
import sys
import argh
import tqdm
import difflib
import simplesam
import subprocess
import scipy.stats
from pprint import pprint
from collections import OrderedDict, defaultdict, namedtuple
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
import numpy as np
import pandas as pd
Region = namedtuple('Region', ['start', 'end', 'seq', 'direction'])
def parse_records(ref_id, ref_len, records):
'''
Iterate over records, returning namedtuple of base frequencies, indels and soft clipping info
weights: lists of dicts of base frequencies after CIGAR reconciliation
insertions, deletions: list of dicts of base frequencies
clip_starts, clip_ends: lists of clipping start and end frequencies in an LTR direction
clip_start_weights, clip_end_weights: base frequencies from left- and right-clipped regions
'''
weights = [{'A':0, 'T':0, 'G':0, 'C':0, 'N':0} for p in range(ref_len)]
clip_start_weights = [{'A':0, 'T':0, 'G':0, 'C':0, 'N':0} for p in range(ref_len)]
clip_end_weights = [{'A':0, 'T':0, 'G':0, 'C':0, 'N':0} for p in range(ref_len)]
clip_starts = [0] * (ref_len + 1)
clip_ends = [0] * (ref_len + 1)
insertions = [defaultdict(int) for p in range(ref_len + 1)]
deletions = [0] * (ref_len + 1)
for record in tqdm.tqdm(records, desc='loading sequences'): # Progress bar
q_pos = 0
r_pos = record.pos-1 # Zero indexed genome coordinates
if not record.mapped or len(record.seq) <= 1: # Skips unmapped, reads with sequence '*'
continue
for i, cigarette in enumerate(record.cigars): # StopIteration -> RuntimeError
length, operation = cigarette
if operation in {'M', '=', 'X'}: # Catch SAM 1.{3,4} matches and subs
for _ in range(length):
q_nt = record.seq[q_pos].upper()
weights[r_pos][q_nt] += 1
r_pos += 1
q_pos += 1
elif operation == 'I':
nts = record.seq[q_pos:q_pos+length].upper()
insertions[r_pos][nts] += 1
q_pos += length
elif operation == 'D':
deletions[r_pos] += 1
r_pos += length
elif operation == 'S':
if i == 0: # Count right-of-gap / l-clipped start positions (e.g. start of ref)
clip_ends[r_pos] += 1
for gap_i in range(length):
q_nt = record.seq[gap_i].upper()
rel_r_pos = r_pos - length + gap_i
if rel_r_pos >= 0:
clip_end_weights[rel_r_pos][q_nt] += 1
q_pos += length
else: # Count left-of-gap / r-clipped start position (e.g. end of ref)
clip_starts[r_pos] += 1
for pos in range(length):
q_nt = record.seq[q_pos].upper()
if r_pos < ref_len:
clip_start_weights[r_pos][q_nt] += 1
r_pos += 1
q_pos += 1
aligned_depth = [sum(w.values()) for w in weights]
weights_consensus_seq = ''.join([consensus(w)[0] for w in weights])
discordant_depth = [sum({nt:w[nt]
for nt in [k for k in w.keys() if k != cns_nt]}.values())
for w, cns_nt in zip(weights, weights_consensus_seq)]
consensus_depth = np.array(aligned_depth) - np.array(discordant_depth)
clip_start_depth = [sum({nt:w[nt] for nt in list('ACGT')}.values()) for w in clip_start_weights]
clip_end_depth = [sum({nt:w[nt] for nt in list('ACGT')}.values()) for w in clip_end_weights]
clip_depth = list(map(lambda x, y: x+y, clip_start_depth, clip_end_depth))
alignment = namedtuple('alignment', ['ref_id', 'weights', 'insertions', 'deletions',
'clip_starts', 'clip_ends',
'clip_start_weights', 'clip_end_weights',
'clip_start_depth', 'clip_end_depth',
'clip_depth', 'consensus_depth'])
return alignment(ref_id, weights, insertions, deletions,
clip_starts, clip_ends,
clip_start_weights, clip_end_weights,
clip_start_depth, clip_end_depth,
clip_depth, consensus_depth)
def parse_bam(bam_path):
'''
Returns alignment information for each reference sequence as an OrderedDict
'''
alignments = OrderedDict()
with open(bam_path, 'r') as bam_fh:
bam = simplesam.Reader(bam_fh)
refs_lens = {n.replace('SN:', ''): int(l[0].replace('LN:', ''))
for n, l in bam.header['@SQ'].items()}
refs_records = defaultdict(list)
        for r in bam:
            refs_records[r.rname].append(r)
if '*' in refs_records:
del refs_records['*']
# assert len(refs_records) <= 1, 'Detected primary mappings to more than one reference'
# Use samtools view to extract single contig primary mappings
# Otherwise would make a useful enhancement
for ref_id, records in refs_records.items():
alignments[ref_id] = parse_records(ref_id, refs_lens[ref_id], records)
return alignments
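# Illustrative usage sketch (hypothetical file name):
#   alignments = parse_bam('sample.bam')
#   for ref_id, aln in alignments.items():
#       aligned_depth = [sum(w.values()) for w in aln.weights]  # per-position depth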
def cdr_start_consensuses(weights, clip_start_weights, clip_start_depth,
clip_decay_threshold, mask_ends):
'''
Returns list of Region instances for right clipped consensuses of clip-dominant region
'''
positions = list(range(len(weights)))
masked_positions = positions[:mask_ends] + positions[-mask_ends:]
regions = []
for pos, sc, w in zip(positions, clip_start_depth, weights):
cdr_positions = [t for u in [list(s)
for s in [range(r.start, r.end)
for r in regions]]
for t in u]
if sc/(sum(w.values())+1) > 0.5 and pos not in masked_positions + cdr_positions:
start_pos = pos
clip_consensus = ''
for pos_, (sc_, sw_, w_) in enumerate(zip(clip_start_depth[pos:],
clip_start_weights[pos:],
weights[pos:])):
if sc_ > sum(w_.values()) * clip_decay_threshold:
clip_consensus += consensus(sw_)[0]
else:
end_pos = start_pos + pos_
break
regions.append(Region(start_pos, end_pos, clip_consensus, '→'))
return regions
def cdr_end_consensuses(weights, clip_end_weights, clip_end_depth, clip_decay_threshold, mask_ends):
'''
Returns list of Region instances for left clipped consensuses of clip-dominant region
'''
positions = list(range(len(weights)))
masked_positions = positions[:mask_ends] + positions[-mask_ends:]
reversed_weights = list(sorted(zip(positions, clip_end_depth, clip_end_weights, weights),
key=lambda x: x[0], reverse=True))
regions = []
for i, (pos, ec, ew, w) in enumerate(reversed_weights):
cdr_positions = [t for u in [list(s)
for s in [range(r.start, r.end)
for r in regions]]
for t in u]
if ec/(sum(w.values())+1) > 0.5 and pos not in masked_positions + cdr_positions:
end_pos = pos + 1 # Start with end since we're iterating in reverse
rev_clip_consensus = None
for pos_, ec_, ew_, w_ in reversed_weights[len(positions)-pos:]:
if ec_ > sum(w_.values()) * clip_decay_threshold:
if not rev_clip_consensus: # Add first base to account for lag in clip coverage
rev_clip_consensus = consensus(clip_end_weights[pos_+1])[0]
rev_clip_consensus += consensus(ew_)[0]
else:
start_pos = pos_
clip_consensus = rev_clip_consensus[::-1]
break
regions.append(Region(start_pos, end_pos, clip_consensus, '←'))
return regions
def cdrp_consensuses(weights, clip_start_weights, clip_end_weights, clip_start_depth,
clip_end_depth, clip_decay_threshold, mask_ends):
'''
Returns list of 2-tuples of L&R clipped consensus sequences around clip-dominant regions
Pairs overlapping right (→) and left (←) clipped sequences around CDRs
'''
combined_cdrs = (cdr_start_consensuses(weights, clip_start_weights, clip_start_depth,
clip_decay_threshold, mask_ends)
+ cdr_end_consensuses(weights, clip_end_weights, clip_end_depth,
clip_decay_threshold, mask_ends))
# print('COMBINED_CDRS: {}'.format(len(combined_cdrs)))
paired_cdrs = []
fwd_cdrs = [r for r in combined_cdrs if r.direction == '→']
rev_cdrs = [r for r in combined_cdrs if r.direction == '←']
for fwd_cdr in fwd_cdrs:
fwd_cdr_range = range(fwd_cdr.start, fwd_cdr.end)
for rev_cdr in rev_cdrs:
rev_cdr_range = range(rev_cdr.start, rev_cdr.end)
if set(fwd_cdr_range).intersection(rev_cdr_range):
paired_cdrs.append((fwd_cdr, rev_cdr))
break
return paired_cdrs
def merge_by_lcs(s1, s2, min_overlap=7):
'''Returns superstring of s1 and s2 about an exact overlap of len > min_overlap'''
s = difflib.SequenceMatcher(None, s1, s2)
pos_a, pos_b, size = s.find_longest_match(0, len(s1), 0, len(s2))
if size < min_overlap:
return None
overlap = s1[pos_a:pos_a+size]
pre = s1[:s1.find(overlap)]
post = s2[s2.find(overlap)+len(overlap):]
return pre + overlap + post
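# Illustrative example (toy sequences): the two inputs share the exact substring
# 'CGTTTT' (length 6 >= min_overlap), so they are merged about that overlap.
#   merge_by_lcs('ACGTACGTTTT', 'CGTTTTGGAA', min_overlap=5) -> 'ACGTACGTTTTGGAA'
#   merge_by_lcs('AAAA', 'TTTT', min_overlap=5)              -> None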
def merge_cdrps(cdrps):
'''Returns merged clip-dominant region pairs as Region instances'''
merged_cdrps = []
for cdrp in cdrps:
fwd_cdr, rev_cdr = cdrp
merged_seq = merge_by_lcs(fwd_cdr.seq, rev_cdr.seq) # Fails as None
merged_cdrps.append(Region(fwd_cdr.start, rev_cdr.end, merged_seq, None))
return merged_cdrps
def consensus(weight):
'''
Returns tuple of consensus base, weight and flag indicating a tie for consensus
'''
base, frequency = max(weight.items(), key=lambda x:x[1]) if sum(weight.values()) else ('N', 0)
weight_sans_consensus = {k:d for k, d in weight.items() if k != base}
tie = True if frequency and frequency in weight_sans_consensus.values() else False
aligned_depth = sum(weight.values())
proportion = round(frequency/aligned_depth, 2) if aligned_depth else 0
return(base, frequency, proportion, tie)
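# Illustrative examples (hypothetical weights); the tuple is (base, count, proportion, tie):
#   consensus({'A': 8, 'C': 2, 'G': 0, 'T': 0, 'N': 0}) -> ('A', 8, 0.8, False)
#   consensus({'A': 5, 'C': 5, 'G': 0, 'T': 0, 'N': 0}) -> ('A', 5, 0.5, True)
#   consensus({'A': 0, 'C': 0, 'G': 0, 'T': 0, 'N': 0}) -> ('N', 0, 0, False)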
def s_overhang_consensus(clip_start_weights, start_pos, min_depth, max_len=500):
'''
Returns consensus sequence (string) of clipped reads at specified position
start_pos is the first position described by the CIGAR-S
'''
consensus_overhang = ''
for pos in range(start_pos, start_pos+max_len):
pos_consensus = consensus(clip_start_weights[pos])
if pos_consensus[1] >= min_depth:
consensus_overhang += pos_consensus[0]
else:
break
return consensus_overhang
def e_overhang_consensus(clip_end_weights, start_pos, min_depth, max_len=500):
'''
Returns consensus sequence (string) of clipped reads at specified position
end_pos is the last position described by the CIGAR-S
'''
rev_consensus_overhang = ''
for pos in range(start_pos, start_pos-max_len, -1):
pos_consensus = consensus(clip_end_weights[pos])
if pos_consensus[1] >= min_depth:
rev_consensus_overhang += pos_consensus[0]
else:
break
consensus_overhang = rev_consensus_overhang[::-1]
return consensus_overhang
def s_flanking_seq(start_pos, weights, min_depth, k):
'''
Returns consensus sequence (string) flanking LHS of soft-clipped gaps
'''
flank_seq = ''
for pos in range(start_pos-k, start_pos):
pos_consensus = consensus(weights[pos])
if pos_consensus[1] >= min_depth:
flank_seq += pos_consensus[0]
return flank_seq
def e_flanking_seq(end_pos, weights, min_depth, k):
'''
Returns consensus sequence (string) flanking RHS of soft-clipped gaps
'''
flank_seq = ''
for pos in range(end_pos+1, end_pos+k+1):
pos_consensus = consensus(weights[pos])
if pos_consensus[1] >= min_depth:
flank_seq += pos_consensus[0]
return flank_seq
def consensus_sequence(weights, clip_start_weights, clip_end_weights, insertions, deletions,
cdr_patches, trim_ends, min_depth, uppercase):
consensus_seq = ''
changes = [None] * len(weights)
skip_positions = 0
for pos, weight in tqdm.tqdm(enumerate(weights),
total=len(weights),
desc='building consensus'):
if skip_positions:
skip_positions -= 1
continue
if cdr_patches and any(r.start == pos and r.seq for r in cdr_patches):
cdr_patch = next(r for r in cdr_patches if r.start == pos)
consensus_seq += cdr_patch.seq.lower()
skip_positions += len(cdr_patch.seq) - 1
continue
ins_freq = sum(insertions[pos].values()) if insertions[pos] else 0
del_freq = deletions[pos]
aligned_depth = sum({nt: weight[nt] for nt in list('ACGT')}.values())
try:
aligned_depth_next = sum({nt: weights[pos+1][nt] for nt in list('ACGT')}.values())
except IndexError:
aligned_depth_next = 0
threshold_freq = aligned_depth * 0.5
indel_threshold_freq = min(threshold_freq, aligned_depth_next * 0.5)
if del_freq > threshold_freq:
changes[pos] = 'D'
elif aligned_depth < min_depth:
consensus_seq += 'N'
changes[pos] = 'N'
else:
if ins_freq > indel_threshold_freq:
insertion = consensus(insertions[pos])
consensus_seq += insertion[0].lower() if not insertion[3] else 'N'
changes[pos] = 'I'
pos_consensus = consensus(weight)
consensus_seq += pos_consensus[0] if not pos_consensus[3] else 'N'
if trim_ends:
consensus_seq = consensus_seq.strip('N')
if uppercase:
consensus_seq = consensus_seq.upper()
return consensus_seq, changes
def consensus_seqrecord(consensus, ref_id):
return SeqRecord(Seq(consensus), id=f'{ref_id}_cns', description='')
def build_report(ref_id, weights, changes, cdr_patches, bam_path, realign, min_depth, min_overlap,
clip_decay_threshold, trim_ends, uppercase):
aligned_depth = [sum({nt:w[nt] for nt in list('ACGT')}.values()) for w in weights]
ambiguous_sites = []
insertion_sites = []
deletion_sites = []
cdr_patches_fmt = ['{}-{}: {}'.format(r.start, r.end, r.seq) for r in cdr_patches] if cdr_patches else ''
for pos, change in enumerate(changes):
if change == 'N':
ambiguous_sites.append(str(pos))
elif change == 'I':
insertion_sites.append(str(pos+1))
elif change == 'D':
deletion_sites.append(str(pos))
report = '========================= REPORT ===========================\n'
report += 'reference: {}\n'.format(ref_id)
report += 'options:\n'
report += '- bam_path: {}\n'.format(bam_path)
report += '- min_depth: {}\n'.format(min_depth)
report += '- realign: {}\n'.format(realign)
report += ' - min_overlap: {}\n'.format(min_overlap)
report += ' - clip_decay_threshold: {}\n'.format(clip_decay_threshold)
report += '- trim_ends: {}\n'.format(trim_ends)
report += '- uppercase: {}\n'.format(uppercase)
report += 'observations:\n'
report += '- min, max observed depth: {}, {}\n'.format(min(aligned_depth),
max(aligned_depth))
report += '- ambiguous sites: {}\n'.format(', '.join(ambiguous_sites))
report += '- insertion sites: {}\n'.format(', '.join(insertion_sites))
report += '- deletion sites: {}\n'.format(', '.join(deletion_sites))
report += '- clip-dominant regions: {}\n'.format(', '.join(cdr_patches_fmt))
return report
def bam_to_consensus(bam_path, realign=False, min_depth=1, min_overlap=7,
clip_decay_threshold=0.1, mask_ends=10, trim_ends=False, uppercase=False):
consensuses = []
refs_changes = {}
refs_reports = {}
# for i, (ref_id, aln) in enumerate(parse_bam(bam_path).items()):
for ref_id, aln in parse_bam(bam_path).items():
if realign:
cdrps = cdrp_consensuses(aln.weights, aln.clip_start_weights, aln.clip_end_weights,
aln.clip_start_depth, aln.clip_end_depth,
clip_decay_threshold, mask_ends)
cdr_patches = merge_cdrps(cdrps)
else:
cdr_patches = None
# print(aln.weights, aln.clip_start_weights, aln.clip_end_weights, aln.insertions, aln.deletions, cdr_patches, trim_ends, min_depth, uppercase)
consensus, changes = consensus_sequence(aln.weights, aln.clip_start_weights,
aln.clip_end_weights, aln.insertions,
aln.deletions, cdr_patches, trim_ends,
min_depth, uppercase)
report = build_report(ref_id, aln.weights, changes, cdr_patches, bam_path, realign,
min_depth, min_overlap, clip_decay_threshold, trim_ends,
uppercase)
consensuses.append(consensus_seqrecord(consensus, ref_id))
refs_reports[ref_id] = report
refs_changes[ref_id] = changes
result = namedtuple('result', ['consensuses', 'refs_changes', 'refs_reports'])
return result(consensuses, refs_changes, refs_reports)
def weights(bam_path: 'path to SAM/BAM file',
relative: 'output relative nucleotide frequencies'=False,
no_confidence: 'skip confidence calculation'=False):
'''
Returns DataFrame of per-site nucleotide frequencies, depth, consensus, lower bound of the
99% consensus confidence interval, and Shannon entropy
'''
def binomial_ci(count, nobs, alpha=0.01):
'''Returns lower, upper bounds of the Jeffrey binomial proportion confidence interval'''
lower_ci, upper_ci = scipy.stats.beta.interval(1-alpha, count+0.5, nobs-count+0.5)
return lower_ci, upper_ci
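    # e.g. binomial_ci(45, 50) gives the (lower, upper) bounds of the 99% Jeffreys
    # interval for an observed consensus fraction of 45/50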
refs_alns = parse_bam(bam_path)
weights_fmt = []
for ref, aln in refs_alns.items():
weights_fmt.extend([dict(w, ref=ref, pos=i) for i, w in enumerate(aln.weights, start=1)])
weights_df = pd.DataFrame(weights_fmt, columns=['ref','pos','A','C','G','T','N'])
weights_df['depth'] = weights_df[['A','C','G','T','N']].sum(axis=1)
consensus_depths_df = weights_df[['A','C','G','T','N']].max(axis=1)
weights_df['consensus'] = consensus_depths_df.divide(weights_df.depth)
rel_weights_df = pd.DataFrame()
for nt in ['A','C','G','T','N']:
rel_weights_df[[nt]] = weights_df[[nt]].divide(weights_df.depth, axis=0)
rel_weights_df = rel_weights_df.round(dict(A=3, C=3, G=3, T=3, N=3))
weights_df['shannon'] = [scipy.stats.entropy(x)
for x in rel_weights_df[['A','C','G','T']].values]
if not no_confidence:
conf_ints = [binomial_ci(c, t) for c, t, in zip(consensus_depths_df,
weights_df['depth'])]
weights_df['lower_ci'] = [ci[0] for ci in conf_ints]
weights_df['upper_ci'] = [ci[1] for ci in conf_ints]
if relative:
for nt in ['A','C','G','T','N']:
weights_df[[nt]] = rel_weights_df[[nt]]
return weights_df.round(dict(consensus=3, lower_ci=3, upper_ci=3, shannon=3))
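# Illustrative usage sketch (hypothetical path): write the per-site table to TSV.
#   weights('sample.bam', relative=True).to_csv('weights.tsv', sep='\t', index=False)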
def features(bam_path: 'path to SAM/BAM file'):
'''
Returns DataFrame of relative per-site nucleotide frequencies, insertions, deletions and entropy
'''
refs_alns = parse_bam(bam_path)
weights_fmt = []
for ref, aln in refs_alns.items():
weights_fmt.extend([dict(w, ref=ref, pos=i) for i, w in enumerate(aln.weights, start=1)])
for pos, weight in enumerate(weights_fmt):
weight['i'] = sum(aln.insertions[pos].values())
weight['d'] = aln.deletions[pos]
# Think about which columns should sum to 1
weights_df = pd.DataFrame(weights_fmt, columns=['ref','pos','A','C','G','T','N','i','d'])
weights_df['depth'] = weights_df[['A','C','G','T','N', 'd']].sum(axis=1)
consensus_depths_df = weights_df[['A','C','G','T','N']].max(axis=1)
weights_df['consensus'] = consensus_depths_df.divide(weights_df.depth)
for nt in ['A','C','G','T','N','i','d']:
weights_df[[nt]] = weights_df[[nt]].divide(weights_df.depth, axis=0)
weights_df['shannon'] = [scipy.stats.entropy(x)
                             for x in weights_df[['A','C','G','T','i','d']].values]
return weights_df.round(3)
def variants(bam_path: 'path to SAM/BAM file',
abs_threshold: 'absolute frequency (0-∞) above which to call variants'=1,
rel_threshold: 'relative frequency (0.0-1.0) above which to call variants'=0.01,
only_variants: 'exclude invariant sites from output'=False,
absolute: 'report absolute variant frequencies'=False):
'''
Returns DataFrame of single nucleotide variant frequencies exceeding specified frequency
thresholds from an aligned BAM
'''
    weights_df = weights(bam_path, no_confidence=True)
    site_weights = weights_df[['A','C','G','T']].to_dict('records')
    variant_sites = []
    for i, weight in enumerate(site_weights):
        depth = sum(weight.values())
        site_consensus = consensus(weight)  # local renamed so the consensus() helper is not shadowed
        alt_weight = {nt:w for nt, w in weight.items() if nt != site_consensus[0]}
alt_weight_rel = {nt:w/depth for nt, w in alt_weight.items() if depth}
alt_depths = alt_weight.values()
max_alt_weight = max(alt_weight, key=alt_weight.get)
max_alt_depth = max(alt_depths)
alts_above_thresholds = {nt:w for nt, w in alt_weight.items() # Get weights >= abs & rel
if depth and w >= abs_threshold and w/depth >= rel_threshold}
if absolute:
variant_sites.append(alts_above_thresholds)
else:
variant_sites.append({nt:round(w/depth, 3)
for nt, w in alts_above_thresholds.items() if depth})
variants_df = pd.DataFrame(variant_sites, columns=['A','C','G','T'])
variants_df = pd.concat([weights_df.ref,
weights_df.pos,
variants_df,
weights_df.depth,
weights_df.consensus,
weights_df.shannon], axis=1)
if only_variants:
variants_df = variants_df[variants_df['A'].notnull()
| variants_df['C'].notnull()
| variants_df['G'].notnull()
| variants_df['T'].notnull()]
return variants_df
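# Illustrative usage sketch (hypothetical path and thresholds):
#   variants('sample.bam', abs_threshold=5, rel_threshold=0.05, only_variants=True)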
def parse_samtools_depth(*args):
ids_depths = {}
for arg in args:
id = arg
depths_df = pd.read_table(arg, names=['contig', 'position', 'depth'])
ids_depths[id] = depths_df.depth.tolist()
return ids_depths
def plotly_samtools_depth(ids_depths):
    import plotly.offline as py
    import plotly.graph_objs as go
    n_positions = len(ids_depths[max(ids_depths, key=lambda x: len(set(ids_depths[x])))])
traces = []
for id, depths in sorted(ids_depths.items()):
traces.append(
go.Scattergl(
x=list(range(1, n_positions)),
y=depths,
mode='lines',
name=id,
text=id))
layout = go.Layout(
title='Depth of coverage',
xaxis=dict(
title='Position',
gridcolor='rgb(255, 255, 255)',
gridwidth=2),
yaxis=dict(
title='Depth',
gridcolor='rgb(255, 255, 255)',
gridwidth=2,
type='log'),
paper_bgcolor='rgb(243, 243, 243)',
plot_bgcolor='rgb(243, 243, 243)')
fig = go.Figure(data=traces, layout=layout)
py.plot(fig, filename='depths.html')
def parse_variants(*args):
ids_data = {}
for arg in args:
id = arg
df = pd.read_table(arg, sep='\t')
df['max_alt'] = df[['A', 'C', 'G', 'T']].max(axis=1)
ids_data[id] = df.to_dict('series')
return ids_data
def plotly_variants(ids_data):
import plotly.offline as py
import plotly.graph_objs as go
traces = []
for id, data in sorted(ids_data.items()):
traces.append(
go.Scattergl(
x=data['pos'],
y=data['max_alt'],
mode='markers',
name=id,
text=id))
layout = go.Layout(
title='Variants',
xaxis=dict(
title='Position',
gridcolor='rgb(255, 255, 255)',
gridwidth=2),
yaxis=dict(
title='Abundance',
gridcolor='rgb(255, 255, 255)',
gridwidth=2,
type='linear'),
paper_bgcolor='rgb(243, 243, 243)',
plot_bgcolor='rgb(243, 243, 243)')
fig = go.Figure(data=traces, layout=layout)
py.plot(fig, filename='variants.html')
def plotly_clips(bam_path):
import plotly.offline as py
import plotly.graph_objs as go
aln = list(parse_bam(bam_path).items())[0][1]
aligned_depth = [sum(weight.values()) for weight in aln.weights]
ins = [sum(i.values()) for i in aln.insertions]
x_axis = list(range(len(aligned_depth)))
traces = [
go.Scattergl(
x = x_axis,
y = aligned_depth,
mode = 'lines',
name = 'Aligned depth'),
go.Scattergl(
x = x_axis,
y = aln.consensus_depth,
mode = 'lines',
name = 'Consensus depth'),
go.Scattergl(
x = x_axis,
y = aln.clip_start_depth,
mode = 'lines',
name = 'Soft clip start depth'),
go.Scattergl(
x = x_axis,
y = aln.clip_end_depth,
mode = 'lines',
name = 'Soft clip end depth'),
go.Scattergl(
x = x_axis,
y = aln.clip_starts,
mode = 'markers',
name = 'Soft clip starts'),
go.Scattergl(
x = x_axis,
y = aln.clip_ends,
mode = 'markers',
name = 'Soft clip ends'),
go.Scattergl(
x = x_axis,
y = ins,
mode = 'markers',
name = 'Insertions'),
go.Scattergl(
x = x_axis,
y = aln.deletions,
mode = 'markers',
name = 'Deletions')]
layout = go.Layout(
xaxis=dict(
type='linear',
autorange=True),
yaxis=dict(
type='linear',
autorange=True))
fig = go.Figure(data=traces, layout=layout)
out_fn = os.path.splitext(os.path.split(bam_path)[1])[0]
py.plot(fig, filename=out_fn + '.clips.html')
if __name__ == '__main__':
    from kindel import cli  # imported here to avoid a circular import at module load
    cli.main()
| gpl-3.0 |
ThomasMiconi/htmresearch | htmresearch/frameworks/layers/l2_l4_inference.py | 2 | 25173 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This class allows to easily create experiments using a L4-L2 network for
inference over objects. It uses the network API and multiple regions (raw
sensors for sensor and external input, column pooler region, extended temporal
memory region).
Here is a sample use of this class, to learn two very simple objects
and infer one of them. In this case, we use a SimpleObjectMachine to generate
objects. If no object machine is used, objects and sensations should be passed
in a very specific format (cf. learnObjects() and infer() for more
information).
exp = L4L2Experiment(
name="sample",
numCorticalColumns=2,
)
# Set up inputs for learning
objects = createObjectMachine(
machineType="simple",
numInputBits=20,
sensorInputSize=1024,
externalInputSize=1024,
numCorticalColumns=2,
)
objects.addObject([(1, 2), (2, 3)], name=0)
objects.addObject([(1, 2), (4, 5)], name=1)
objectsToLearn = objects.provideObjectsToLearn()
# Do the learning phase
exp.learnObjects(objectsToLearn, reset=True)
exp.printProfile()
# Set up inputs for inference
inferConfig = {
"numSteps": 2,
"noiseLevel": 0.05,
"pairs": {
0: [(1, 2), (2, 3)],
1: [(2, 3), (1, 2)],
}
}
objectsToInfer = objects.provideObjectToInfer(inferConfig)
# Do the inference phase
exp.infer(objectsToInfer,
objectName=0, reset=True)
exp.plotInferenceStats(
fields=["L2 Representation",
"Overlap L2 with object",
"L4 Representation"],
plotDir="plots",
)
More examples are available in projects/layers/single_column.py and
projects/layers/multi_column.py
"""
# Disable variable/field name restrictions
# pylint: disable=C0103
import collections
import os
import random
import matplotlib.pyplot as plt
from tabulate import tabulate
from htmresearch.support.logging_decorator import LoggingDecorator
from htmresearch.support.register_regions import registerAllResearchRegions
from htmresearch.frameworks.layers.laminar_network import createNetwork
def rerunExperimentFromLogfile(logFilename):
"""
Create an experiment class according to the sequence of operations in logFile
and return resulting experiment instance.
"""
callLog = LoggingDecorator.load(logFilename)
# Assume first one is call to constructor
exp = L4L2Experiment(*callLog[0][1]["args"], **callLog[0][1]["kwargs"])
# Call subsequent methods, using stored parameters
for call in callLog[1:]:
method = getattr(exp, call[0])
method(*call[1]["args"], **call[1]["kwargs"])
return exp
class L4L2Experiment(object):
"""
L4-L2 experiment.
This experiment uses the network API to test out various properties of
inference and learning using a sensors and an L4-L2 network. For now,
we directly use the locations on the object.
"""
@LoggingDecorator()
def __init__(self,
name,
numCorticalColumns=1,
inputSize=1024,
numInputBits=20,
externalInputSize=1024,
numExternalInputBits=20,
L2Overrides=None,
L4RegionType="py.ExtendedTMRegion",
L4Overrides=None,
numLearningPoints=3,
seed=42,
logCalls=False,
enableLateralSP=False,
lateralSPOverrides=None,
enableFeedForwardSP=False,
feedForwardSPOverrides=None
):
"""
Creates the network.
Parameters:
----------------------------
@param name (str)
Experiment name
@param numCorticalColumns (int)
Number of cortical columns in the network
@param inputSize (int)
Size of the sensory input
@param numInputBits (int)
Number of ON bits in the generated input patterns
@param externalInputSize (int)
Size of the lateral input to L4 regions
@param numExternalInputBits (int)
Number of ON bits in the external input patterns
@param L2Overrides (dict)
Parameters to override in the L2 region
@param L4RegionType (string)
The type of region to use for L4
@param L4Overrides (dict)
Parameters to override in the L4 region
@param numLearningPoints (int)
Number of times each pair should be seen to be learnt
@param logCalls (bool)
If true, calls to main functions will be logged internally. The
log can then be saved with saveLogs(). This allows us to recreate
the complete network behavior using rerunExperimentFromLogfile
which is very useful for debugging.
@param enableLateralSP (bool)
If true, Spatial Pooler will be added between external input and
L4 lateral input
@param lateralSPOverrides
Parameters to override in the lateral SP region
@param enableFeedForwardSP (bool)
If true, Spatial Pooler will be added between external input and
L4 feed-forward input
@param feedForwardSPOverrides
Parameters to override in the feed-forward SP region
"""
# Handle logging - this has to be done first
self.logCalls = logCalls
registerAllResearchRegions()
self.name = name
self.numLearningPoints = numLearningPoints
self.numColumns = numCorticalColumns
self.inputSize = inputSize
self.externalInputSize = externalInputSize
self.numInputBits = numInputBits
# seed
self.seed = seed
random.seed(seed)
# update parameters with overrides
self.config = {
"networkType": "MultipleL4L2Columns",
"numCorticalColumns": numCorticalColumns,
"externalInputSize": externalInputSize,
"sensorInputSize": inputSize,
"L4RegionType": L4RegionType,
"L4Params": self.getDefaultL4Params(L4RegionType, inputSize,
numExternalInputBits),
"L2Params": self.getDefaultL2Params(inputSize, numInputBits),
}
if enableLateralSP:
self.config["lateralSPParams"] = self.getDefaultLateralSPParams(inputSize)
if lateralSPOverrides:
self.config["lateralSPParams"].update(lateralSPOverrides)
if enableFeedForwardSP:
self.config["feedForwardSPParams"] = self.getDefaultFeedForwardSPParams(inputSize)
if feedForwardSPOverrides:
self.config["feedForwardSPParams"].update(feedForwardSPOverrides)
if L2Overrides is not None:
self.config["L2Params"].update(L2Overrides)
if L4Overrides is not None:
self.config["L4Params"].update(L4Overrides)
# create network
self.network = createNetwork(self.config)
self.sensorInputs = []
self.externalInputs = []
self.L4Regions = []
self.L2Regions = []
for i in xrange(self.numColumns):
self.sensorInputs.append(
self.network.regions["sensorInput_" + str(i)].getSelf()
)
self.externalInputs.append(
self.network.regions["externalInput_" + str(i)].getSelf()
)
self.L4Regions.append(
self.network.regions["L4Column_" + str(i)]
)
self.L2Regions.append(
self.network.regions["L2Column_" + str(i)]
)
self.L4Columns = [region.getSelf() for region in self.L4Regions]
self.L2Columns = [region.getSelf() for region in self.L2Regions]
# will be populated during training
self.objectL2Representations = {}
self.statistics = []
@LoggingDecorator()
def learnObjects(self, objects, reset=True):
"""
Learns all provided objects, and optionally resets the network.
The provided objects must have the canonical learning format, which is the
following.
objects should be a dict objectName: sensationList, where each
sensationList is a list of sensations, and each sensation is a mapping
from cortical column to a tuple of two SDR's respectively corresponding
to the location in object space and the feature.
For example, the input can look as follows, if we are learning a simple
object with two sensations (with very few active bits for simplicity):
objects = {
"simple": [
{
          0: (set([1, 5, 10]), set([6, 12, 52])), # location, feature for CC0
          1: (set([6, 2, 15]), set([64, 1, 5])), # location, feature for CC1
        },
        {
          0: (set([5, 46, 50]), set([8, 10, 11])), # location, feature for CC0
          1: (set([1, 6, 45]), set([12, 17, 23])), # location, feature for CC1
},
]
}
In many uses cases, this object can be created by implementations of
ObjectMachines (cf htm.research.object_machine_factory), through their
method providedObjectsToLearn.
Parameters:
----------------------------
@param objects (dict)
Objects to learn, in the canonical format specified above
@param reset (bool)
If set to True (which is the default value), the network will
be reset after learning.
"""
self._setLearningMode()
for objectName, sensationList in objects.iteritems():
# ignore empty sensation lists
if len(sensationList) == 0:
continue
# keep track of numbers of iterations to run
iterations = 0
for sensations in sensationList:
# learn each pattern multiple times
for _ in xrange(self.numLearningPoints):
for col in xrange(self.numColumns):
location, feature = sensations[col]
self.sensorInputs[col].addDataToQueue(list(feature), 0, 0)
self.externalInputs[col].addDataToQueue(list(location), 0, 0)
iterations += 1
# actually learn the objects
if iterations > 0:
self.network.run(iterations)
# update L2 representations
self.objectL2Representations[objectName] = self.getL2Representations()
if reset:
# send reset signal
self._sendReset()
@LoggingDecorator()
def infer(self, sensationList, reset=True, objectName=None):
"""
Infer on given sensations.
The provided sensationList is a list of sensations, and each sensation is
a mapping from cortical column to a tuple of two SDR's respectively
corresponding to the location in object space and the feature.
For example, the input can look as follows, if we are inferring a simple
object with two sensations (with very few active bits for simplicity):
sensationList = [
{
        0: (set([1, 5, 10]), set([6, 12, 52])), # location, feature for CC0
        1: (set([6, 2, 15]), set([64, 1, 5])), # location, feature for CC1
      },
      {
        0: (set([5, 46, 50]), set([8, 10, 11])), # location, feature for CC0
        1: (set([1, 6, 45]), set([12, 17, 23])), # location, feature for CC1
},
]
In many uses cases, this object can be created by implementations of
ObjectMachines (cf htm.research.object_machine_factory), through their
method providedObjectsToInfer.
If the object is known by the caller, an object name can be specified
as an optional argument, and must match the objects given while learning.
Parameters:
----------------------------
@param objects (dict)
Objects to learn, in the canonical format specified above
@param reset (bool)
If set to True (which is the default value), the network will
be reset after learning.
@param objectName (str)
Name of the objects (must match the names given during learning).
"""
self._unsetLearningMode()
statistics = collections.defaultdict(list)
if objectName is not None:
if objectName not in self.objectL2Representations:
raise ValueError("The provided objectName was not given during"
" learning")
for sensations in sensationList:
# feed all columns with sensations
for col in xrange(self.numColumns):
location, feature = sensations[col]
self.sensorInputs[col].addDataToQueue(list(feature), 0, 0)
self.externalInputs[col].addDataToQueue(list(location), 0, 0)
self.network.run(1)
self._updateInferenceStats(statistics, objectName)
if reset:
# send reset signal
self._sendReset()
# save statistics
statistics["numSteps"] = len(sensationList)
statistics["object"] = objectName if objectName is not None else "Unknown"
self.statistics.append(statistics)
def _sendReset(self, sequenceId=0):
"""
Sends a reset signal to the network.
"""
for col in xrange(self.numColumns):
self.sensorInputs[col].addResetToQueue(sequenceId)
self.externalInputs[col].addResetToQueue(sequenceId)
self.network.run(1)
@LoggingDecorator()
def sendReset(self, *args, **kwargs):
"""
Public interface to sends a reset signal to the network. This is logged.
"""
self._sendReset(*args, **kwargs)
def plotInferenceStats(self,
fields,
plotDir="plots",
experimentID=0,
onePlot=True):
"""
Plots and saves the desired inference statistics.
Parameters:
----------------------------
@param fields (list(str))
List of fields to include in the plots
@param experimentID (int)
ID of the experiment (usually 0 if only one was conducted)
@param onePlot (bool)
If true, all cortical columns will be merged in one plot.
"""
if not os.path.exists(plotDir):
os.makedirs(plotDir)
plt.figure()
stats = self.statistics[experimentID]
objectName = stats["object"]
for i in xrange(self.numColumns):
if not onePlot:
plt.figure()
# plot request stats
for field in fields:
fieldKey = field + " C" + str(i)
plt.plot(stats[fieldKey], marker='+', label=fieldKey)
# format
plt.legend(loc="upper right")
plt.xlabel("Sensation #")
plt.xticks(range(stats["numSteps"]))
plt.ylabel("Number of active bits")
plt.ylim(plt.ylim()[0] - 5, plt.ylim()[1] + 5)
plt.title("Object inference for object {}".format(objectName))
# save
if not onePlot:
relPath = "{}_exp_{}_C{}.png".format(self.name, experimentID, i)
path = os.path.join(plotDir, relPath)
plt.savefig(path)
plt.close()
if onePlot:
relPath = "{}_exp_{}.png".format(self.name, experimentID)
path = os.path.join(plotDir, relPath)
plt.savefig(path)
plt.close()
def getInferenceStats(self, experimentID=None):
"""
Returns the statistics for the desired experiment. If experimentID is None
return all statistics
Parameters:
----------------------------
@param experimentID (int)
ID of the experiment (usually 0 if only one was conducted)
"""
if experimentID is None:
return self.statistics
else:
return self.statistics[experimentID]
def printProfile(self, reset=False):
"""
Prints profiling information.
Parameters:
----------------------------
@param reset (bool)
If set to True, the profiling will be reset.
"""
print "Profiling information for {}".format(type(self).__name__)
totalTime = 0.000001
for region in self.network.regions.values():
timer = region.getComputeTimer()
totalTime += timer.getElapsed()
# Sort the region names
regionNames = list(self.network.regions.keys())
regionNames.sort()
count = 1
profileInfo = []
L2Time = 0.0
L4Time = 0.0
for regionName in regionNames:
region = self.network.regions[regionName]
timer = region.getComputeTimer()
count = max(timer.getStartCount(), count)
profileInfo.append([region.name,
timer.getStartCount(),
timer.getElapsed(),
100.0 * timer.getElapsed() / totalTime,
timer.getElapsed() / max(timer.getStartCount(), 1)])
if "L2Column" in regionName:
L2Time += timer.getElapsed()
elif "L4Column" in regionName:
L4Time += timer.getElapsed()
profileInfo.append(
["Total time", "", totalTime, "100.0", totalTime / count])
print tabulate(profileInfo, headers=["Region", "Count",
"Elapsed", "Pct of total",
"Secs/iteration"],
tablefmt="grid", floatfmt="6.3f")
print
print "Total time in L2 =", L2Time
print "Total time in L4 =", L4Time
if reset:
self.resetProfile()
def resetProfile(self):
"""
Resets the network profiling.
"""
self.network.resetProfiling()
def getL4Representations(self):
"""
Returns the active representation in L4.
"""
return [set(column.getOutputData("activeCells").nonzero()[0])
for column in self.L4Regions]
def getL4PredictiveCells(self):
"""
Returns the predictive cells in L4.
"""
return [set(column.getOutputData("predictedCells").nonzero()[0])
for column in self.L4Regions]
def getL2Representations(self):
"""
Returns the active representation in L2.
"""
return [set(column._pooler.getActiveCells()) for column in self.L2Columns]
def getCurrentClassification(self, minOverlap=None, includeZeros=True):
"""
A dict with a score for each object. Score goes from 0 to 1. A 1 means
every col (that has received input since the last reset) currently has
overlap >= minOverlap with the representation for that object.
:param minOverlap: min overlap to consider the object as recognized.
Defaults to half of the SDR size
:param includeZeros: if True, include scores for all objects, even if 0
:return: dict of object names and their score
"""
results = {}
l2sdr = self.getL2Representations()
sdrSize = self.config["L2Params"]["sdrSize"]
if minOverlap is None:
minOverlap = sdrSize / 2
for objectName, objectSdr in self.objectL2Representations.iteritems():
count = 0
score = 0.0
for i in xrange(self.numColumns):
# Ignore inactive column
if len(l2sdr[i]) == 0:
continue
count += 1
overlap = len(l2sdr[i] & objectSdr[i])
if overlap >= minOverlap:
score += 1
if count == 0:
if includeZeros:
results[objectName] = 0
else:
if includeZeros or score>0.0:
results[objectName] = score / count
return results
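  # Illustrative return value (hypothetical object names): after sensing a learned
  # object with every active column matching above minOverlap, e.g. {"mug": 1.0, "bowl": 0.0}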
def getDefaultL4Params(self, L4RegionType, inputSize, numInputBits):
"""
Returns a good default set of parameters to use in the L4 region.
"""
sampleSize = int(1.5 * numInputBits)
if numInputBits == 20:
activationThreshold = 13
minThreshold = 13
elif numInputBits == 10:
activationThreshold = 8
minThreshold = 8
else:
activationThreshold = int(numInputBits * .6)
minThreshold = activationThreshold
if L4RegionType == "py.ExtendedTMRegion":
return {
"columnCount": inputSize,
"cellsPerColumn": 16,
"formInternalBasalConnections": False,
"learn": True,
"learnOnOneCell": False,
"initialPermanence": 0.51,
"connectedPermanence": 0.6,
"permanenceIncrement": 0.1,
"permanenceDecrement": 0.02,
"minThreshold": minThreshold,
"predictedSegmentDecrement": 0.0,
"activationThreshold": activationThreshold,
"maxNewSynapseCount": sampleSize,
"implementation": "etm",
"seed": self.seed
}
elif L4RegionType == "py.ApicalTMRegion":
return {
"columnCount": inputSize,
"cellsPerColumn": 16,
"learn": True,
"initialPermanence": 0.51,
"connectedPermanence": 0.6,
"permanenceIncrement": 0.1,
"permanenceDecrement": 0.02,
"minThreshold": minThreshold,
"basalPredictedSegmentDecrement": 0.0,
"apicalPredictedSegmentDecrement": 0.0,
"activationThreshold": activationThreshold,
"sampleSize": sampleSize,
"implementation": "ApicalTiebreak",
"seed": self.seed
}
else:
raise ValueError("Unknown L4RegionType: %s" % L4RegionType)
def getDefaultL2Params(self, inputSize, numInputBits):
"""
Returns a good default set of parameters to use in the L2 region.
"""
if numInputBits == 20:
sampleSizeProximal = 10
minThresholdProximal = 6
elif numInputBits == 10:
sampleSizeProximal = 6
minThresholdProximal = 3
else:
sampleSizeProximal = int(numInputBits * .6)
minThresholdProximal = int(sampleSizeProximal * .6)
return {
"inputWidth": inputSize * 16,
"cellCount": 4096,
"sdrSize": 40,
"synPermProximalInc": 0.1,
"synPermProximalDec": 0.001,
"initialProximalPermanence": 0.6,
"minThresholdProximal": minThresholdProximal,
"sampleSizeProximal": sampleSizeProximal,
"connectedPermanenceProximal": 0.5,
"synPermDistalInc": 0.1,
"synPermDistalDec": 0.001,
"initialDistalPermanence": 0.41,
"activationThresholdDistal": 13,
"sampleSizeDistal": 20,
"connectedPermanenceDistal": 0.5,
"distalSegmentInhibitionFactor": 1.001,
"seed": self.seed,
"learningMode": True,
}
def getDefaultLateralSPParams(self, inputSize):
return {
"spatialImp": "cpp",
"globalInhibition": 1,
"columnCount": 1024,
"inputWidth": inputSize,
"numActiveColumnsPerInhArea": 40,
"seed": self.seed,
"potentialPct": 0.8,
"synPermConnected": 0.1,
"synPermActiveInc": 0.0001,
"synPermInactiveDec": 0.0005,
"boostStrength": 0.0,
}
def getDefaultFeedForwardSPParams(self, inputSize):
return {
"spatialImp": "cpp",
"globalInhibition": 1,
"columnCount": 1024,
"inputWidth": inputSize,
"numActiveColumnsPerInhArea": 40,
"seed": self.seed,
"potentialPct": 0.8,
"synPermConnected": 0.1,
"synPermActiveInc": 0.0001,
"synPermInactiveDec": 0.0005,
"boostStrength": 0.0,
}
def _unsetLearningMode(self):
"""
Unsets the learning mode, to start inference.
"""
for region in self.L4Regions:
region.setParameter("learn", False)
for region in self.L2Regions:
region.setParameter("learningMode", False)
def _setLearningMode(self):
"""
Sets the learning mode.
"""
for region in self.L4Regions:
region.setParameter("learn", True)
for region in self.L2Regions:
region.setParameter("learningMode", True)
def _updateInferenceStats(self, statistics, objectName=None):
"""
Updates the inference statistics.
Parameters:
----------------------------
@param statistics (dict)
Dictionary in which to write the statistics
@param objectName (str)
Name of the inferred object, if known. Otherwise, set to None.
"""
L4Representations = self.getL4Representations()
L4PredictiveCells = self.getL4PredictiveCells()
L2Representation = self.getL2Representations()
for i in xrange(self.numColumns):
statistics["L4 Representation C" + str(i)].append(
len(L4Representations[i])
)
statistics["L4 Predictive C" + str(i)].append(
len(L4PredictiveCells[i])
)
statistics["L2 Representation C" + str(i)].append(
len(L2Representation[i])
)
statistics["L4 Apical Segments C" + str(i)].append(
len(self.L4Columns[i]._tm.getActiveApicalSegments())
)
# add true overlap if objectName was provided
if objectName is not None:
objectRepresentation = self.objectL2Representations[objectName]
statistics["Overlap L2 with object C" + str(i)].append(
len(objectRepresentation[i] & L2Representation[i])
)
| agpl-3.0 |
ankurankan/scikit-learn | sklearn/feature_extraction/tests/test_text.py | 15 | 33321 | from __future__ import unicode_literals
import warnings
from sklearn.feature_extraction.text import strip_tags
from sklearn.feature_extraction.text import strip_accents_unicode
from sklearn.feature_extraction.text import strip_accents_ascii
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
from sklearn.cross_validation import train_test_split
from sklearn.cross_validation import cross_val_score
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.base import clone
import numpy as np
from nose import SkipTest
from nose.tools import assert_equal
from nose.tools import assert_false
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_almost_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_raises
from sklearn.utils.testing import (assert_in, assert_less, assert_greater,
assert_warns_message, assert_raise_message,
clean_warning_registry)
from collections import defaultdict, Mapping
from functools import partial
import pickle
from io import StringIO
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
NOTJUNK_FOOD_DOCS = (
"the salad celeri copyright",
"the salad salad sparkling water copyright",
"the the celeri celeri copyright",
"the tomato tomato salad water",
"the tomato salad water copyright",
)
ALL_FOOD_DOCS = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
def uppercase(s):
return strip_accents_unicode(s).upper()
def strip_eacute(s):
return s.replace('\xe9', 'e')
def split_tokenize(s):
return s.split()
def lazy_analyze(s):
return ['the_ultimate_feature']
def test_strip_accents():
# check some classical latin accentuated symbols
a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
expected = 'aaaaaaceeee'
assert_equal(strip_accents_unicode(a), expected)
a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
expected = 'iiiinooooouuuuy'
assert_equal(strip_accents_unicode(a), expected)
# check some arabic
a = '\u0625' # halef with a hamza below
expected = '\u0627' # simple halef
assert_equal(strip_accents_unicode(a), expected)
# mix letters accentuated and not
a = "this is \xe0 test"
expected = 'this is a test'
assert_equal(strip_accents_unicode(a), expected)
def test_to_ascii():
# check some classical latin accentuated symbols
a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
expected = 'aaaaaaceeee'
assert_equal(strip_accents_ascii(a), expected)
a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
expected = 'iiiinooooouuuuy'
assert_equal(strip_accents_ascii(a), expected)
# check some arabic
a = '\u0625' # halef with a hamza below
expected = '' # halef has no direct ascii match
assert_equal(strip_accents_ascii(a), expected)
# mix letters accentuated and not
a = "this is \xe0 test"
expected = 'this is a test'
assert_equal(strip_accents_ascii(a), expected)
def test_word_analyzer_unigrams():
for Vectorizer in (CountVectorizer, HashingVectorizer):
wa = Vectorizer(strip_accents='ascii').build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
"c'\xe9tait pas tr\xeas bon.")
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon']
assert_equal(wa(text), expected)
text = "This is a test, really.\n\n I met Harry yesterday."
expected = ['this', 'is', 'test', 'really', 'met', 'harry',
'yesterday']
assert_equal(wa(text), expected)
wa = Vectorizer(input='file').build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['this', 'is', 'test', 'with', 'file', 'like',
'object']
assert_equal(wa(text), expected)
# with custom preprocessor
wa = Vectorizer(preprocessor=uppercase).build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
" c'\xe9tait pas tr\xeas bon.")
expected = ['AI', 'MANGE', 'DU', 'KANGOUROU', 'CE', 'MIDI',
'ETAIT', 'PAS', 'TRES', 'BON']
assert_equal(wa(text), expected)
# with custom tokenizer
wa = Vectorizer(tokenizer=split_tokenize,
strip_accents='ascii').build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
"c'\xe9tait pas tr\xeas bon.")
expected = ["j'ai", 'mange', 'du', 'kangourou', 'ce', 'midi,',
"c'etait", 'pas', 'tres', 'bon.']
assert_equal(wa(text), expected)
def test_word_analyzer_unigrams_and_bigrams():
wa = CountVectorizer(analyzer="word", strip_accents='unicode',
ngram_range=(1, 2)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon', 'ai mange', 'mange du',
'du kangourou', 'kangourou ce', 'ce midi', 'midi etait',
'etait pas', 'pas tres', 'tres bon']
assert_equal(wa(text), expected)
def test_unicode_decode_error():
# decode_error default to strict, so this should fail
# First, encode (as bytes) a unicode string.
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
text_bytes = text.encode('utf-8')
# Then let the Analyzer try to decode it as ascii. It should fail,
# because we have given it an incorrect encoding.
wa = CountVectorizer(ngram_range=(1, 2), encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, wa, text_bytes)
ca = CountVectorizer(analyzer='char', ngram_range=(3, 6),
encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, ca, text_bytes)
def test_char_ngram_analyzer():
cnga = CountVectorizer(analyzer='char', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon"
expected = ["j'a", "'ai", 'ai ', 'i m', ' ma']
assert_equal(cnga(text)[:5], expected)
expected = ['s tres', ' tres ', 'tres b', 'res bo', 'es bon']
assert_equal(cnga(text)[-5:], expected)
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
expected = [' yeste', 'yester', 'esterd', 'sterda', 'terday']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
def test_char_wb_ngram_analyzer():
cnga = CountVectorizer(analyzer='char_wb', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = [' th', 'thi', 'his', 'is ', ' thi']
assert_equal(cnga(text)[:5], expected)
expected = ['yester', 'esterd', 'sterda', 'terday', 'erday ']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char_wb',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("A test with a file-like object!")
expected = [' a ', ' te', 'tes', 'est', 'st ', ' tes']
assert_equal(cnga(text)[:6], expected)
def test_countvectorizer_custom_vocabulary():
vocab = {"pizza": 0, "beer": 1}
terms = set(vocab.keys())
# Try a few of the supported types.
for typ in [dict, list, iter, partial(defaultdict, int)]:
v = typ(vocab)
vect = CountVectorizer(vocabulary=v)
vect.fit(JUNK_FOOD_DOCS)
if isinstance(v, Mapping):
assert_equal(vect.vocabulary_, vocab)
else:
assert_equal(set(vect.vocabulary_), terms)
X = vect.transform(JUNK_FOOD_DOCS)
assert_equal(X.shape[1], len(terms))
def test_countvectorizer_custom_vocabulary_pipeline():
what_we_like = ["pizza", "beer"]
pipe = Pipeline([
('count', CountVectorizer(vocabulary=what_we_like)),
('tfidf', TfidfTransformer())])
X = pipe.fit_transform(ALL_FOOD_DOCS)
assert_equal(set(pipe.named_steps['count'].vocabulary_),
set(what_we_like))
assert_equal(X.shape[1], len(what_we_like))
def test_countvectorizer_custom_vocabulary_repeated_indeces():
vocab = {"pizza": 0, "beer": 0}
try:
CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert_in("vocabulary contains repeated indices", str(e).lower())
def test_countvectorizer_custom_vocabulary_gap_index():
vocab = {"pizza": 1, "beer": 2}
try:
CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert_in("doesn't contain index", str(e).lower())
def test_countvectorizer_stop_words():
cv = CountVectorizer()
cv.set_params(stop_words='english')
assert_equal(cv.get_stop_words(), ENGLISH_STOP_WORDS)
cv.set_params(stop_words='_bad_str_stop_')
assert_raises(ValueError, cv.get_stop_words)
cv.set_params(stop_words='_bad_unicode_stop_')
assert_raises(ValueError, cv.get_stop_words)
stoplist = ['some', 'other', 'words']
cv.set_params(stop_words=stoplist)
assert_equal(cv.get_stop_words(), stoplist)
def test_countvectorizer_empty_vocabulary():
try:
vect = CountVectorizer(vocabulary=[])
vect.fit(["foo"])
assert False, "we shouldn't get here"
except ValueError as e:
assert_in("empty vocabulary", str(e).lower())
try:
v = CountVectorizer(max_df=1.0, stop_words="english")
# fit on stopwords only
v.fit(["to be or not to be", "and me too", "and so do you"])
assert False, "we shouldn't get here"
except ValueError as e:
assert_in("empty vocabulary", str(e).lower())
def test_fit_countvectorizer_twice():
cv = CountVectorizer()
X1 = cv.fit_transform(ALL_FOOD_DOCS[:5])
X2 = cv.fit_transform(ALL_FOOD_DOCS[5:])
assert_not_equal(X1.shape[1], X2.shape[1])
def test_tf_idf_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
# this is robust to features with only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
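# For reference -- assuming the idf definition documented for TfidfTransformer
# in this generation of scikit-learn -- the two smoothing tests here exercise:
#   smooth_idf=True:   idf(t) = ln((1 + n_samples) / (1 + df(t))) + 1
#   smooth_idf=False:  idf(t) = ln(n_samples / df(t)) + 1
# e.g. for the first X above (df = [3, 2, 1], n_samples = 3) the smoothed idf
# is [ln(4/4) + 1, ln(4/3) + 1, ln(4/2) + 1] ~= [1.000, 1.288, 1.693].
# The "+1" inside the logarithm acts as if one extra document contained every
# feature, so a feature with df == 0 no longer divides by zero -- which is why
# the second half of test_tf_idf_smoothing passes cleanly while
# test_tfidf_no_smoothing below has to assert a divide-by-zero RuntimeWarning.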
def test_tfidf_no_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
    # the lack of smoothing makes IDF fragile in the presence of features with
    # only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
1. / np.array([0.])
numpy_provides_div0_warning = len(w) == 1
in_warning_message = 'divide by zero'
tfidf = assert_warns_message(RuntimeWarning, in_warning_message,
tr.fit_transform, X).toarray()
if not numpy_provides_div0_warning:
raise SkipTest("Numpy does not provide div 0 warnings.")
def test_sublinear_tf():
X = [[1], [2], [3]]
tr = TfidfTransformer(sublinear_tf=True, use_idf=False, norm=None)
tfidf = tr.fit_transform(X).toarray()
assert_equal(tfidf[0], 1)
assert_greater(tfidf[1], tfidf[0])
assert_greater(tfidf[2], tfidf[1])
assert_less(tfidf[1], 2)
assert_less(tfidf[2], 3)
def test_vectorizer():
# raw documents as an iterator
train_data = iter(ALL_FOOD_DOCS[:-1])
test_data = [ALL_FOOD_DOCS[-1]]
n_train = len(ALL_FOOD_DOCS) - 1
# test without vocabulary
v1 = CountVectorizer(max_df=0.5)
counts_train = v1.fit_transform(train_data)
if hasattr(counts_train, 'tocsr'):
counts_train = counts_train.tocsr()
assert_equal(counts_train[0, v1.vocabulary_["pizza"]], 2)
    # build a vectorizer v2 with the same vocabulary as the one fitted by v1
    v2 = CountVectorizer(vocabulary=v1.vocabulary_)
    # check that the two vectorizers give the same output on the test sample
for v in (v1, v2):
counts_test = v.transform(test_data)
if hasattr(counts_test, 'tocsr'):
counts_test = counts_test.tocsr()
vocabulary = v.vocabulary_
assert_equal(counts_test[0, vocabulary["salad"]], 1)
assert_equal(counts_test[0, vocabulary["tomato"]], 1)
assert_equal(counts_test[0, vocabulary["water"]], 1)
# stop word from the fixed list
assert_false("the" in vocabulary)
        # stop word found automatically by the vectorizer DF thresholding:
        # words that are highly frequent across the complete corpus are likely
        # to be uninformative (either real stop words or extraction
        # artifacts)
assert_false("copyright" in vocabulary)
# not present in the sample
assert_equal(counts_test[0, vocabulary["coke"]], 0)
assert_equal(counts_test[0, vocabulary["burger"]], 0)
assert_equal(counts_test[0, vocabulary["beer"]], 0)
assert_equal(counts_test[0, vocabulary["pizza"]], 0)
# test tf-idf
t1 = TfidfTransformer(norm='l1')
tfidf = t1.fit(counts_train).transform(counts_train).toarray()
assert_equal(len(t1.idf_), len(v1.vocabulary_))
assert_equal(tfidf.shape, (n_train, len(v1.vocabulary_)))
# test tf-idf with new data
tfidf_test = t1.transform(counts_test).toarray()
assert_equal(tfidf_test.shape, (len(test_data), len(v1.vocabulary_)))
# test tf alone
t2 = TfidfTransformer(norm='l1', use_idf=False)
tf = t2.fit(counts_train).transform(counts_train).toarray()
assert_equal(t2.idf_, None)
# test idf transform with unlearned idf vector
t3 = TfidfTransformer(use_idf=True)
assert_raises(ValueError, t3.transform, counts_train)
# test idf transform with incompatible n_features
X = [[1, 1, 5],
[1, 1, 0]]
t3.fit(X)
X_incompt = [[1, 3],
[1, 3]]
assert_raises(ValueError, t3.transform, X_incompt)
# L1-normalized term frequencies sum to one
assert_array_almost_equal(np.sum(tf, axis=1), [1.0] * n_train)
# test the direct tfidf vectorizer
# (equivalent to term count vectorizer + tfidf transformer)
train_data = iter(ALL_FOOD_DOCS[:-1])
tv = TfidfVectorizer(norm='l1')
tv.max_df = v1.max_df
tfidf2 = tv.fit_transform(train_data).toarray()
assert_false(tv.fixed_vocabulary_)
assert_array_almost_equal(tfidf, tfidf2)
# test the direct tfidf vectorizer with new data
tfidf_test2 = tv.transform(test_data).toarray()
assert_array_almost_equal(tfidf_test, tfidf_test2)
# test transform on unfitted vectorizer with empty vocabulary
v3 = CountVectorizer(vocabulary=None)
assert_raises(ValueError, v3.transform, train_data)
# ascii preprocessor?
v3.set_params(strip_accents='ascii', lowercase=False)
assert_equal(v3.build_preprocessor(), strip_accents_ascii)
# error on bad strip_accents param
v3.set_params(strip_accents='_gabbledegook_', preprocessor=None)
assert_raises(ValueError, v3.build_preprocessor)
# error with bad analyzer type
    v3.set_params(analyzer='_invalid_analyzer_type_')
assert_raises(ValueError, v3.build_analyzer)
def test_tfidf_vectorizer_setters():
tv = TfidfVectorizer(norm='l2', use_idf=False, smooth_idf=False,
sublinear_tf=False)
tv.norm = 'l1'
assert_equal(tv._tfidf.norm, 'l1')
tv.use_idf = True
assert_true(tv._tfidf.use_idf)
tv.smooth_idf = True
assert_true(tv._tfidf.smooth_idf)
tv.sublinear_tf = True
assert_true(tv._tfidf.sublinear_tf)
def test_hashing_vectorizer():
v = HashingVectorizer()
X = v.transform(ALL_FOOD_DOCS)
token_nnz = X.nnz
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# By default the hashed values receive a random sign and l2 normalization
# makes the feature values bounded
assert_true(np.min(X.data) > -1)
assert_true(np.min(X.data) < 0)
assert_true(np.max(X.data) > 0)
assert_true(np.max(X.data) < 1)
# Check that the rows are normalized
for i in range(X.shape[0]):
        assert_almost_equal(np.linalg.norm(X[i].data, 2), 1.0)
# Check vectorization with some non-default parameters
v = HashingVectorizer(ngram_range=(1, 2), non_negative=True, norm='l1')
X = v.transform(ALL_FOOD_DOCS)
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# ngrams generate more non zeros
ngrams_nnz = X.nnz
assert_true(ngrams_nnz > token_nnz)
assert_true(ngrams_nnz < 2 * token_nnz)
# makes the feature values bounded
assert_true(np.min(X.data) > 0)
assert_true(np.max(X.data) < 1)
# Check that the rows are normalized
for i in range(X.shape[0]):
        assert_almost_equal(np.linalg.norm(X[i].data, 1), 1.0)
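# Background for the sign-related assertions above (hedged -- this follows the
# behaviour described for HashingVectorizer in this era): each token is hashed
# to a column index modulo n_features and its count is added with a +1/-1 sign
# derived from the hash, so collisions tend to cancel out in expectation; that
# is why the default-transformed X contains both negative and positive values.
# With non_negative=True the absolute value of the result is taken, which is
# why every stored entry is strictly positive in the second half of the test.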
def test_feature_names():
cv = CountVectorizer(max_df=0.5)
# test for Value error on unfitted/empty vocabulary
assert_raises(ValueError, cv.get_feature_names)
X = cv.fit_transform(ALL_FOOD_DOCS)
n_samples, n_features = X.shape
assert_equal(len(cv.vocabulary_), n_features)
feature_names = cv.get_feature_names()
assert_equal(len(feature_names), n_features)
assert_array_equal(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'],
feature_names)
for idx, name in enumerate(feature_names):
assert_equal(idx, cv.vocabulary_.get(name))
def test_vectorizer_max_features():
vec_factories = (
CountVectorizer,
TfidfVectorizer,
)
expected_vocabulary = set(['burger', 'beer', 'salad', 'pizza'])
expected_stop_words = set([u'celeri', u'tomato', u'copyright', u'coke',
u'sparkling', u'water', u'the'])
for vec_factory in vec_factories:
# test bounded number of extracted features
vectorizer = vec_factory(max_df=0.6, max_features=4)
vectorizer.fit(ALL_FOOD_DOCS)
assert_equal(set(vectorizer.vocabulary_), expected_vocabulary)
assert_equal(vectorizer.stop_words_, expected_stop_words)
def test_count_vectorizer_max_features():
"""Regression test: max_features didn't work correctly in 0.14."""
cv_1 = CountVectorizer(max_features=1)
cv_3 = CountVectorizer(max_features=3)
cv_None = CountVectorizer(max_features=None)
counts_1 = cv_1.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_3 = cv_3.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_None = cv_None.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
features_1 = cv_1.get_feature_names()
features_3 = cv_3.get_feature_names()
features_None = cv_None.get_feature_names()
# The most common feature is "the", with frequency 7.
assert_equal(7, counts_1.max())
assert_equal(7, counts_3.max())
assert_equal(7, counts_None.max())
# The most common feature should be the same
assert_equal("the", features_1[np.argmax(counts_1)])
assert_equal("the", features_3[np.argmax(counts_3)])
assert_equal("the", features_None[np.argmax(counts_None)])
def test_vectorizer_max_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', max_df=1.0)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.max_df = 0.5 # 0.5 * 3 documents -> max_doc_count == 1.5
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 2)
vect.max_df = 1
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 2)
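# A detail the max_df/min_df tests rely on (as documented for CountVectorizer):
# a float in [0.0, 1.0] is interpreted as a proportion of documents, while an
# int is an absolute document count. That is why max_df=1.0 above keeps every
# term but max_df=1 (an int) drops any term appearing in more than one of the
# three documents, and why min_df below behaves analogously.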
def test_vectorizer_min_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', min_df=1)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.min_df = 2
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # {bcdt} ignored
assert_equal(len(vect.vocabulary_.keys()), 2) # {ae} remain
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 4)
vect.min_df = 0.8 # 0.8 * 3 documents -> min_doc_count == 2.4
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # {bcdet} ignored
assert_equal(len(vect.vocabulary_.keys()), 1) # {a} remains
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 5)
def test_count_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = CountVectorizer(analyzer='char', max_df=1.0)
X = vect.fit_transform(test_data).toarray()
assert_array_equal(['a', 'b', 'c', 'd', 'e'], vect.get_feature_names())
assert_array_equal([[3, 1, 1, 0, 0],
[1, 2, 0, 1, 1]], X)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = CountVectorizer(analyzer='char', max_df=1.0, binary=True)
X = vect.fit_transform(test_data).toarray()
assert_array_equal([[1, 1, 1, 0, 0],
[1, 1, 0, 1, 1]], X)
# check the ability to change the dtype
vect = CountVectorizer(analyzer='char', max_df=1.0,
binary=True, dtype=np.float32)
X_sparse = vect.fit_transform(test_data)
assert_equal(X_sparse.dtype, np.float32)
def test_hashed_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = HashingVectorizer(analyzer='char', non_negative=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X[0:1].data), 3)
assert_equal(np.max(X[1:2].data), 2)
assert_equal(X.dtype, np.float64)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X.data), 1)
assert_equal(X.dtype, np.float64)
# check the ability to change the dtype
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None, dtype=np.float64)
X = vect.transform(test_data)
assert_equal(X.dtype, np.float64)
def test_vectorizer_inverse_transform():
# raw documents
data = ALL_FOOD_DOCS
for vectorizer in (TfidfVectorizer(), CountVectorizer()):
transformed_data = vectorizer.fit_transform(data)
inversed_data = vectorizer.inverse_transform(transformed_data)
analyze = vectorizer.build_analyzer()
for doc, inversed_terms in zip(data, inversed_data):
terms = np.sort(np.unique(analyze(doc)))
inversed_terms = np.sort(np.unique(inversed_terms))
assert_array_equal(terms, inversed_terms)
# Test that inverse_transform also works with numpy arrays
transformed_data = transformed_data.toarray()
inversed_data2 = vectorizer.inverse_transform(transformed_data)
for terms, terms2 in zip(inversed_data, inversed_data2):
assert_array_equal(np.sort(terms), np.sort(terms2))
def test_count_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.2, random_state=0)
pipeline = Pipeline([('vect', CountVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'svc__loss': ('l1', 'l2')
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
# on this toy dataset bigram representation which is used in the last of
# the grid_search is considered the best estimator since they all converge
# to 100% accuracy models
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
def test_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.1, random_state=0)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'vect__norm': ('l1', 'l2'),
'svc__loss': ('l1', 'l2'),
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
# on this toy dataset bigram representation which is used in the last of
# the grid_search is considered the best estimator since they all converge
# to 100% accuracy models
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
assert_equal(best_vectorizer.norm, 'l2')
assert_false(best_vectorizer.fixed_vocabulary_)
def test_vectorizer_pipeline_cross_validation():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
cv_scores = cross_val_score(pipeline, data, target, cv=3)
assert_array_equal(cv_scores, [1., 1., 1.])
def test_vectorizer_unicode():
# tests that the count vectorizer works with cyrillic.
document = (
"\xd0\x9c\xd0\xb0\xd1\x88\xd0\xb8\xd0\xbd\xd0\xbd\xd0\xbe\xd0"
"\xb5 \xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb5\xd0\xbd\xd0\xb8\xd0"
"\xb5 \xe2\x80\x94 \xd0\xbe\xd0\xb1\xd1\x88\xd0\xb8\xd1\x80\xd0\xbd"
"\xd1\x8b\xd0\xb9 \xd0\xbf\xd0\xbe\xd0\xb4\xd1\x80\xd0\xb0\xd0\xb7"
"\xd0\xb4\xd0\xb5\xd0\xbb \xd0\xb8\xd1\x81\xd0\xba\xd1\x83\xd1\x81"
"\xd1\x81\xd1\x82\xd0\xb2\xd0\xb5\xd0\xbd\xd0\xbd\xd0\xbe\xd0\xb3"
"\xd0\xbe \xd0\xb8\xd0\xbd\xd1\x82\xd0\xb5\xd0\xbb\xd0\xbb\xd0"
"\xb5\xd0\xba\xd1\x82\xd0\xb0, \xd0\xb8\xd0\xb7\xd1\x83\xd1\x87"
"\xd0\xb0\xd1\x8e\xd1\x89\xd0\xb8\xd0\xb9 \xd0\xbc\xd0\xb5\xd1\x82"
"\xd0\xbe\xd0\xb4\xd1\x8b \xd0\xbf\xd0\xbe\xd1\x81\xd1\x82\xd1\x80"
"\xd0\xbe\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f \xd0\xb0\xd0\xbb\xd0\xb3"
"\xd0\xbe\xd1\x80\xd0\xb8\xd1\x82\xd0\xbc\xd0\xbe\xd0\xb2, \xd1\x81"
"\xd0\xbf\xd0\xbe\xd1\x81\xd0\xbe\xd0\xb1\xd0\xbd\xd1\x8b\xd1\x85 "
"\xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb0\xd1\x82\xd1\x8c\xd1\x81\xd1"
"\x8f.")
vect = CountVectorizer()
X_counted = vect.fit_transform([document])
assert_equal(X_counted.shape, (1, 15))
vect = HashingVectorizer(norm=None, non_negative=True)
X_hashed = vect.transform([document])
assert_equal(X_hashed.shape, (1, 2 ** 20))
# No collisions on such a small dataset
assert_equal(X_counted.nnz, X_hashed.nnz)
# When norm is None and non_negative, the tokens are counted up to
# collisions
assert_array_equal(np.sort(X_counted.data), np.sort(X_hashed.data))
def test_tfidf_vectorizer_with_fixed_vocabulary():
# non regression smoke test for inheritance issues
vocabulary = ['pizza', 'celeri']
vect = TfidfVectorizer(vocabulary=vocabulary)
X_1 = vect.fit_transform(ALL_FOOD_DOCS)
X_2 = vect.transform(ALL_FOOD_DOCS)
assert_array_almost_equal(X_1.toarray(), X_2.toarray())
assert_true(vect.fixed_vocabulary_)
def test_pickling_vectorizer():
instances = [
HashingVectorizer(),
HashingVectorizer(norm='l1'),
HashingVectorizer(binary=True),
HashingVectorizer(ngram_range=(1, 2)),
CountVectorizer(),
CountVectorizer(preprocessor=strip_tags),
CountVectorizer(analyzer=lazy_analyze),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS),
TfidfVectorizer(),
TfidfVectorizer(analyzer=lazy_analyze),
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
]
for orig in instances:
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_equal(copy.get_params(), orig.get_params())
assert_array_equal(
copy.fit_transform(JUNK_FOOD_DOCS).toarray(),
orig.fit_transform(JUNK_FOOD_DOCS).toarray())
def test_pickling_transformer():
X = CountVectorizer().fit_transform(JUNK_FOOD_DOCS)
orig = TfidfTransformer().fit(X)
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_array_equal(
copy.fit_transform(X).toarray(),
orig.fit_transform(X).toarray())
def test_non_unique_vocab():
vocab = ['a', 'b', 'c', 'a', 'a']
vect = CountVectorizer(vocabulary=vocab)
assert_raises(ValueError, vect.fit, [])
def test_hashingvectorizer_nan_in_docs():
# np.nan can appear when using pandas to load text fields from a csv file
# with missing values.
message = "np.nan is an invalid document, expected byte or unicode string."
exception = ValueError
def func():
hv = HashingVectorizer()
hv.fit_transform(['hello world', np.nan, 'hello hello'])
assert_raise_message(exception, message, func)
def test_tfidfvectorizer_binary():
# Non-regression test: TfidfVectorizer used to ignore its "binary" param.
v = TfidfVectorizer(binary=True, use_idf=False, norm=None)
assert_true(v.binary)
X = v.fit_transform(['hello world', 'hello hello']).toarray()
assert_array_equal(X.ravel(), [1, 1, 1, 0])
X2 = v.transform(['hello world', 'hello hello']).toarray()
assert_array_equal(X2.ravel(), [1, 1, 1, 0])
def test_tfidfvectorizer_export_idf():
vect = TfidfVectorizer(use_idf=True)
vect.fit(JUNK_FOOD_DOCS)
assert_array_almost_equal(vect.idf_, vect._tfidf.idf_)
def test_vectorizer_vocab_clone():
vect_vocab = TfidfVectorizer(vocabulary=["the"])
vect_vocab_clone = clone(vect_vocab)
vect_vocab.fit(ALL_FOOD_DOCS)
vect_vocab_clone.fit(ALL_FOOD_DOCS)
assert_equal(vect_vocab_clone.vocabulary_, vect_vocab.vocabulary_)
| bsd-3-clause |
massmutual/scikit-learn | examples/tree/plot_tree_regression_multioutput.py | 206 | 1800 | """
===================================================================
Multi-output Decision Tree Regression
===================================================================
An example to illustrate multi-output regression with a decision tree.
A :ref:`decision tree <tree>`
is used to predict simultaneously the noisy x and y observations of a circle
given a single underlying feature. As a result, it learns local linear
regressions approximating the circle.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision trees learn overly fine
details of the training data and learn from the noise, i.e. they overfit.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
y[::5, :] += (0.5 - rng.rand(20, 2))
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_3 = DecisionTreeRegressor(max_depth=8)
regr_1.fit(X, y)
regr_2.fit(X, y)
regr_3.fit(X, y)
# Predict
X_test = np.arange(-100.0, 100.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
y_3 = regr_3.predict(X_test)
# Plot the results
plt.figure()
plt.scatter(y[:, 0], y[:, 1], c="k", label="data")
plt.scatter(y_1[:, 0], y_1[:, 1], c="g", label="max_depth=2")
plt.scatter(y_2[:, 0], y_2[:, 1], c="r", label="max_depth=5")
plt.scatter(y_3[:, 0], y_3[:, 1], c="b", label="max_depth=8")
plt.xlim([-6, 6])
plt.ylim([-6, 6])
plt.xlabel("data")
plt.ylabel("target")
plt.title("Multi-output Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
nikitasingh981/scikit-learn | sklearn/manifold/tests/test_mds.py | 99 | 1873 | import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.manifold import mds
from sklearn.utils.testing import assert_raises
def test_smacof():
# test metric smacof using the data of "Modern Multidimensional Scaling",
# Borg & Groenen, p 154
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.451, .252],
[.016, -.238],
[-.200, .524]])
X, _ = mds.smacof(sim, init=Z, n_components=2, max_iter=1, n_init=1)
X_true = np.array([[-1.415, -2.471],
[1.633, 1.107],
[.249, -.067],
[-.468, 1.431]])
assert_array_almost_equal(X, X_true, decimal=3)
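# Rough intuition for the hard-coded expectation above (a hedged sketch of the
# algorithm, not of scikit-learn internals): SMACOF iteratively minimises the
# stress -- the sum of squared differences between the input dissimilarities
# and the pairwise distances of the embedding -- by majorisation. With a fixed
# init Z, n_init=1 and max_iter=1 the procedure is deterministic, which is
# what makes the comparison against X_true to 3 decimals possible.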
def test_smacof_error():
# Not symmetric similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
# Not squared similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
# init not None and not correct format:
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.016, -.238],
[-.200, .524]])
assert_raises(ValueError, mds.smacof, sim, init=Z, n_init=1)
def test_MDS():
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
mds_clf = mds.MDS(metric=False, n_jobs=3, dissimilarity="precomputed")
mds_clf.fit(sim)
| bsd-3-clause |
josealber84/deep-learning | weight-initialization/helper.py | 153 | 3649 | import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
def hist_dist(title, distribution_tensor, hist_range=(-4, 4)):
"""
Display histogram of a TF distribution
"""
with tf.Session() as sess:
values = sess.run(distribution_tensor)
plt.title(title)
    plt.hist(values, np.linspace(*hist_range, num=len(values)//2))
plt.show()
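# A minimal usage sketch for hist_dist (illustrative only -- the tensor below
# is an arbitrary choice, not something defined in this module):
#
#   hist_dist('Truncated normal', tf.truncated_normal([1000], stddev=1.0))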
def _get_loss_acc(dataset, weights):
"""
Get losses and validation accuracy of example neural network
"""
batch_size = 128
epochs = 2
learning_rate = 0.001
features = tf.placeholder(tf.float32)
labels = tf.placeholder(tf.float32)
learn_rate = tf.placeholder(tf.float32)
biases = [
tf.Variable(tf.zeros([256])),
tf.Variable(tf.zeros([128])),
tf.Variable(tf.zeros([dataset.train.labels.shape[1]]))
]
# Layers
layer_1 = tf.nn.relu(tf.matmul(features, weights[0]) + biases[0])
layer_2 = tf.nn.relu(tf.matmul(layer_1, weights[1]) + biases[1])
logits = tf.matmul(layer_2, weights[2]) + biases[2]
# Training loss
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels))
# Optimizer
optimizer = tf.train.AdamOptimizer(learn_rate).minimize(loss)
# Accuracy
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Measurements use for graphing loss
loss_batch = []
with tf.Session() as session:
session.run(tf.global_variables_initializer())
batch_count = int((dataset.train.num_examples / batch_size))
# The training cycle
for epoch_i in range(epochs):
for batch_i in range(batch_count):
batch_features, batch_labels = dataset.train.next_batch(batch_size)
# Run optimizer and get loss
session.run(
optimizer,
feed_dict={features: batch_features, labels: batch_labels, learn_rate: learning_rate})
l = session.run(
loss,
feed_dict={features: batch_features, labels: batch_labels, learn_rate: learning_rate})
loss_batch.append(l)
valid_acc = session.run(
accuracy,
feed_dict={features: dataset.validation.images, labels: dataset.validation.labels, learn_rate: 1.0})
# Hack to Reset batches
dataset.train._index_in_epoch = 0
dataset.train._epochs_completed = 0
return loss_batch, valid_acc
def compare_init_weights(
dataset,
title,
weight_init_list,
plot_n_batches=100):
"""
Plot loss and print stats of weights using an example neural network
"""
colors = ['r', 'b', 'g', 'c', 'y', 'k']
label_accs = []
label_loss = []
    assert len(weight_init_list) <= len(colors), 'Too many initial weights to plot'
for i, (weights, label) in enumerate(weight_init_list):
loss, val_acc = _get_loss_acc(dataset, weights)
plt.plot(loss[:plot_n_batches], colors[i], label=label)
label_accs.append((label, val_acc))
label_loss.append((label, loss[-1]))
plt.title(title)
plt.xlabel('Batches')
plt.ylabel('Loss')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show()
print('After 858 Batches (2 Epochs):')
print('Validation Accuracy')
for label, val_acc in label_accs:
print(' {:7.3f}% -- {}'.format(val_acc*100, label))
print('Loss')
for label, loss in label_loss:
print(' {:7.3f} -- {}'.format(loss, label))
| mit |
Djabbz/scikit-learn | examples/linear_model/plot_bayesian_ridge.py | 248 | 2588 | """
=========================
Bayesian Ridge Regression
=========================
Computes a Bayesian Ridge Regression on a synthetic dataset.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
As the prior on the weights is a Gaussian prior, the histogram of the
estimated weights is Gaussian.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import BayesianRidge, LinearRegression
###############################################################################
# Generating simulated data with Gaussian weights
np.random.seed(0)
n_samples, n_features = 100, 100
X = np.random.randn(n_samples, n_features) # Create Gaussian data
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
###############################################################################
# Fit the Bayesian Ridge Regression and an OLS for comparison
clf = BayesianRidge(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
###############################################################################
# Plot true weights, estimated weights and histogram of the weights
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, 'b-', label="Bayesian Ridge estimate")
plt.plot(w, 'g-', label="Ground truth")
plt.plot(ols.coef_, 'r--', label="OLS estimate")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc="best", prop=dict(size=12))
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, log=True)
plt.plot(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
'ro', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc="lower left")
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_)
plt.ylabel("Score")
plt.xlabel("Iterations")
plt.show()
| bsd-3-clause |
ricardog/raster-project | landuse-change.py | 1 | 10594 | #!/usr/bin/env python
# From
## https://stackoverflow.com/questions/22787209/how-to-have-clusters-of-stacked-bars-with-python-pandas
import click
import itertools
import json
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import multiprocessing
import numpy as np
import numpy.ma as ma
import os
import pandas as pd
from pylru import lrudecorator
import rasterio
import rasterio.windows
import time
import pdb
import projections.lu as lu
import projections.lui as lui
import projections.utils as utils
import projections.predicts as predicts
from projections.rasterset import RasterSet
class YearRangeParamType(click.ParamType):
name = 'year range'
def convert(self, value, param, ctx):
try:
try:
return [int(value)]
except ValueError:
l, h = value.split(':')
return range(int(l), int(h))
except ValueError:
self.fail('%s is not a valid year range' % value, param, ctx)
YEAR_RANGE = YearRangeParamType()
@lrudecorator(10)
def carea(bounds=None, height=None):
ice_ds = rasterio.open('netcdf:%s:icwtr' % utils.luh2_static())
print('reading area')
ds = rasterio.open('netcdf:%s:carea' % utils.luh2_static())
if bounds is None:
ice = ice_ds.read(1)
mask = np.where(ice == 1, True, False)
ca = ds.read(1, masked=True)
ca.mask = np.logical_or(ca.mask, mask)
return ca
win = ds.window(*bounds)
if height is not None and win[1][1] - win[0][1] > height:
win = ((win[0][0], win[0][0] + height), win[1])
ice = ice_ds.read(1, window=win)
mask = np.where(ice == 1, True, False)
ca = ds.read(1, masked=True, window=win)
ca.mask = np.logical_or(ca.mask, mask)
return ca
@lrudecorator(5)
def carea2(bounds=None, height=None):
ds = rasterio.open('netcdf:%s:carea' %
os.path.join(utils.luh2_dir(),
'staticData_quarterdeg.nc'))
if bounds is None:
return ds.read(1, masked=True)
win = ds.window(*bounds)
if win[1][1] - win[0][1] > height:
win = ((win[0][0], win[0][0] + height), win[1])
return ds.read(1, masked=True, window=win)
@lrudecorator(5)
def tarea(bounds=None, height=None):
area = carea2(bounds, height)
ice_ds = rasterio.open(utils.luh2_static('icwtr'))
if bounds is None:
ice = ice_ds.read(1, masked=True)
else:
win = ice_ds.window(*bounds)
if win[1][1] - win[0][1] > height:
win = ((win[0][0], win[0][0] + height), win[1])
ice = ice_ds.read(1, masked=True, window=win)
return area * (1 - ice)
def plot_clustered_stacked(dfall, labels=None,
title="multiple stacked bar plot",
H="/", **kwargs):
"""Given a list of dataframes, with identical columns and index, create
a clustered stacked bar plot. labels is a list of the names of the
dataframe, used for the legend title is a string for the title of the
plot H is the hatch used for identification of the different dataframe
"""
n_df = len(dfall)
n_col = len(dfall[0].columns)
n_ind = len(dfall[0].index)
#axe = plt.subplot(111)
fig = plt.figure(figsize=(7, 4))
axe = fig.add_axes([0.1, 0.1, 0.7, 0.75])
for df in dfall : # for each data frame
axe = df.plot(kind="bar",
linewidth=0,
stacked=True,
ax=axe,
cmap=plt.cm.viridis,
legend=False,
grid=False,
**kwargs) # make bar plots
h,l = axe.get_legend_handles_labels() # get the handles we want to modify
for i in range(0, n_df * n_col, n_col): # len(h) = n_col * n_df
for j, pa in enumerate(h[i:i+n_col]):
for rect in pa.patches: # for each index
rect.set_x(rect.get_x() + 1 / float(n_df + 1) * i / float(n_col))
rect.set_hatch(H * int(i / n_col)) #edited part
rect.set_width(1 / float(n_df + 1))
axe.set_xticks((np.arange(0, 2 * n_ind, 2) + 1 / float(n_df + 1)) / 2.)
axe.set_xticklabels(df.index, rotation = 0)
axe.set_title(title)
# Add invisible data to add another legend
n=[]
for i in range(n_df):
n.append(axe.bar(0, 0, color="gray", hatch=H * i))
l1 = axe.legend(h[:n_col], l[:n_col], loc=[1.01, 0.5])
if labels is not None:
l2 = plt.legend(n, labels, loc=[1.01, 0.1])
axe.add_artist(l1)
return axe
def read_data(dirname, lu_name, scenario, year):
if lu_name == 'rcp':
types = lu.rcp.types()
elif lu_name == 'luh2':
types = lu.luh2.types()
elif lu_name == 'luh5':
types = lu.luh5.types()
else:
raise RuntimeError('Error: unknown land use class %s' % lu)
print("reading %s %d" % (scenario, year))
types = filter(lambda x: x != 'plantation_sec', types)
rasters = []
for name in types:
if name == 'plantation_sec':
continue
for intensity in lui.intensities():
path = os.path.join(dirname, lu_name,
'%s-%s_%s-%d.tif' % (scenario, name,
intensity, year))
ras = rasterio.open(path)
rasters.append(ras)
all_bounds = map(lambda x: x.bounds, rasters)
minxs, minys, maxxs, maxys = zip(*all_bounds)
bounds = (max(minxs), max(minys), min(maxxs), min(maxys))
areas = carea(bounds)
total = ma.sum(areas)
areas /= total / 100
  df = pd.DataFrame(index=types, columns=list(reversed(lui.intensities())))
for name in types:
for intensity in lui.intensities():
raster = rasters.pop(0)
data = raster.read(1, masked=True, window=raster.window(*bounds))
df.loc[name, intensity] = ma.sum(data * areas)
if 'plantation_pri' in types:
df.rename(index={'plantation_pri': 'plantation'}, inplace=True)
return df
@click.group(invoke_without_command=True)
@click.pass_context
def cli(ctx):
if ctx.invoked_subcommand is None:
click.echo('I was invoked without subcommand')
else:
click.echo('I am about to invoke %s' % ctx.invoked_subcommand)
@cli.command()
@click.option('--show', '-s', is_flag=True, default=False)
def barplot(show):
plt.style.use('ggplot')
# Read the data
df1 = read_data('ds', 'luh5', 'historical', 1950)
df2 = read_data('ds', 'luh5', 'historical', 2010)
# Then, just call :
ax = plot_clustered_stacked([df1, df2], ["1950", "2010"],
title="Land use change 1950 to 2010")
ax.set_ylabel('Fraction of land surface (%)')
plt.savefig('landuse-1920-2010.png')
if show:
plt.show()
def bounds(meta):
ul = meta['affine'] * (0, 0)
lr = meta['affine'] * (meta['width'], meta['height'])
return (ul[0], lr[1], lr[0], ul[1])
def eval(what, rsf, rsn):
datan, meta = rsn.eval(what, quiet=True)
dataf, _ = rsf.eval(what, quiet=True)
#data_vals = dataf.filled(0) + datan.filled(0)
#data = data_vals.view(ma.MaskedArray)
#data.mask = np.logical_and(dataf.mask, datan.mask)
#return data, meta
area = carea(bounds(meta))
valf = ma.sum(dataf * area)
valn = ma.sum(datan * area)
return float(valf + valn), meta
def project_hpd(scenario, year):
print("projecting hpd for %d using %s" % (year, scenario))
rasters = predicts.rasterset('luh2', scenario, year, 'f')
rs = RasterSet(rasters)
values, meta = rs.eval('hpd', quiet=True)
area = tarea(bounds(meta), meta['height'])
out = float(ma.sum(ma.masked_invalid(values * area)) / 1e9)
return out
def project_year(model_dir, scenario, year):
print("projecting land use for %d using %s" % (year, scenario))
# Open forested/non-forested mask layer
fstnf = rasterio.open('netcdf:%s:fstnf' % utils.luh2_static())
# Import standard PREDICTS rasters
rastersf = predicts.rasterset('luh2', scenario, year, 'f')
rsf = RasterSet(rastersf, mask=fstnf, maskval=0.0)
rastersn = predicts.rasterset('luh2', scenario, year, 'n')
rsn = RasterSet(rastersn, mask=fstnf, maskval=1.0)
lus = ('annual', 'nitrogen', 'pasture', 'perennial', 'primary',
'rangelands', 'secondary', 'urban')
stime = time.time()
values = [eval(lu, rsf, rsn) for lu in lus]
cells = carea(bounds(values[0][1]))
area = ma.sum(cells)
out = dict((lu, float(ma.sum(values[idx][0])))# / area * 100))
for idx, lu in enumerate(lus))
etime = time.time()
print("executed in %6.2fs" % (etime - stime))
return out
def unpack(args):
return project_year(*args)
@cli.command()
@click.argument('scenario', type=click.Choice(utils.luh2_scenarios()))
@click.argument('years', type=YEAR_RANGE)
@click.argument('output', type=click.File('wb'))
@click.option('--model-dir', '-m', type=click.Path(file_okay=False))
@click.option('--history', type=click.File('rb'))
@click.option('--parallel', '-p', default=1, type=click.INT,
help='How many projections to run in parallel (default: 1)')
def timeline(scenario, years, output, model_dir, history, parallel):
if parallel == 1:
data = map(lambda y: project_year(model_dir, scenario, y), years)
else:
pool = multiprocessing.Pool(processes=parallel)
data = pool.map(unpack, itertools.izip(itertools.repeat(model_dir),
itertools.repeat(scenario), years))
lus = set(map(lambda xx: tuple(xx.keys()), data))
assert len(lus) == 1
lus = lus.pop()
by_series = [{'name': lu, 'data': []} for lu in lus]
for lu in by_series:
for year in data:
lu['data'].append(year[lu['name']])
if history:
hist = json.load(history)
years = hist['years'] + years
hist_map = dict((xx['name'], xx['data']) for xx in hist['data'])
for row in by_series:
if row['name'] in hist_map:
row['data'] = hist_map[row['name']] + row['data']
output.write(json.dumps({'years': years, 'data': by_series}))
output.write("\n")
@cli.command()
@click.argument('output', type=click.File('wb'))
@click.option('--parallel', '-p', default=1, type=click.INT,
help='How many projections to run in parallel (default: 1)')
def hpd(output, parallel):
out = []
for scenario in utils.luh2_scenarios():
if scenario == 'historical':
years = range(1950, 2011)
else:
years = range(2015, 2100)
if True or parallel == 1:
data = map(lambda y: project_hpd(scenario, y), years)
else:
pool = multiprocessing.Pool(processes=parallel)
data = pool.map(unpack, itertools.izip(itertools.repeat(scenario), years))
if scenario != 'historical':
data = [None] * (2010 - 1950) + data
out.append({'name': scenario, 'data': data})
all_years = range(1950, 2011) + range(2015, 2100)
output.write(json.dumps({'years': all_years, 'data': out}))
output.write("\n")
if __name__ == '__main__':
cli()
| apache-2.0 |
levjj/rde | benchmarks/plots.py | 1 | 2226 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
data = np.genfromtxt('data.csv',
delimiter=',',
names=True,
dtype="S9,S9,S9,i8,i8,f8,f8")
strategies = np.unique(data['Strategy'])
shapes = np.unique(data['StateShape'])
benchmarks = np.unique(data['Benchmark'])
for strat in strategies:
ds = data[data['Strategy'] == strat]
for benchmark in benchmarks:
db = ds[ds['Benchmark'] == benchmark]
size = np.array([d[3] for d in db])
size_t = np.sort(np.unique(size))
events = np.array([d[4] for d in db])
events_t = np.sort(np.unique(events))
time = np.array([d[5] for d in db])
memory = np.array([d[6] for d in db])
cr = ['r' for k in size]
cg = ['g' for k in size]
fig = plt.figure()
fig.suptitle(benchmark + ' with ' + strat + ' Strategy', fontsize=14, fontweight='bold')
atim = fig.add_subplot(211)
atim.set_xlabel('Size of State')
atim.set_ylabel(u'Time in μs', color='r')
atim.tick_params(axis='y', color='r', labelcolor='r')
atim.scatter(size, time, color=cr, marker='+')
means = [np.mean([d[5] for d in db if d[3] == s]) for s in size_t]
atim.plot(size_t, means, color='r')
    amem = atim.twinx()
amem.set_ylabel(u'Memory in bytes', color='g')
amem.tick_params(axis='y', color='g', labelcolor='g')
amem.scatter(size, memory, color=cg, marker='x')
means = [np.mean([d[6] for d in db if d[3] == s]) for s in size_t]
amem.plot(size_t, means, color='g')
atim = fig.add_subplot(212)
atim.set_xlabel('Number of Events')
atim.set_ylabel(u'Time in μs', color='r')
atim.tick_params(axis='y', color='r', labelcolor='r')
atim.scatter(events, time, color=cr, marker='+')
means = [np.mean([d[5] for d in db if d[4] == e]) for e in events_t]
    atim.plot(events_t, means, color='r')
    amem = atim.twinx()
amem.set_ylabel(u'Memory in bytes', color='g')
amem.tick_params(axis='y', color='g', labelcolor='g')
amem.scatter(events, memory, color=cg, marker='x')
means = [np.mean([d[6] for d in db if d[4] == e]) for e in events_t]
    amem.plot(events_t, means, color='g')
# plt.show()
plt.savefig(benchmark + '_' + strat + '.pdf')
| isc |
RamaneekGill/Twitter-Hashtag-Prediction | NaiveBayes/NaiveBayesv2.py | 1 | 14014 | from __future__ import division
import csv
import random
import math
import sys
import string
import re
import time
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
import operator
from scipy.sparse import *
from scipy.misc import imread
from scipy.misc import imresize
import matplotlib.image as mpimg
from scipy import ndimage
from pandas import *
import numpy
from numpy import *
from PIL import Image
from pylab import *
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cbook as cbook
import pickle
def main():
naiveBayes = NaiveBayes()
CONST_EPSILON_INTERVALS = [0.1, 0.01, 0.001, 0.0001, 0.00001, 0.000001, 0.0000001, 0.00000001, 0.000000001, 0.0000000001, 0.00000000001]
CONST_ALPHA_INTERVALS = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94, 0.95, 0.96, 0.97, 0.98, 0.99, 1]
CONST_HASHTAGS_TO_PREDICT = [56, 100, 150, 200, 250, 300, 350, 400, 450, 500]
CONST_HASHTAG_PREDICTION_RANGE = [20, 15, 10, 5, 3, 1]
CONST_TEST_RATIOS = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
CONST_TEST_SIZE = [0, 0, 0, 0, 0, 0, 0, 0, 0]
for i in range(len(CONST_TEST_RATIOS)):
CONST_TEST_SIZE[i] = CONST_TEST_RATIOS[i] * len(naiveBayes.testSet)
BEST_ALPHA = 0.92
BEST_EPSILON = 0.01
# For 500 hashtags and predicting top 5
BEST_ALPHA = 0.9
BEST_EPSILON = 1e-09
naiveBayes.setEpsilon(BEST_EPSILON)
naiveBayes.setAlpha(BEST_ALPHA)
# naiveBayes.testAgainst(naiveBayes.testSet)
naiveBayes.testAgainst(naiveBayes.testSet)
print('CORRECT PREDICTIONS~~~~~~~~~~~~~~~~~~~~~~~~~')
naiveBayes.getProbabilityResults(naiveBayes.correctPredictions)
print('\n\n\n\n\n\n\n\n\n\n\n')
print('INCORRECT PREDICTIONS~~~~~~~~~~~~~~~~~~~~~~~~~')
naiveBayes.getProbabilityResults(naiveBayes.incorrectPredictions)
# print('Performing cross validation to find the best epsilon and alpha values')
# accuracies = []
# maxAccuracy = 0
# for epsilon in CONST_EPSILON_INTERVALS:
# for alpha in CONST_ALPHA_INTERVALS:
# naiveBayes.setEpsilon(epsilon)
# naiveBayes.setAlpha(alpha)
# naiveBayes.testAgainst(naiveBayes.validationSet)
# accuracy = naiveBayes.getAccuracy()
# accuracies.append(accuracy)
# if max(accuracies) > maxAccuracy:
# BEST_EPSILON = epsilon
# BEST_ALPHA = alpha
# maxAccuracy = max(accuracies)
# print('Validation tests have shown that the best epsilon value to use is: {}, best alpha value is: {}'.format(BEST_EPSILON, BEST_ALPHA))
# print('Generating graph for varying number of hashtags predicted contain target')
# accuracies = []
# for hitRange in CONST_HASHTAG_PREDICTION_RANGE:
# naiveBayes.setHitRange(hitRange)
# naiveBayes.testAgainst(naiveBayes.testSet)
# accuracy = naiveBayes.getAccuracy()
# accuracies.append(accuracy)
# plt.plot(CONST_HASHTAG_PREDICTION_RANGE, accuracies)
# plt.xlabel('Number Of Hashtags Predicted')
# plt.ylabel('Accuracy')
# plt.title('Accuracy when Varying Number of Hashtags Predicted Contain Target Hashtag')
# plt.show()
# print('Generating graph for varying number of hashtags to predict')
# accuracies = []
# for numHashtags in CONST_HASHTAGS_TO_PREDICT:
# naiveBayes.setHashtagsToPredict(numHashtags)
# naiveBayes.testAgainst(naiveBayes.testSet)
# accuracy = naiveBayes.getAccuracy()
# accuracies.append(accuracy)
# plt.plot(CONST_HASHTAGS_TO_PREDICT, accuracies)
# plt.xlabel('Number Of Hashtags To Predict')
# plt.ylabel('Accuracy')
# plt.title('Accuracy when Varying Number of Hashtags to Predict')
# plt.show()
# print('Generating graph for epsilon accuracies')
# epsilonAccuracies = []
# alpha = BEST_ALPHA
# for epsilon in CONST_EPSILON_INTERVALS:
# naiveBayes.setEpsilon(epsilon)
# naiveBayes.setAlpha(alpha)
# naiveBayes.testAgainst(naiveBayes.testSet)
# accuracy = naiveBayes.getAccuracy()
# epsilonAccuracies.append(accuracy)
# plt.plot(CONST_EPSILON_INTERVALS, epsilonAccuracies)
# plt.xscale('log', nonposy='clip')
# plt.xlabel('Epsilon')
# plt.ylabel('Accuracy')
# plt.title('Accuracy on Test Set Using Alpha = {}'.format(alpha))
# plt.show()
# print('Generating graph for alpha accuracies')
# alphaAccuracies = []
# epsilon = BEST_EPSILON
# for alpha in CONST_ALPHA_INTERVALS:
# naiveBayes.setEpsilon(epsilon)
# naiveBayes.setAlpha(alpha)
# naiveBayes.testAgainst(naiveBayes.testSet)
# accuracy = naiveBayes.getAccuracy()
# alphaAccuracies.append(accuracy)
# plt.plot(CONST_ALPHA_INTERVALS, alphaAccuracies)
# plt.xlabel('Alpha')
# plt.ylabel('Accuracy')
# plt.title('Accuracy on Test Set Using Epsilon = {}'.format(epsilon))
# plt.show()
# print('Generating graph for test set size variances')
# accuracies = []
# for testRatio in CONST_TEST_RATIOS:
# naiveBayes = NaiveBayes(BEST_EPSILON, BEST_ALPHA, 0.1, testRatio)
# naiveBayes.testAgainst(naiveBayes.testSet)
# accuracy = naiveBayes.getAccuracy()
# accuracies.append(accuracy)
# plt.plot(CONST_TEST_SIZE, accuracies)
# plt.xlabel('Number of Tweets In Test Set')
# plt.ylabel('Accuracy')
# plt.title('Accuracy on Test Set Using Epsilon = {}, Alpha = {}'.format(BEST_EPSILON, BEST_ALPHA))
# plt.show()
class NaiveBayes:
# Need to test these still
BEST_EPSILON = 0.01
BEST_ALPHA = 0.92
CONST_RANDOM_SEED = 20150819
# CONST_TO_PREDICT = 56
# CONST_HIT_RANGE = 20
# For our tests
CONST_TO_PREDICT = 500
CONST_HIT_RANGE = 5
def __init__(self, epsilon = 0.01, alpha = 0.92, validation_ratio = 0.1, test_ratio = 0.5):
print('Intializing NaiveBayes')
self.epsilon = epsilon
self.alpha = alpha
self.validation_ratio = validation_ratio
self.test_ratio = test_ratio
with open('../stopwords.txt') as f:
self.stopWords = f.read().splitlines()
filename = '../training.1600000.processed.noemoticon.csv'
self.dataset = self.readCsv(filename) # Contains only tweets with hashtags
self.validationSet, self.dataset = self.splitDataset(self.dataset, self.validation_ratio)
self.testSet, self.trainSet = self.splitDataset(self.dataset, self.test_ratio)
self.wordCounts, self.hashtagCounts = self.generateCounts()
self.wordsMappedToHashtags = self.generateHashtagSpecificVocabulary()
self.hashtagsToPredict = self.getHashtagsToPredict()
self.hitRange = NaiveBayes.CONST_HIT_RANGE
self.correctPredictions = []
self.incorrectPredictions = []
def generateCounts(self):
wordCounts = {}
hashtagCounts = {}
for tweet in self.trainSet:
hashtags = []
for word in tweet.split():
if word.startswith('#') and len(word) > 2:
word = word.lower().translate(string.maketrans("",""), string.punctuation) # remove punctuation
hashtags.append(word)
if word not in wordCounts:
wordCounts[word] = 1
else:
wordCounts[word] += 1
else:
if '@' in word:
continue
if word in self.stopWords:
continue
word = word.lower().translate(string.maketrans("",""), string.punctuation) # remove punctuation
if word not in wordCounts:
wordCounts[word] = 1
else:
wordCounts[word] += 1
for hashtag in hashtags:
if hashtag not in hashtagCounts:
hashtagCounts[hashtag] = 1.0
else:
hashtagCounts[hashtag] += 1.0
return wordCounts, hashtagCounts
def readCsv(self, filename):
corpus = read_csv(filename)
corpus.columns = ["1", "2", "3", "4", "5", "tweet"]
corpus = corpus["tweet"]
dataset = [tweet for tweet in corpus if '#' in tweet]
return dataset
def splitDataset(self, dataset, ratio):
idx = int(len(dataset) * ratio)
numpy.random.seed(NaiveBayes.CONST_RANDOM_SEED)
numpy.random.shuffle(dataset)
set1 = dataset[:idx]
set2 = dataset[idx:]
return set1, set2
numpy.random.seed(NaiveBayes.CONST_RANDOM_SEED)
idx = numpy.random.permutation(len(dataset))
testSet = array(dataset)[idx[len(idx)/2:]]
trainSet = array(dataset)[idx[:len(idx)/2]]
return testSet, trainSet
def generateHashtagSpecificVocabulary(self):
wordsMappedToHashtags = {}
for tweet in self.trainSet:
words = []
hashtags = []
for word in tweet.split():
if word.startswith('#') and len(word) > 2:
word = word.lower().translate(string.maketrans("",""), string.punctuation) # remove punctuation
hashtags.append(word)
words.append(word)
else:
if '@' in word:
continue
if word in self.stopWords:
continue
word = word.lower().translate(string.maketrans("",""), string.punctuation) # remove punctuation
words.append(word)
for hashtag in hashtags:
if hashtag not in wordsMappedToHashtags:
wordsMappedToHashtags[hashtag] = {}
for word in words:
if word not in wordsMappedToHashtags[hashtag]:
wordsMappedToHashtags[hashtag][word] = 1.0
else:
wordsMappedToHashtags[hashtag][word] += 1.0
return wordsMappedToHashtags
def getHashtagsToPredict(self):
return map(operator.itemgetter(0), sorted(self.hashtagCounts.items(), key=operator.itemgetter(1))[-NaiveBayes.CONST_TO_PREDICT:])
def testAgainst(self, testSet):
self.time = time.time()
i = 0
hits = 0
for tweet in testSet:
words = []
hashtags = []
for word in tweet.split():
if word.startswith('#') and len(word) > 2:
word = word.lower().translate(string.maketrans("",""), string.punctuation) # remove punctuation
hashtags.append(word)
# words.append(word) DON'T WANT TO BE AWARE OF THE HASHTAG IN TESTSET
else:
if '@' in word:
continue
if word in self.stopWords:
continue
word = word.lower().translate(string.maketrans("",""), string.punctuation) # remove punctuation
words.append(word)
if len(set(hashtags).intersection(self.hashtagsToPredict)) == 0:
continue # Can't predict any hashtags for this tweet
i += 1
probabilitiesMappedToHashtagsToPredict = {}
for hashtag in self.hashtagsToPredict:
prob = 0
for word in words:
prob += log(self.epsilon + self.wordsMappedToHashtags[hashtag].get(word, 0.0)) - log(self.hashtagCounts[hashtag])
probabilitiesMappedToHashtagsToPredict[hashtag] = self.alpha*log(self.hashtagCounts[hashtag]) + (1-self.alpha)*prob - log(len(self.trainSet))
# This is without priors
probabilitiesMappedToHashtagsToPredict[hashtag] = (1-self.alpha)*prob - log(len(self.trainSet))
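                # Written out, the score assigned here is the usual smoothed
                # Naive Bayes posterior in log space (a sketch that simply
                # restates the code above):
                #   score(h) = alpha * log C(h)
                #            + (1 - alpha) * sum_w [log(epsilon + C(w, h)) - log C(h)]
                #            - log N
                # with C(h) the hashtag count, C(w, h) the per-hashtag word
                # count, N the number of training tweets, epsilon the smoothing
                # constant and alpha the weight on the prior; the re-assignment
                # on the line above simply drops the alpha-weighted prior term
                # so the ranking uses the likelihood alone.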
topProbabilities = map(operator.itemgetter(0), sorted(probabilitiesMappedToHashtagsToPredict.items(), key=operator.itemgetter(1))[-self.hitRange:])
if len(set(hashtags).intersection(topProbabilities)) > 0:
hits += 1
# if len(self.correctPredictions) < 50:
self.correctPredictions.append(tweet)
else:
# if len(self.incorrectPredictions) < 50:
self.incorrectPredictions.append(tweet)
        self.accuracy = float(hits) / i  # float division; hits and i are ints
self.time = time.time() - self.time
print('Processed {} tweets, only {} had a predictable hashtag, accuracy was: {}, this took {} seconds. EPSILON: {} | ALPHA: {}'.format(len(testSet), i, self.accuracy, self.time, self.epsilon, self.alpha))
def getProbabilityResults(self, testSet):
i = 0
hits = 0
for tweet in testSet:
words = []
hashtags = []
for word in tweet.split():
if word.startswith('#') and len(word) > 2:
word = word.lower().translate(string.maketrans("",""), string.punctuation) # remove punctuation
hashtags.append(word)
# words.append(word) DON'T WANT TO BE AWARE OF THE HASHTAG IN TESTSET
else:
if '@' in word:
continue
if word in self.stopWords:
continue
word = word.lower().translate(string.maketrans("",""), string.punctuation) # remove punctuation
words.append(word)
if len(set(hashtags).intersection(self.hashtagsToPredict)) == 0:
continue # Can't predict any hashtags for this tweet
i += 1
probabilitiesMappedToHashtagsToPredict = {}
probPerWord = {}
for hashtag in self.hashtagsToPredict:
probPerWord[hashtag] = {}
prob = 0
for word in words:
prob += log(self.epsilon + self.wordsMappedToHashtags[hashtag].get(word, 0.0)) - log(self.hashtagCounts[hashtag])
probPerWord[hashtag][word] = prob
probabilitiesMappedToHashtagsToPredict[hashtag] = self.alpha*log(self.hashtagCounts[hashtag]) + (1-self.alpha)*prob - log(len(self.trainSet))
topProbabilities = map(operator.itemgetter(0), sorted(probabilitiesMappedToHashtagsToPredict.items(), key=operator.itemgetter(1))[-5:])
print('These are the probability results for the tweet with words: {}, hashtags associated = {}'.format(', '.join(words), ', '.join(hashtags)))
medianIndex = int(len(topProbabilities)/2)
i = 0
total = 0
minVal = 1000000000
maxVal = -1000000000
print(probPerWord[hashtag])
for hashtag in topProbabilities:
prob = sum(probPerWord[hashtag].values())
total += prob
if i == medianIndex:
median = hashtag
if prob < minVal:
minVal = prob
minHashtag = hashtag
if prob > maxVal:
maxVal = prob
maxHashtag = hashtag
i += 1
print(hashtag, probPerWord[hashtag])
print('Median is: |||{}||| with sum of {} for {}'.format(median, sum(probPerWord[median].values()), probPerWord[median]))
print('Min is: |||{}||| with sum of {} for {}'.format(minHashtag, sum(probPerWord[minHashtag].values()), probPerWord[minHashtag]))
print('Max is: |||{}||| with sum of {} for {}'.format(maxHashtag, sum(probPerWord[maxHashtag].values()), probPerWord[maxHashtag]))
print('Average of the summations is: {}'.format(total / len(topProbabilities)))
print('This is the probability result for the TARGET:')
print(hashtag, probPerWord[hashtag])
def getAccuracy(self):
return self.accuracy
def getEpsilon(self):
return self.epsilon
def getAlpha(self):
return self.alpha
def setEpsilon(self, value):
self.epsilon = value
def setAlpha(self, value):
self.alpha = value
def getTimePassed(self):
return self.time
def setHashtagsToPredict(self, numHashtags):
self.hashtagsToPredict = map(operator.itemgetter(0), sorted(self.hashtagCounts.items(), key=operator.itemgetter(1))[-numHashtags:])
def setHitRange(self, hitRange):
self.hitRange = hitRange
main()
| apache-2.0 |
idrigo/castawayplot | CTDplot.py | 1 | 3549 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 29 11:32:17 2017
@author: drigo
"""
#from IPython import get_ipython
#def __reset__(): get_ipython().magic('reset -sf')
from matplotlib import rcParams
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import host_subplot
import mpl_toolkits.axisartist as AA
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import config
def plot (figname,Txlim,Sxlim,major_ticsT,major_ticsS,minor_ticsT,minor_ticsS,df):
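    # Hypothetical usage sketch (not part of the original script): given a pandas
    # DataFrame df with 'Temp', 'Sal' and 'Depth' columns, a call such as
    #   plot('cast01.png', (0, 25), (30, 38), 5, 2, 1, 0.5, df)
    # draws temperature vs. depth on the host axis and salinity on the parasite axis,
    # then writes the figure to config.out_dir.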
host = host_subplot(111, axes_class=AA.Axes)
par1 = host.twiny()
    # create two new coordinate axes, on par1 and on host
new_fixed_axis = par1.get_grid_helper().new_fixed_axis
par1.axis["top"] = new_fixed_axis(loc="top",
axes=par1,
offset=(0, 50))
new_fixed_axis = host.get_grid_helper().new_fixed_axis
host.axis["top"] = new_fixed_axis(loc="top",
axes=host,
offset=(0, 0))
    # no idea what is going on here (original author's note)
par1.axis["top"].toggle(all=True)
host.toggle_axisline(True)
    # turn off the right and bottom axes
host.axis["bottom"].set_visible(False)
host.axis["right"].set_visible(False)
    # set the spacing between major and minor ticks
majorLocator = MultipleLocator(major_ticsT)
majorFormatter = FormatStrFormatter('%d')
minorLocator = MultipleLocator(minor_ticsT)
host.xaxis.set_major_locator(majorLocator)
host.xaxis.set_major_formatter(majorFormatter)
host.xaxis.set_minor_locator(minorLocator)
majorLocator = MultipleLocator(major_ticsS)
majorFormatter = FormatStrFormatter('%d')
minorLocator = MultipleLocator(minor_ticsS)
par1.xaxis.set_major_locator(majorLocator)
par1.xaxis.set_major_formatter(majorFormatter)
par1.xaxis.set_minor_locator(minorLocator)
    # finally, draw the curves
p1, = host.plot(df['Temp'],df['Depth'], "r-",linewidth=0.7)
p2, = par1.plot(df['Sal'],df['Depth'],'b-',linewidth=0.7)
    # change the color of the axis line, ticks and tick labels
host.axis["top"].line.set_color(p1.get_color())
host.axis["top"].major_ticks.set_color(p1.get_color())
host.axis["top"].major_ticklabels.set_color(p1.get_color())
par1.axis["top"].line.set_color(p2.get_color())
par1.axis["top"].major_ticks.set_color(p2.get_color())
par1.axis["top"].major_ticklabels.set_color(p2.get_color())
    # set the axis labels
host.set_xlabel(u"Температура, ˚C")
host.set_ylabel(u"Глубина, м")
par1.set_xlabel(u"Соленость, PSU")
    # change the color of the axis labels
host.xaxis.label.set_color(p1.get_color())
par1.xaxis.label.set_color(p2.get_color())
    # draw the grid  TODO - keep only the y grid
host.grid(True, linestyle=':')
    # axis limits
host.set_xlim(Txlim[0], Txlim[1])
par1.set_xlim(Sxlim[0],Sxlim[1])
    # set the aspect ratio of the plot
plt.gca().set_aspect(0.5, adjustable='box-forced')
    # invert the y axis
plt.gca().invert_yaxis()
    # save the figure
plt.savefig(config.out_dir+figname,dpi=300,format='png',bbox_inches='tight')
#plt.show()
plt.clf() | mit |
pianomania/scikit-learn | examples/plot_kernel_ridge_regression.py | 26 | 6289 | """
=============================================
Comparison of kernel ridge regression and SVR
=============================================
Both kernel ridge regression (KRR) and SVR learn a non-linear function by
employing the kernel trick, i.e., they learn a linear function in the space
induced by the respective kernel which corresponds to a non-linear function in
the original space. They differ in the loss functions (ridge versus
epsilon-insensitive loss). In contrast to SVR, fitting a KRR can be done in
closed-form and is typically faster for medium-sized datasets. On the other
hand, the learned model is non-sparse and thus slower than SVR at
prediction-time.
This example illustrates both methods on an artificial dataset, which
consists of a sinusoidal target function and strong noise added to every fifth
datapoint. The first figure compares the learned model of KRR and SVR when both
complexity/regularization and bandwidth of the RBF kernel are optimized using
grid-search. The learned functions are very similar; however, fitting KRR is
approx. seven times faster than fitting SVR (both with grid-search). However,
prediction of 100000 target values is more than three times faster with SVR
since it has learned a sparse model using only approx. 1/3 of the 100 training
datapoints as support vectors.
The next figure compares the time for fitting and prediction of KRR and SVR for
different sizes of the training set. Fitting KRR is faster than SVR for medium-
sized training sets (less than 1000 samples); however, for larger training sets
SVR scales better. With regard to prediction time, SVR is faster than
KRR for all sizes of the training set because of the learned sparse
solution. Note that the degree of sparsity and thus the prediction time depends
on the parameters epsilon and C of the SVR.
"""
# Authors: Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
from __future__ import division
import time
import numpy as np
from sklearn.svm import SVR
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import learning_curve
from sklearn.kernel_ridge import KernelRidge
import matplotlib.pyplot as plt
rng = np.random.RandomState(0)
#############################################################################
# Generate sample data
X = 5 * rng.rand(10000, 1)
y = np.sin(X).ravel()
# Add noise to targets
y[::5] += 3 * (0.5 - rng.rand(X.shape[0] // 5))
X_plot = np.linspace(0, 5, 100000)[:, None]
#############################################################################
# Fit regression model
train_size = 100
svr = GridSearchCV(SVR(kernel='rbf', gamma=0.1), cv=5,
param_grid={"C": [1e0, 1e1, 1e2, 1e3],
"gamma": np.logspace(-2, 2, 5)})
kr = GridSearchCV(KernelRidge(kernel='rbf', gamma=0.1), cv=5,
param_grid={"alpha": [1e0, 0.1, 1e-2, 1e-3],
"gamma": np.logspace(-2, 2, 5)})
t0 = time.time()
svr.fit(X[:train_size], y[:train_size])
svr_fit = time.time() - t0
print("SVR complexity and bandwidth selected and model fitted in %.3f s"
% svr_fit)
t0 = time.time()
kr.fit(X[:train_size], y[:train_size])
kr_fit = time.time() - t0
print("KRR complexity and bandwidth selected and model fitted in %.3f s"
% kr_fit)
sv_ratio = svr.best_estimator_.support_.shape[0] / train_size
print("Support vector ratio: %.3f" % sv_ratio)
t0 = time.time()
y_svr = svr.predict(X_plot)
svr_predict = time.time() - t0
print("SVR prediction for %d inputs in %.3f s"
% (X_plot.shape[0], svr_predict))
t0 = time.time()
y_kr = kr.predict(X_plot)
kr_predict = time.time() - t0
print("KRR prediction for %d inputs in %.3f s"
% (X_plot.shape[0], kr_predict))
#############################################################################
# look at the results
sv_ind = svr.best_estimator_.support_
plt.scatter(X[sv_ind], y[sv_ind], c='r', s=50, label='SVR support vectors',
zorder=2)
plt.scatter(X[:100], y[:100], c='k', label='data', zorder=1)
# plt.hold('on')  # deprecated no-op; plt.hold was removed in Matplotlib 3.0
plt.plot(X_plot, y_svr, c='r',
label='SVR (fit: %.3fs, predict: %.3fs)' % (svr_fit, svr_predict))
plt.plot(X_plot, y_kr, c='g',
label='KRR (fit: %.3fs, predict: %.3fs)' % (kr_fit, kr_predict))
plt.xlabel('data')
plt.ylabel('target')
plt.title('SVR versus Kernel Ridge')
plt.legend()
# Visualize training and prediction time
plt.figure()
# Generate sample data
X = 5 * rng.rand(10000, 1)
y = np.sin(X).ravel()
y[::5] += 3 * (0.5 - rng.rand(X.shape[0] // 5))
sizes = np.logspace(1, 4, 7, dtype=int)  # plain int; the np.int alias was removed in newer NumPy
for name, estimator in {"KRR": KernelRidge(kernel='rbf', alpha=0.1,
gamma=10),
"SVR": SVR(kernel='rbf', C=1e1, gamma=10)}.items():
train_time = []
test_time = []
for train_test_size in sizes:
t0 = time.time()
estimator.fit(X[:train_test_size], y[:train_test_size])
train_time.append(time.time() - t0)
t0 = time.time()
estimator.predict(X_plot[:1000])
test_time.append(time.time() - t0)
plt.plot(sizes, train_time, 'o-', color="r" if name == "SVR" else "g",
label="%s (train)" % name)
plt.plot(sizes, test_time, 'o--', color="r" if name == "SVR" else "g",
label="%s (test)" % name)
plt.xscale("log")
plt.yscale("log")
plt.xlabel("Train size")
plt.ylabel("Time (seconds)")
plt.title('Execution Time')
plt.legend(loc="best")
# Visualize learning curves
plt.figure()
svr = SVR(kernel='rbf', C=1e1, gamma=0.1)
kr = KernelRidge(kernel='rbf', alpha=0.1, gamma=0.1)
train_sizes, train_scores_svr, test_scores_svr = \
learning_curve(svr, X[:100], y[:100], train_sizes=np.linspace(0.1, 1, 10),
scoring="neg_mean_squared_error", cv=10)
train_sizes_abs, train_scores_kr, test_scores_kr = \
learning_curve(kr, X[:100], y[:100], train_sizes=np.linspace(0.1, 1, 10),
scoring="neg_mean_squared_error", cv=10)
plt.plot(train_sizes, -test_scores_svr.mean(1), 'o-', color="r",
label="SVR")
plt.plot(train_sizes, -test_scores_kr.mean(1), 'o-', color="g",
label="KRR")
plt.xlabel("Train size")
plt.ylabel("Mean Squared Error")
plt.title('Learning curves')
plt.legend(loc="best")
plt.show()
| bsd-3-clause |
treycausey/scikit-learn | sklearn/dummy.py | 2 | 13705 | # Author: Mathieu Blondel <[email protected]>
# Arnaud Joly <[email protected]>
# Maheshakya Wijewardena<[email protected]>
# License: BSD 3 clause
import numpy as np
from .base import BaseEstimator, ClassifierMixin, RegressorMixin
from .externals.six.moves import xrange
from .utils import check_random_state
from .utils.validation import safe_asarray
from sklearn.utils import deprecated
class DummyClassifier(BaseEstimator, ClassifierMixin):
"""
DummyClassifier is a classifier that makes predictions using simple rules.
This classifier is useful as a simple baseline to compare with other
(real) classifiers. Do not use it for real problems.
Parameters
----------
strategy: str
Strategy to use to generate predictions.
* "stratified": generates predictions by respecting the training
set's class distribution.
* "most_frequent": always predicts the most frequent label in the
training set.
* "uniform": generates predictions uniformly at random.
* "constant": always predicts a constant label that is provided by
the user. This is useful for metrics that evaluate a non-majority
class
random_state: int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use.
constant: int or str or array of shape = [n_outputs]
The explicit constant as predicted by the "constant" strategy. This
parameter is useful only for the "constant" strategy.
Attributes
----------
`classes_` : array or list of array of shape = [n_classes]
Class labels for each output.
`n_classes_` : array or list of array of shape = [n_classes]
        Number of labels for each output.
`class_prior_` : array or list of array of shape = [n_classes]
Probability of each class for each output.
`n_outputs_` : int,
Number of outputs.
    `output_2d_` : bool,
True if the output at fit is 2d, else false.
"""
def __init__(self, strategy="stratified", random_state=None,
constant=None):
self.strategy = strategy
self.random_state = random_state
self.constant = constant
def fit(self, X, y):
"""Fit the random classifier.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
Target values.
Returns
-------
self : object
Returns self.
"""
if self.strategy not in ("most_frequent", "stratified", "uniform",
"constant"):
raise ValueError("Unknown strategy type.")
y = np.atleast_1d(y)
self.output_2d_ = y.ndim == 2
if y.ndim == 1:
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
self.classes_ = []
self.n_classes_ = []
self.class_prior_ = []
if self.strategy == "constant":
if self.constant is None:
raise ValueError("Constant target value has to be specified "
"when the constant strategy is used.")
else:
constant = np.reshape(np.atleast_1d(self.constant), (-1, 1))
if constant.shape[0] != self.n_outputs_:
raise ValueError("Constant target value should have "
"shape (%d, 1)." % self.n_outputs_)
for k in xrange(self.n_outputs_):
classes, y_k = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes)
self.n_classes_.append(classes.shape[0])
self.class_prior_.append(np.bincount(y_k) / float(y_k.shape[0]))
# Checking in case of constant strategy if the constant provided
# by the user is in y.
if self.strategy == "constant":
if constant[k] not in self.classes_[k]:
raise ValueError("The constant target value must be "
"present in training data")
if self.n_outputs_ == 1 and not self.output_2d_:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
self.class_prior_ = self.class_prior_[0]
return self
def predict(self, X):
"""
Perform classification on test vectors X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Input vectors, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
y : array, shape = [n_samples] or [n_samples, n_outputs]
Predicted target values for X.
"""
if not hasattr(self, "classes_"):
raise ValueError("DummyClassifier not fitted.")
X = safe_asarray(X)
# numpy random_state expects Python int and not long as size argument
# under Windows
n_samples = int(X.shape[0])
rs = check_random_state(self.random_state)
n_classes_ = self.n_classes_
classes_ = self.classes_
class_prior_ = self.class_prior_
constant = self.constant
if self.n_outputs_ == 1:
# Get same type even for self.n_outputs_ == 1
n_classes_ = [n_classes_]
classes_ = [classes_]
class_prior_ = [class_prior_]
constant = [constant]
# Compute probability only once
if self.strategy == "stratified":
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
proba = [proba]
y = []
for k in xrange(self.n_outputs_):
if self.strategy == "most_frequent":
ret = np.ones(n_samples, dtype=int) * class_prior_[k].argmax()
elif self.strategy == "stratified":
ret = proba[k].argmax(axis=1)
elif self.strategy == "uniform":
ret = rs.randint(n_classes_[k], size=n_samples)
elif self.strategy == "constant":
ret = np.ones(n_samples, dtype=int) * (
np.where(classes_[k] == constant[k]))
y.append(classes_[k][ret])
y = np.vstack(y).T
if self.n_outputs_ == 1 and not self.output_2d_:
y = np.ravel(y)
return y
def predict_proba(self, X):
"""
Return probability estimates for the test vectors X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Input vectors, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
        P : array-like or list of array-like of shape = [n_samples, n_classes]
Returns the probability of the sample for each class in
the model, where classes are ordered arithmetically, for each
output.
"""
if not hasattr(self, "classes_"):
raise ValueError("DummyClassifier not fitted.")
X = safe_asarray(X)
# numpy random_state expects Python int and not long as size argument
# under Windows
n_samples = int(X.shape[0])
rs = check_random_state(self.random_state)
n_classes_ = self.n_classes_
classes_ = self.classes_
class_prior_ = self.class_prior_
constant = self.constant
if self.n_outputs_ == 1 and not self.output_2d_:
# Get same type even for self.n_outputs_ == 1
n_classes_ = [n_classes_]
classes_ = [classes_]
class_prior_ = [class_prior_]
constant = [constant]
P = []
for k in xrange(self.n_outputs_):
if self.strategy == "most_frequent":
ind = np.ones(n_samples, dtype=int) * class_prior_[k].argmax()
out = np.zeros((n_samples, n_classes_[k]), dtype=np.float64)
out[:, ind] = 1.0
elif self.strategy == "stratified":
out = rs.multinomial(1, class_prior_[k], size=n_samples)
elif self.strategy == "uniform":
out = np.ones((n_samples, n_classes_[k]), dtype=np.float64)
out /= n_classes_[k]
elif self.strategy == "constant":
ind = np.where(classes_[k] == constant[k])
out = np.zeros((n_samples, n_classes_[k]), dtype=np.float64)
out[:, ind] = 1.0
P.append(out)
if self.n_outputs_ == 1 and not self.output_2d_:
P = P[0]
return P
def predict_log_proba(self, X):
"""
Return log probability estimates for the test vectors X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Input vectors, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
P : array-like or list of array-like of shape = [n_samples, n_classes]
Returns the log probability of the sample for each class in
the model, where classes are ordered arithmetically for each
output.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
return [np.log(p) for p in proba]
class DummyRegressor(BaseEstimator, RegressorMixin):
"""
DummyRegressor is a regressor that makes predictions using
simple rules.
This regressor is useful as a simple baseline to compare with other
(real) regressors. Do not use it for real problems.
Parameters
----------
strategy: str
Strategy to use to generate predictions.
* "mean": always predicts the mean of the training set
* "median": always predicts the median of the training set
* "constant": always predicts a constant value that is provided by
the user.
constant: int or float or array of shape = [n_outputs]
The explicit constant as predicted by the "constant" strategy. This
parameter is useful only for the "constant" strategy.
Attributes
----------
    `constant_` : float or array of shape [n_outputs]
        Mean or median of the training targets, or the constant value given by
        the user.
`n_outputs_` : int,
Number of outputs.
    `output_2d_` : bool,
True if the output at fit is 2d, else false.
"""
def __init__(self, strategy="mean", constant=None):
self.strategy = strategy
self.constant = constant
@property
@deprecated('This will be removed in version 0.17')
def y_mean_(self):
if self.strategy == 'mean':
return self.constant_
raise AttributeError
def fit(self, X, y):
"""Fit the random regressor.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
Target values.
Returns
-------
self : object
Returns self.
"""
if self.strategy not in ("mean", "median", "constant"):
raise ValueError("Unknown strategy type: %s, "
"expected 'mean', 'median' or 'constant'"
% self.strategy)
y = safe_asarray(y)
self.output_2d_ = (y.ndim == 2)
if self.strategy == "mean":
self.constant_ = np.reshape(np.mean(y, axis=0), (1, -1))
elif self.strategy == "median":
self.constant_ = np.reshape(np.median(y, axis=0), (1, -1))
elif self.strategy == "constant":
if self.constant is None:
raise TypeError("Constant target value has to be specified "
"when the constant strategy is used.")
self.constant = safe_asarray(self.constant)
if self.output_2d_ and self.constant.shape[0] != y.shape[1]:
raise ValueError(
"Constant target value should have "
"shape (%d, 1)." % y.shape[1])
self.constant_ = np.reshape(self.constant, (1, -1))
self.n_outputs_ = np.size(self.constant_) # y.shape[1] is not safe
return self
def predict(self, X):
"""
        Perform regression on test vectors X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Input vectors, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
y : array, shape = [n_samples] or [n_samples, n_outputs]
Predicted target values for X.
"""
if not hasattr(self, "constant_"):
raise ValueError("DummyRegressor not fitted.")
X = safe_asarray(X)
n_samples = X.shape[0]
y = np.ones((n_samples, 1)) * self.constant_
if self.n_outputs_ == 1 and not self.output_2d_:
y = np.ravel(y)
return y
| bsd-3-clause |
rew4332/tensorflow | tensorflow/contrib/learn/python/learn/tests/data_feeder_test.py | 24 | 8691 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `DataFeeder`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
# pylint: disable=wildcard-import
from tensorflow.contrib.learn.python.learn.learn_io import *
# pylint: enable=wildcard-import
class DataFeederTest(tf.test.TestCase):
# pylint: disable=undefined-variable
"""Tests for `DataFeeder`."""
def _assert_raises(self, input_data):
with self.assertRaisesRegexp(TypeError, 'annot convert'):
data_feeder.DataFeeder(input_data, None, n_classes=0, batch_size=1)
def test_input_uint32(self):
self._assert_raises(np.matrix([[1, 2], [3, 4]], dtype=np.uint32))
def test_input_uint64(self):
self._assert_raises(np.matrix([[1, 2], [3, 4]], dtype=np.uint64))
def _assert_dtype(self, expected_np_dtype, expected_tf_dtype, input_data):
feeder = data_feeder.DataFeeder(input_data, None, n_classes=0, batch_size=1)
self.assertEqual(expected_np_dtype, feeder.input_dtype)
with tf.Graph().as_default() as g, self.test_session(g):
inp, _ = feeder.input_builder()
self.assertEqual(expected_tf_dtype, inp.dtype)
def test_input_int8(self):
self._assert_dtype(
np.int8, tf.int8, np.matrix([[1, 2], [3, 4]], dtype=np.int8))
def test_input_int16(self):
self._assert_dtype(
np.int16, tf.int16, np.matrix([[1, 2], [3, 4]], dtype=np.int16))
def test_input_int32(self):
self._assert_dtype(
np.int32, tf.int32, np.matrix([[1, 2], [3, 4]], dtype=np.int32))
def test_input_int64(self):
self._assert_dtype(
np.int64, tf.int64, np.matrix([[1, 2], [3, 4]], dtype=np.int64))
def test_input_uint8(self):
self._assert_dtype(
np.uint8, tf.uint8, np.matrix([[1, 2], [3, 4]], dtype=np.uint8))
def test_input_uint16(self):
self._assert_dtype(
np.uint16, tf.uint16, np.matrix([[1, 2], [3, 4]], dtype=np.uint16))
def test_input_float16(self):
self._assert_dtype(
np.float16, tf.float16, np.matrix([[1, 2], [3, 4]], dtype=np.float16))
def test_input_float32(self):
self._assert_dtype(
np.float32, tf.float32, np.matrix([[1, 2], [3, 4]], dtype=np.float32))
def test_input_float64(self):
self._assert_dtype(
np.float64, tf.float64, np.matrix([[1, 2], [3, 4]], dtype=np.float64))
def test_input_bool(self):
self._assert_dtype(
np.bool, tf.bool,
np.array([[False for _ in xrange(2)] for _ in xrange(2)]))
def test_input_string(self):
input_data = np.array([['str%d' % i for i in xrange(2)] for _ in xrange(2)])
self._assert_dtype(input_data.dtype, tf.string, input_data)
def test_unsupervised(self):
data = np.matrix([[1, 2], [2, 3], [3, 4]])
feeder = data_feeder.DataFeeder(data, None, n_classes=0, batch_size=1)
with self.test_session():
inp, _ = feeder.input_builder()
feed_dict_fn = feeder.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[inp.name], [[1, 2]])
def test_data_feeder_regression(self):
x = np.matrix([[1, 2], [3, 4]])
y = np.array([1, 2])
df = data_feeder.DataFeeder(x, y, n_classes=0, batch_size=3)
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[inp.name], [[3, 4], [1, 2]])
self.assertAllClose(feed_dict[out.name], [2, 1])
def test_epoch(self):
data = np.matrix([[1, 2], [2, 3], [3, 4]])
labels = np.array([0, 0, 1])
feeder = data_feeder.DataFeeder(data, labels, n_classes=0, batch_size=1)
with self.test_session():
feeder.input_builder()
epoch = feeder.make_epoch_variable()
feed_dict_fn = feeder.get_feed_dict_fn()
# First input
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[epoch.name], [0])
# Second input
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[epoch.name], [0])
# Third input
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[epoch.name], [0])
# Back to the first input again, so new epoch.
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[epoch.name], [1])
def test_data_feeder_multioutput_regression(self):
x = np.matrix([[1, 2], [3, 4]])
y = np.array([[1, 2], [3, 4]])
df = data_feeder.DataFeeder(x, y, n_classes=0, batch_size=2)
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[inp.name], [[3, 4], [1, 2]])
self.assertAllClose(feed_dict[out.name], [[3, 4], [1, 2]])
def test_data_feeder_multioutput_classification(self):
x = np.matrix([[1, 2], [3, 4]])
y = np.array([[0, 1, 2], [2, 3, 4]])
df = data_feeder.DataFeeder(x, y, n_classes=5, batch_size=2)
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[inp.name], [[3, 4], [1, 2]])
self.assertAllClose(feed_dict[out.name],
[[[0, 0, 1, 0, 0], [0, 0, 0, 1, 0], [0, 0, 0, 0, 1]],
[[1, 0, 0, 0, 0], [0, 1, 0, 0, 0], [0, 0, 1, 0, 0]]])
def test_streaming_data_feeder(self):
def x_iter():
yield np.array([1, 2])
yield np.array([3, 4])
def y_iter():
yield np.array([1])
yield np.array([2])
df = data_feeder.StreamingDataFeeder(x_iter(),
y_iter(),
n_classes=0,
batch_size=2)
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[inp.name], [[1, 2], [3, 4]])
self.assertAllClose(feed_dict[out.name], [1, 2])
def test_dask_data_feeder(self):
if HAS_PANDAS and HAS_DASK:
x = pd.DataFrame(dict(a=np.array([.1, .3, .4, .6, .2, .1, .6]),
b=np.array([.7, .8, .1, .2, .5, .3, .9])))
x = dd.from_pandas(x, npartitions=2)
y = pd.DataFrame(dict(labels=np.array([1, 0, 2, 1, 0, 1, 2])))
y = dd.from_pandas(y, npartitions=2)
# TODO(ipolosukhin): Remove or restore this.
# x = extract_dask_data(x)
# y = extract_dask_labels(y)
df = data_feeder.DaskDataFeeder(x, y, n_classes=2, batch_size=2)
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[inp.name], [[0.40000001, 0.1],
[0.60000002, 0.2]])
self.assertAllClose(feed_dict[out.name], [[0., 0., 1.], [0., 1., 0.]])
def test_hdf5_data_feeder(self):
try:
import h5py # pylint: disable=g-import-not-at-top
x = np.matrix([[1, 2], [3, 4]])
y = np.array([1, 2])
h5f = h5py.File('test_hdf5.h5', 'w')
h5f.create_dataset('x', data=x)
h5f.create_dataset('y', data=y)
h5f.close()
h5f = h5py.File('test_hdf5.h5', 'r')
x = h5f['x']
y = h5f['y']
df = data_feeder.DataFeeder(x, y, n_classes=0, batch_size=3)
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[inp.name], [[3, 4], [1, 2]])
self.assertAllClose(feed_dict[out.name], [2, 1])
except ImportError:
print("Skipped test for hdf5 since it's not installed.")
class SetupPredictDataFeederTest(tf.test.TestCase):
"""Tests for `DataFeeder.setup_predict_data_feeder`."""
def test_iterable_data(self):
# pylint: disable=undefined-variable
x = iter([[1, 2], [3, 4], [5, 6]])
df = data_feeder.setup_predict_data_feeder(x, batch_size=2)
self.assertAllClose(six.next(df), [[1, 2], [3, 4]])
self.assertAllClose(six.next(df), [[5, 6]])
if __name__ == '__main__':
tf.test.main()
| apache-2.0 |
ikaee/bfr-attendant | facerecognitionlibrary/jni-build/jni/include/tensorflow/contrib/learn/python/learn/tests/multioutput_test.py | 7 | 1660 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multi-output tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
import tensorflow as tf
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn.estimators._sklearn import mean_squared_error
class MultiOutputTest(tf.test.TestCase):
"""Multi-output tests."""
def testMultiRegression(self):
random.seed(42)
rng = np.random.RandomState(1)
x = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(x).ravel(), np.pi * np.cos(x).ravel()]).T
regressor = learn.LinearRegressor(
feature_columns=learn.infer_real_valued_columns_from_input(x),
target_dimension=2)
regressor.fit(x, y, steps=100)
score = mean_squared_error(regressor.predict(x), y)
self.assertLess(score, 10, "Failed with score = {0}".format(score))
if __name__ == "__main__":
tf.test.main()
| apache-2.0 |
frank-tancf/scikit-learn | sklearn/cluster/tests/test_dbscan.py | 176 | 12155 | """
Tests for DBSCAN clustering algorithm
"""
import pickle
import numpy as np
from scipy.spatial import distance
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.neighbors import NearestNeighbors
from sklearn.cluster.dbscan_ import DBSCAN
from sklearn.cluster.dbscan_ import dbscan
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.metrics.pairwise import pairwise_distances
n_clusters = 3
X = generate_clustered_data(n_clusters=n_clusters)
def test_dbscan_similarity():
# Tests the DBSCAN algorithm with a similarity array.
# Parameters chosen specifically for this task.
eps = 0.15
min_samples = 10
# Compute similarities
D = distance.squareform(distance.pdist(X))
D /= np.max(D)
# Compute DBSCAN
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - (1 if -1 in labels else 0)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric="precomputed", eps=eps, min_samples=min_samples)
labels = db.fit(D).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_feature():
# Tests the DBSCAN algorithm with a feature vector array.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
metric = 'euclidean'
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples)
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_sparse():
core_sparse, labels_sparse = dbscan(sparse.lil_matrix(X), eps=.8,
min_samples=10)
core_dense, labels_dense = dbscan(X, eps=.8, min_samples=10)
assert_array_equal(core_dense, core_sparse)
assert_array_equal(labels_dense, labels_sparse)
def test_dbscan_sparse_precomputed():
D = pairwise_distances(X)
nn = NearestNeighbors(radius=.9).fit(X)
D_sparse = nn.radius_neighbors_graph(mode='distance')
# Ensure it is sparse not merely on diagonals:
assert D_sparse.nnz < D.shape[0] * (D.shape[0] - 1)
core_sparse, labels_sparse = dbscan(D_sparse,
eps=.8,
min_samples=10,
metric='precomputed')
core_dense, labels_dense = dbscan(D, eps=.8, min_samples=10,
metric='precomputed')
assert_array_equal(core_dense, core_sparse)
assert_array_equal(labels_dense, labels_sparse)
def test_dbscan_no_core_samples():
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
for X_ in [X, sparse.csr_matrix(X)]:
db = DBSCAN(min_samples=6).fit(X_)
assert_array_equal(db.components_, np.empty((0, X_.shape[1])))
assert_array_equal(db.labels_, -1)
assert_equal(db.core_sample_indices_.shape, (0,))
def test_dbscan_callable():
# Tests the DBSCAN algorithm with a callable metric.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
# metric is the function reference, not the string key.
metric = distance.euclidean
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples,
algorithm='ball_tree')
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_balltree():
# Tests the DBSCAN algorithm with balltree for neighbor calculation.
eps = 0.8
min_samples = 10
D = pairwise_distances(X)
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='kd_tree')
labels = db.fit(X).labels_
n_clusters_3 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_3, n_clusters)
db = DBSCAN(p=1.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_4 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_4, n_clusters)
db = DBSCAN(leaf_size=20, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_5 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_5, n_clusters)
def test_input_validation():
# DBSCAN.fit should accept a list of lists.
X = [[1., 2.], [3., 4.]]
DBSCAN().fit(X) # must not raise exception
def test_dbscan_badargs():
# Test bad argument values: these should all raise ValueErrors
assert_raises(ValueError,
dbscan,
X, eps=-1.0)
assert_raises(ValueError,
dbscan,
X, algorithm='blah')
assert_raises(ValueError,
dbscan,
X, metric='blah')
assert_raises(ValueError,
dbscan,
X, leaf_size=-1)
assert_raises(ValueError,
dbscan,
X, p=-1)
def test_pickle():
obj = DBSCAN()
s = pickle.dumps(obj)
assert_equal(type(pickle.loads(s)), obj.__class__)
def test_boundaries():
# ensure min_samples is inclusive of core point
core, _ = dbscan([[0], [1]], eps=2, min_samples=2)
assert_in(0, core)
# ensure eps is inclusive of circumference
core, _ = dbscan([[0], [1], [1]], eps=1, min_samples=2)
assert_in(0, core)
core, _ = dbscan([[0], [1], [1]], eps=.99, min_samples=2)
assert_not_in(0, core)
def test_weighted_dbscan():
# ensure sample_weight is validated
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2])
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2, 3, 4])
# ensure sample_weight has an effect
assert_array_equal([], dbscan([[0], [1]], sample_weight=None,
min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 5],
min_samples=6)[0])
assert_array_equal([0], dbscan([[0], [1]], sample_weight=[6, 5],
min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 6],
min_samples=6)[0])
# points within eps of each other:
assert_array_equal([0, 1], dbscan([[0], [1]], eps=1.5,
sample_weight=[5, 1], min_samples=6)[0])
# and effect of non-positive and non-integer sample_weight:
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[5.9, 0.1],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[6, -1],
eps=1.5, min_samples=6)[0])
# for non-negative sample_weight, cores should be identical to repetition
rng = np.random.RandomState(42)
sample_weight = rng.randint(0, 5, X.shape[0])
core1, label1 = dbscan(X, sample_weight=sample_weight)
assert_equal(len(label1), len(X))
X_repeated = np.repeat(X, sample_weight, axis=0)
core_repeated, label_repeated = dbscan(X_repeated)
core_repeated_mask = np.zeros(X_repeated.shape[0], dtype=bool)
core_repeated_mask[core_repeated] = True
core_mask = np.zeros(X.shape[0], dtype=bool)
core_mask[core1] = True
assert_array_equal(np.repeat(core_mask, sample_weight), core_repeated_mask)
# sample_weight should work with precomputed distance matrix
D = pairwise_distances(X)
core3, label3 = dbscan(D, sample_weight=sample_weight,
metric='precomputed')
assert_array_equal(core1, core3)
assert_array_equal(label1, label3)
# sample_weight should work with estimator
est = DBSCAN().fit(X, sample_weight=sample_weight)
core4 = est.core_sample_indices_
label4 = est.labels_
assert_array_equal(core1, core4)
assert_array_equal(label1, label4)
est = DBSCAN()
label5 = est.fit_predict(X, sample_weight=sample_weight)
core5 = est.core_sample_indices_
assert_array_equal(core1, core5)
assert_array_equal(label1, label5)
assert_array_equal(label1, est.labels_)
def test_dbscan_core_samples_toy():
X = [[0], [2], [3], [4], [6], [8], [10]]
n_samples = len(X)
for algorithm in ['brute', 'kd_tree', 'ball_tree']:
# Degenerate case: every sample is a core sample, either with its own
# cluster or including other close core samples.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=1)
assert_array_equal(core_samples, np.arange(n_samples))
assert_array_equal(labels, [0, 1, 1, 1, 2, 3, 4])
# With eps=1 and min_samples=2 only the 3 samples from the denser area
# are core samples. All other points are isolated and considered noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=2)
assert_array_equal(core_samples, [1, 2, 3])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# Only the sample in the middle of the dense area is core. Its two
# neighbors are edge samples. Remaining samples are noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=3)
assert_array_equal(core_samples, [2])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# It's no longer possible to extract core samples with eps=1:
# everything is noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=4)
assert_array_equal(core_samples, [])
assert_array_equal(labels, -np.ones(n_samples))
def test_dbscan_precomputed_metric_with_degenerate_input_arrays():
# see https://github.com/scikit-learn/scikit-learn/issues/4641 for
# more details
X = np.eye(10)
labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
assert_equal(len(set(labels)), 1)
X = np.zeros((10, 10))
labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
assert_equal(len(set(labels)), 1)
| bsd-3-clause |
Tarskin/LaCyTools | LaCyTools.py | 1 | 178953 | #! /usr/bin/env python
#
# Copyright 2014-2016 Bas C. Jansen
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# You should have received a copy of the Apache 2.0 license along
# with this program; if not, see
# http://www.apache.org/licenses/LICENSE-2.0
from datetime import datetime
from matplotlib.backends.backend_tkagg import (
FigureCanvasTkAgg, NavigationToolbar2Tk
)
from scipy.interpolate import InterpolatedUnivariateSpline
import scipy.optimize
from scipy.optimize import curve_fit
#from Tkinter import *
import tkinter as tk
from tkinter import filedialog
from tkinter import messagebox
from tkinter import ttk
import base64
import collections
import glob
import itertools
import linecache
import math
import matplotlib.pyplot as plt
import matplotlib
import numpy
import os
import struct
import sys
import zlib
import tables
# Dev Imports
#import timeit
#import inspect
tables.parameters.MAX_NUMEXPR_THREADS = None
tables.parameters.MAX_BLOSC_THREADS = None
# File Parameters
EXTENSION = ".mzXML" # File types that will be used by MassyTools
EXTRACTION = "aligned" # Pre-fix required for files to be extracted
OUTPUT = "Summary.txt" # Name of the output file
SETTINGS_FILE = "Settings.txt" # Name of the settings file
OVERWRITE_ANALYTES = True # This option specifies if LaCyTools should overwrite an existing analytes.ref file or not
# Alignment Parameters
ALIGNMENT_TIME_WINDOW = 10 # The +/- time window that the program is allowed to look for the feature for alignment (EIC time axis)
ALIGNMENT_MASS_WINDOW = 0.1 # The +/- m/z window (not charge state corrected) that is used to detect the feature used for alignment. Afterwards a spline fit is used to detect the measured time
ALIGNMENT_BACKGROUND_MULTIPLIER = 2 # The multiplier of the timewindow used for background determination
ALIGNMENT_S_N_CUTOFF = 9 # The minimum S/N value of a feature to be used for alignment
ALIGNMENT_MIN_PEAK = 5 # The minimum number of features used for alignment
# Calibration Parameters
SUM_SPECTRUM_RESOLUTION = 100 # Number of data points per 1 whole m/z unit
CALIB_MASS_WINDOW = 0.5 # The +/- mass window (in Dalton) used to detect the accurate mass of a calibrant
CALIB_S_N_CUTOFF = 9 # The minimum S/N value of a feature to be used for calibration
CALIB_MIN_PEAK = 3 # Minimum number of calibrants
# PARAMETERS
MASS_MODIFIERS = [] # The mass modifiers refer to changes to the analyte
CHARGE_CARRIER = ['proton'] # The charge carrier that is used for ionization
# Extraction Parameters
EXTRACTION_TYPE = 2 # 1 = Max, 0 = Total and 2 = Area
MASS_WINDOW = 0.2 # The +/- m/z window used around each feature for extraction
TIME_WINDOW = 8 # The +/- time window that will be used around a cluster, to create the sum spectrum
EXTRACTION_PADDING = 2 # total number of additional windows to be examined and quantified (for IPQ)
MIN_CHARGE = 2 # The minimum charge state that the program will integrate for all features (unless overwritten in the composition file)
MAX_CHARGE = 3 # The maximum charge state that the program will integrate for all features (unless overwritten in the composition file)
#MIN_CONTRIBUTION = 0.01 # Minimum contribution to isotopic distribution to be included (NOT BEING USED ATM)
MIN_TOTAL = 0.95 # Desired contribution of extracted isotopes of total isotopic pattern
BACKGROUND_WINDOW = 10 # Total m/z window (+ and -) to search for background
S_N_CUTOFF = 9 # Minimum signal to noise value of an analyte to be included in the percentage QC
# The maximum distance between distinct isotopic masses to be 'pooled'
EPSILON = 0.5 # DO NOT TOUCH THIS UNLESS YOU KNOW WTF YOU ARE DOING! Read below if you truly want to know the meaning:
# This value represents the maximum distance (in Da) for which the element specific isotopic mass defect will be combined
# Isotopic Mass Differences
C = [('13C',0.0107,1.00335)]
H = [('2H',0.00012,1.00628)]
N = [('15N',0.00364,0.99703)]
O18 = [('18O',0.00205,2.00425)]
O17 = [('17O',0.00038,1.00422)]
S33 = [('33S',0.0076,0.99939)]
S34 = [('34S',0.0429,1.9958)]
S36 = [('36S',0.0002,3.99501)]
# Read the building blocks
# TODO: Move this inside the app
BLOCKS = {}
for file in glob.glob("./blocks/*.block"):
block = os.path.splitext(os.path.basename(file))[0]
keys = []
values = []
with open(file,'r') as fr:
for line in fr:
key, value = line.rstrip().split()
keys.append(key)
try:
value = int(value)
except ValueError:
value = float(value)
values.append(value)
BLOCKS[block] = dict(zip(keys,values))
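# Each *.block file is expected to contain one whitespace-separated "key value"
# pair per line. Based on the verification below, a hypothetical hexose block
# could look like:
#   mass 162.0528234185
#   carbons 6
#   hydrogens 10
#   nitrogens 0
#   oxygens 5
#   sulfurs 0
#   available_for_charge_carrier 0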
# Verify the blocks
for k,v in BLOCKS.items():
try:
if type(v['mass']) != float:
raise TypeError('Mass is not a float.')
if type(v['carbons']) != int:
raise TypeError('Carbons is not an integer.')
if type(v['hydrogens']) != int:
raise TypeError('Hydrogens is not an integer.')
if type(v['nitrogens']) != int:
raise TypeError('Nitrogens is not an integer.')
if type(v['oxygens']) != int:
raise TypeError('Oxygens is not an integer.')
if type(v['sulfurs']) != int:
raise TypeError('Sulfurs is not an integer.')
if type(v['available_for_charge_carrier']) != int:
raise TypeError('Charge carrier is not an integer.')
if v['available_for_charge_carrier'] not in [0,1]:
raise TypeError('Charge carrier is not 0 or 1.')
except:
root = tk.Tk()
root.withdraw()
messagebox.showinfo("Block Error","An error was observed in block "+str(k)+
". Please correct this block before running LaCyTools again.")
sys.exit()
UNITS = BLOCKS.keys()
###################
# DATA STRUCTURES #
###################
class Analyte():
def __init__(self):
self.composition = None
self.mass = None
self.massWindow = None
self.time = None
self.timeWindow = None
self.minCharge = None
self.maxCharge = None
self.isotopes = None
class Isotope():
def __init__(self):
self.isotope = None
self.charge = None
self.mass = None
self.obsInt = None
self.obsMax = None
self.expInt = None
self.qc = None
self.background = None
self.backgroundPoint = None
self.noise = None
################################################################################################
# Tooltip code - Taken from http://www.voidspace.org.uk/python/weblog/arch_d7_2006_07_01.shtml #
################################################################################################
class ToolTip(object):
def __init__(self, widget):
self.widget = widget
self.tipwindow = None
self.id = None
self.x = self.y = 0
def showtip(self, text):
"Display text in tooltip window"
self.text = text
if self.tipwindow or not self.text:
return
x, y, cx, cy = self.widget.bbox("insert")
x = x + self.widget.winfo_rootx() + 27
y = y + cy + self.widget.winfo_rooty() +27
self.tipwindow = tw = tk.Toplevel(self.widget)
tw.wm_overrideredirect(1)
tw.wm_geometry("+%d+%d" % (x, y))
try:
# For Mac OS
tw.tk.call("::tk::unsupported::MacWindowStyle",
"style", tw._w,
"help", "noActivates")
except tk.TclError:
pass
label = tk.Label(tw, text=self.text, justify=tk.LEFT,
background="#ffffe0", relief=tk.SOLID, borderwidth=1,
wraplength=500, font=("tahoma", "8", "normal"))
label.pack(ipadx=1)
def hidetip(self):
tw = self.tipwindow
self.tipwindow = None
if tw:
tw.destroy()
def createToolTip(widget, text):
toolTip = ToolTip(widget)
def enter(event):
toolTip.showtip(text)
def leave(event):
toolTip.hidetip()
widget.bind('<Enter>', enter)
widget.bind('<Leave>', leave)
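# Typical call (a sketch): createToolTip(someWidget, "Explanatory text"), which shows
# the text in a small tooltip window while the mouse pointer hovers over the widget.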
###############################
# Start of actual application #
###############################
class App():
def __init__(self,master):
# VARIABLES
self.master = master
self.version = "1.1.0-alpha"
self.build = "190207b"
self.inputFile = ""
self.inputFileIdx = 0
self.refFile = ""
self.alFile = ""
self.calFile = tk.IntVar()
self.ptFile = None
self.rmMZXML = tk.IntVar()
self.batchFolder = ""
self.batchProcessing = 0
self.batchWindow = 0
self.dataWindow = 0
self.outputWindow = 0
self.analyteIntensity = tk.IntVar()
self.analyteRelIntensity = tk.IntVar()
self.analyteBackground = tk.IntVar()
self.analyteNoise = tk.IntVar()
self.analytePerCharge = tk.IntVar()
self.analyteBckSub = tk.IntVar()
self.normalizeCluster = tk.IntVar()
self.alignmentQC = tk.IntVar()
self.qualityControl = tk.IntVar()
self.spectraQualityControl = tk.IntVar()
self.log = True
        # Background can be determined in several ways
# Options are 'MIN', 'MEDIAN' and 'NOBAN'
self.background = "MIN"
        # Noise can be determined in multiple ways
# Options are 'RMS' and 'MM'
self.noise = "RMS"
self.fig = matplotlib.figure.Figure(figsize=(12, 6))
# Attempt to retrieve previously saved settings from settingsfile
if os.path.isfile('./'+str(SETTINGS_FILE)):
self.getSettings()
        # The LaCyTools Logo (Placeholder figure)
        if os.path.isfile('./ui/LaCyTools.png'):  # use the same path for the check and the load below
            background_image = self.fig.add_subplot(111)
            image = matplotlib.image.imread('./ui/LaCyTools.png')
background_image.axis('off')
self.fig.set_tight_layout(True)
background_image.imshow(image)
# The Canvas
self.canvas = FigureCanvasTkAgg(self.fig, master = master)
self.toolbar = NavigationToolbar2Tk(self.canvas, root)
self.canvas.get_tk_widget().pack(fill=tk.BOTH,expand=tk.YES)
self.canvas.draw()
# FRAME
frame = tk.Frame(master)
master.title("LaCyTools")
# MENU
menu = tk.Menu(root)
root.config(menu = menu)
filemenu = tk.Menu(menu,tearoff=0)
menu.add_cascade(label="File", menu=filemenu)
filemenu.add_command(label="Open Input File", command = self.openFile)
extractmenu = tk.Menu(menu,tearoff=0)
menu.add_cascade(label="Extraction", menu=extractmenu)
extractmenu.add_command(label="Open ref file", command = self.openRefFile)
extractmenu.add_command(label="Extract", command = self.extractData)
menu.add_command(label="Batch Process", command = lambda: self.batchPopup(self))
menu.add_command(label="Data Storage", command = lambda: self.dataPopup(self))
menu.add_command(label="Settings", command = lambda: self.settingsPopup(self))
def selectIsotopes(self, results):
""" TODO
"""
# Sort by increasing m/z (x[0])
results.sort(key=lambda x: x[0])
# Add index of the isotope
foo = []
for index,i in enumerate(results):
foo.append((i[0],i[1],index))
# Sort by decreasing fraction (x[1])
results = foo
results.sort(key=lambda x: x[1], reverse=True)
contribution = 0.0
foo = []
# Take only the highest fraction isotopes until the contribution
# exceeds the MIN_TOTAL
for i in results:
contribution += float(i[1])
foo.append(i)
if contribution > MIN_TOTAL:
break
results = foo
# Sort by increasing m/z (x[0])
results.sort(key=lambda x: x[0])
return results
def settingsPopup(self,master):
""" This function creates a window in which the user can change
all the parameters that are for normal use. Certain advanced
settings such as the extraction type and noise determination
method remain hidden from the user through this window.
"""
def close(self):
""" This function closes the settings popup and applies
all the entered values to the parameters.
"""
global ALIGNMENT_TIME_WINDOW
global ALIGNMENT_MASS_WINDOW
global ALIGNMENT_S_N_CUTOFF
global ALIGNMENT_MIN_PEAK
global CALIB_MASS_WINDOW
global CALIB_S_N_CUTOFF
global CALIB_MIN_PEAK
global SUM_SPECTRUM_RESOLUTION
global MASS_WINDOW
global TIME_WINDOW
global MIN_CHARGE
global MAX_CHARGE
global CHARGE_CARRIER
global MIN_TOTAL
global BACKGROUND_WINDOW
global S_N_CUTOFF
global EXTRACTION_PADDING
ALIGNMENT_TIME_WINDOW = float(self.alignTimeWindow.get())
ALIGNMENT_MASS_WINDOW = float(self.alignMassWindow.get())
ALIGNMENT_S_N_CUTOFF = int(self.alignSn.get())
ALIGNMENT_MIN_PEAK = int(self.alignMin.get())
CALIB_MASS_WINDOW = float(self.calibMassWindow.get())
CALIB_S_N_CUTOFF = int(self.calibSn.get())
CALIB_MIN_PEAK = int(self.calibMin.get())
SUM_SPECTRUM_RESOLUTION = int(self.sumSpec.get())
MASS_WINDOW = float(self.extracMassWindow.get())
TIME_WINDOW = float(self.extracTimeWindow.get())
MIN_CHARGE = int(self.extracMinCharge.get())
MAX_CHARGE = int(self.extracMaxCharge.get())
EXTRACTION_PADDING = int(self.extracPad.get())
CHARGE_CARRIER = []
for i in UNITS:
if str(i) == master.chargeCarrierVar.get() and BLOCKS[i]['available_for_charge_carrier'] == 1:
CHARGE_CARRIER.append(i)
MIN_TOTAL = float(self.extracMinTotal.get())
BACKGROUND_WINDOW = int(self.extracBack.get())
S_N_CUTOFF = int(self.extracSnCutoff.get())
master.measurementWindow = 0
top.destroy()
def save(self):
""" This function saves all changed settings to the
settings file.
"""
global CHARGE_CARRIER
CHARGE_CARRIER = []
for i in UNITS:
if str(i) == master.chargeCarrierVar.get() and BLOCKS[i]['available_for_charge_carrier'] == 1:
CHARGE_CARRIER.append(i)
with open(SETTINGS_FILE,'w') as fw:
fw.write("ALIGNMENT_TIME_WINDOW\t"+str(float(self.alignTimeWindow.get()))+"\n")
fw.write("ALIGNMENT_MASS_WINDOW\t"+str(float(self.alignMassWindow.get()))+"\n")
fw.write("ALIGNMENT_S_N_CUTOFF\t"+str(int(self.alignSn.get()))+"\n")
fw.write("ALIGNMENT_MIN_PEAK\t"+str(int(self.alignMin.get()))+"\n")
fw.write("CALIB_MASS_WINDOW\t"+str(float(self.calibMassWindow.get()))+"\n")
fw.write("CALIB_S_N_CUTOFF\t"+str(int(self.calibSn.get()))+"\n")
fw.write("CALIB_MIN_PEAK\t"+str(int(self.calibMin.get()))+"\n")
fw.write("SUM_SPECTRUM_RESOLUTION\t"+str(int(self.sumSpec.get()))+"\n")
fw.write("MASS_WINDOW\t"+str(float(self.extracMassWindow.get()))+"\n")
fw.write("TIME_WINDOW\t"+str(float(self.extracTimeWindow.get()))+"\n")
fw.write("MIN_CHARGE\t"+str(int(self.extracMinCharge.get()))+"\n")
fw.write("MAX_CHARGE\t"+str(int(self.extracMaxCharge.get()))+"\n")
fw.write("CHARGE_CARRIER\t"+str(CHARGE_CARRIER[0])+"\n")
fw.write("MIN_TOTAL\t"+str(float(self.extracMinTotal.get()))+"\n")
fw.write("BACKGROUND_TOTAL\t"+str(int(self.extracBack.get()))+"\n")
fw.write("S_N_CUTOFF\t"+str(int(self.extracSnCutoff.get()))+"\n")
fw.write("EXTRACTION_PADDING\t"+str(int(self.extracPad.get()))+"\n")
master.measurementWindow = 1
top = self.top = tk.Toplevel()
self.chargeCarrierVar = tk.StringVar()
self.chargeCarrierVar.set(CHARGE_CARRIER[0])
options = []
top.protocol( "WM_DELETE_WINDOW", lambda: close(self))
self.alignmentLabel = tk.Label(top, text="Alignment parameters", font="bold")
self.alignmentLabel.grid(row=0, columnspan=2, sticky=tk.W)
self.alignTimeWindowLabel = tk.Label(top, text="Alignment time window")
self.alignTimeWindowLabel.grid(row=1, column=0, sticky=tk.W)
self.alignTimeWindow = tk.Entry(top)
self.alignTimeWindow.insert(0, ALIGNMENT_TIME_WINDOW)
self.alignTimeWindow.grid(row=1, column=1, sticky=tk.W)
self.alignMassWindowLabel = tk.Label(top, text="Alignment m/z window")
self.alignMassWindowLabel.grid(row=2, column=0, sticky=tk.W)
self.alignMassWindow = tk.Entry(top)
self.alignMassWindow.insert(0, ALIGNMENT_MASS_WINDOW)
self.alignMassWindow.grid(row=2, column=1, sticky=tk.W)
self.alignSnLabel = tk.Label(top, text="Minimal S/N for alignment")
self.alignSnLabel.grid(row=3, column=0, sticky=tk.W)
self.alignSn = tk.Entry(top)
self.alignSn.insert(0, ALIGNMENT_S_N_CUTOFF)
self.alignSn.grid(row=3, column=1, sticky=tk.W)
self.alignMinLabel = tk.Label(top, text="Minimal features for alignment")
self.alignMinLabel.grid(row=4, column=0, sticky=tk.W)
self.alignMin = tk.Entry(top)
self.alignMin.insert(0, ALIGNMENT_MIN_PEAK)
self.alignMin.grid(row=4, column=1, sticky=tk.W)
self.calibrationLabel = tk.Label(top, text="Calibration parameters", font="bold")
self.calibrationLabel.grid(row=5, columnspan=2, sticky=tk.W)
self.calibMassWindowLabel = tk.Label(top, text="Calibration mass window")
self.calibMassWindowLabel.grid(row=6, column=0, sticky=tk.W)
self.calibMassWindow = tk.Entry(top)
self.calibMassWindow.insert(0, CALIB_MASS_WINDOW)
self.calibMassWindow.grid(row=6, column=1, sticky=tk.W)
self.calibSnLabel = tk.Label(top, text="Minimal S/N for calibration")
self.calibSnLabel.grid(row=7, column=0, sticky=tk.W)
self.calibSn = tk.Entry(top)
self.calibSn.insert(0, CALIB_S_N_CUTOFF)
self.calibSn.grid(row=7, column=1, sticky=tk.W)
self.calibMinLabel = tk.Label(top, text="Minimal number of calibrants")
self.calibMinLabel.grid(row=8, column=0, sticky=tk.W)
self.calibMin = tk.Entry(top)
self.calibMin.insert(0, CALIB_MIN_PEAK)
self.calibMin.grid(row=8, column=1, sticky=tk.W)
self.extractionLabel = tk.Label(top, text="Extraction parameters", font="bold")
self.extractionLabel.grid(row=9, columnspan=2, sticky=tk.W)
self.sumSpecLabel = tk.Label(top, text="Data points per 1 m/z")
self.sumSpecLabel.grid(row=10, column=0, sticky=tk.W)
self.sumSpec = tk.Entry(top)
self.sumSpec.insert(0, SUM_SPECTRUM_RESOLUTION)
self.sumSpec.grid(row=10, column=1, sticky=tk.W)
self.extracMassWindowLabel = tk.Label(top, text="Extraction m/z window")
self.extracMassWindowLabel.grid(row=12, column=0, sticky=tk.W)
self.extracMassWindow = tk.Entry(top)
self.extracMassWindow.insert(0, MASS_WINDOW)
self.extracMassWindow.grid(row=12, column=1, sticky=tk.W)
self.extracTimeWindowLabel = tk.Label(top, text="Extraction time window")
self.extracTimeWindowLabel.grid(row=13, column=0, sticky=tk.W)
self.extracTimeWindow = tk.Entry(top)
self.extracTimeWindow.insert(0, TIME_WINDOW)
self.extracTimeWindow.grid(row=13, column=1, sticky=tk.W)
self.extracPadLabel = tk.Label(top, text="Extraction window padding")
self.extracPadLabel.grid(row=14, column=0, sticky=tk.W)
self.extracPad = tk.Entry(top)
self.extracPad.insert(0, EXTRACTION_PADDING)
self.extracPad.grid(row=14, column=1, sticky=tk.W)
self.extracMinChargeLabel = tk.Label(top, text="Minimum charge state")
self.extracMinChargeLabel.grid(row=15, column=0, sticky=tk.W)
self.extracMinCharge = tk.Entry(top)
self.extracMinCharge.insert(0, MIN_CHARGE)
self.extracMinCharge.grid(row=15, column=1, sticky=tk.W)
self.extracMaxChargeLabel = tk.Label(top, text="Maximum charge state")
self.extracMaxChargeLabel.grid(row=16, column=0, sticky=tk.W)
self.extracMaxCharge = tk.Entry(top)
self.extracMaxCharge.insert(0, MAX_CHARGE)
self.extracMaxCharge.grid(row=16, column=1, sticky=tk.W)
for i in UNITS:
if BLOCKS[i]['available_for_charge_carrier'] == 1:
options.append(i)
self.chargeCarrierLabel = tk.Label(top, text="Charge carrier")
self.chargeCarrierLabel.grid(row=17, column=0, sticky=tk.W)
self.chargeCarrier = tk.OptionMenu(top, self.chargeCarrierVar, *options)
self.chargeCarrier.grid(row=17, column=1, sticky=tk.W)
self.extracMinTotalLabel = tk.Label(top, text="Minimum isotopic fraction")
self.extracMinTotalLabel.grid(row=18, column=0, sticky=tk.W)
self.extracMinTotal = tk.Entry(top)
self.extracMinTotal.insert(0, MIN_TOTAL)
self.extracMinTotal.grid(row=18, column=1, sticky=tk.W)
self.extracBackLabel = tk.Label(top, text="Background detection window")
self.extracBackLabel.grid(row=19, column=0, sticky=tk.W)
self.extracBack = tk.Entry(top)
self.extracBack.insert(0, BACKGROUND_WINDOW)
self.extracBack.grid(row=19, column=1, sticky=tk.W)
self.extracSnCutoffLabel = tk.Label(top, text="Spectra QC S/N cutoff")
self.extracSnCutoffLabel.grid(row=20, column=0, sticky=tk.W)
self.extracSnCutoff = tk.Entry(top)
self.extracSnCutoff.insert(0, S_N_CUTOFF)
self.extracSnCutoff.grid(row=20,column=1, sticky=tk.W)
self.ok = tk.Button(top,text = 'Ok', command = lambda: close(self))
self.ok.grid(row = 21, column = 0, sticky = tk.W)
self.save = tk.Button(top, text = 'Save', command = lambda: save(self))
self.save.grid(row = 21, column = 1, sticky = tk.E)
# Tooltips
createToolTip(self.alignTimeWindowLabel,"The time window in seconds around the specified time of an alignment feature that LaCyTools is allowed to look for the maximum intensity of each feature.")
createToolTip(self.alignMassWindowLabel,"The m/z window in Thompson around the specified exact m/z of an alignment feature, that LaCyTools will use to find the maximum of each feature.")
createToolTip(self.alignSnLabel,"The minimum S/N of an alignment feature to be included in the alignment.")
createToolTip(self.alignMinLabel,"The minimum number of features that have a S/N higher than the minimum S/N for alignment to occur.")
createToolTip(self.calibMassWindowLabel,"The mass window in Dalton around the specified exact m/z of a calibrant, that LaCyTools uses to determine the uncalibrated accurate mass. This value will be charge state corrected, i.e. for a triple charged analyte the used window will be the value specified here divided by 3.")
createToolTip(self.calibSnLabel,"The minimum S/N of a calibrant to be included in the calibration.")
createToolTip(self.calibMinLabel,"The minimum number of calibrants that have a S/N higher than the minimum S/N for calibration to occur.")
createToolTip(self.sumSpecLabel,"The number of bins per m/z that will be used in the sum spectrum. A value of 100 means that each data point in the sum spectrum is spaced at 0.01 m/z.")
createToolTip(self.extracMassWindowLabel,"The m/z window in Thompson around the specified exact m/z of a feature that LaCyTools will use for quantitation. For example, a value of 0.1 results in LaCyTools quantifying 999.9 to 1000.1 for a feature with an m/z value of 1000.")
createToolTip(self.extracTimeWindowLabel,"The rt window in seconds around the specified elution time of each cluster that contains features for quantitation. For example, a value of 10 will result in LaCyTools creating a sum spectrum from 90 s. to 110 s. for a cluster eluting at 100s.")
createToolTip(self.extracMinChargeLabel,"The minimum charge state that LaCyTools will attempt to use in calibration and quantitation for all features listed in the analyte reference file.")
createToolTip(self.extracMaxChargeLabel,"The maximum charge state that LaCyTools will attempt to use in calibration and quantitation for all features listed in the analyte reference file.")
createToolTip(self.chargeCarrierLabel,"The charge carrier that is applied to all specified analytes for quantitation.")
createToolTip(self.extracPadLabel,"The number of windows before the regular analyte windows that will be examined to determine the IPQ.")
createToolTip(self.extracMinTotalLabel,"The minimum fraction of the theoretical isotopic pattern that LaCyTools will use for quantitation. For example, a value of 0.95 means that LaCyTools will quantify isotopes until the sum of the quantified isotopes exceeds 0.95 of the total theoretical isotopic pattern.")
createToolTip(self.extracBackLabel,"The mass window in Dalton that LaCyTools is allowed to look for the local background and noise for each analyte. For example, a value of 10 means that LaCyTools will look from 990 m/z to 1010 m/z for an analyte with an m/z of 1000.")
createToolTip(self.extracSnCutoffLabel,"The minimum S/N of an analyte to be included in the spectral QC. Specifically, for the output that lists what fraction of the total quantified analytes passed the here specified S/N value.")
def getSettings(self):
""" This function reads the settings file as specified in the
program, applying them to the program.
"""
with open(SETTINGS_FILE,'r') as fr:
for line in fr:
line = line.rstrip('\n')
chunks = line.split()
if chunks[0] == "ALIGNMENT_TIME_WINDOW":
global ALIGNMENT_TIME_WINDOW
ALIGNMENT_TIME_WINDOW = float(chunks[1])
if chunks[0] == "ALIGNMENT_MASS_WINDOW":
global ALIGNMENT_MASS_WINDOW
ALIGNMENT_MASS_WINDOW = float(chunks[1])
if chunks[0] == "ALIGNMENT_S_N_CUTOFF":
global ALIGNMENT_S_N_CUTOFF
ALIGNMENT_S_N_CUTOFF = int(chunks[1])
if chunks[0] == "ALIGNMENT_MIN_PEAK":
global ALIGNMENT_MIN_PEAK
ALIGNMENT_MIN_PEAK = int(chunks[1])
if chunks[0] == "CALIB_MASS_WINDOW":
global CALIB_MASS_WINDOW
CALIB_MASS_WINDOW = float(chunks[1])
if chunks[0] == "CALIB_S_N_CUTOFF":
global CALIB_S_N_CUTOFF
CALIB_S_N_CUTOFF = int(chunks[1])
if chunks[0] == "CALIB_MIN_PEAK":
global CALIB_MIN_PEAK
CALIB_MIN_PEAK = int(chunks[1])
if chunks[0] == "SUM_SPECTRUM_RESOLUTION":
global SUM_SPECTRUM_RESOLUTION
SUM_SPECTRUM_RESOLUTION = int(chunks[1])
if chunks[0] == "MASS_WINDOW":
global MASS_WINDOW
MASS_WINDOW = float(chunks[1])
if chunks[0] == "TIME_WINDOW":
global TIME_WINDOW
TIME_WINDOW = float(chunks[1])
if chunks[0] == "MIN_CHARGE":
global MIN_CHARGE
MIN_CHARGE = int(chunks[1])
if chunks[0] == "MAX_CHARGE":
global MAX_CHARGE
MAX_CHARGE = int(chunks[1])
if chunks[0] == "MIN_TOTAL":
global MIN_TOTAL
MIN_TOTAL = float(chunks[1])
if chunks[0] == "BACKGROUND_TOTAL":
global BACKGROUND_TOTAL
BACKGROUND_TOTAL = int(chunks[1])
if chunks[0] == "S_N_CUTOFF":
global S_N_CUTOFF
S_N_CUTOFF = int(chunks[1])
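# Illustrative sketch (values are assumptions, not defaults): the settings
# file parsed above is a plain text file with one tab separated key/value
# pair per line, mirroring what save() writes, e.g.:
#
#   ALIGNMENT_TIME_WINDOW   10.0
#   ALIGNMENT_MASS_WINDOW   0.1
#   SUM_SPECTRUM_RESOLUTION 100
#   BACKGROUND_TOTAL        10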
def feature_reader(self,file):
""" This reads the contents of the alignmen features file and
stores the relevant values in a list.
INPUT: A filename
OUTPUT: A list of m/z,retention lists (elements are type float)
"""
features = []
with open(file,'r') as fr:
for line in fr:
try:
if line[0][0].isdigit():
line = line.rstrip().split()
features.append([float(x) for x in line])
except IndexError:
print ("Incorrect line observed in: ")+str(file)
if self.log == True:
with open('LaCyTools.log', 'a') as flog:
flog.write(str(datetime.now())+ "\tIncorrect line observed in: "+str(analyteFile)+"\n")
except:
print ("Unexpected Error: "), sys.exc_info()[0]
return features
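# Illustrative sketch (assumed values): the alignment feature file read above
# is a plain text file with one whitespace separated "m/z retention-time"
# pair per line; only lines starting with a digit are parsed, e.g.:
#
#   # m/z       rt (s)
#   1485.53     450.0
#   2285.82     480.0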
def fitFunc(self, x,a,b,c):
penalty = 0
if b > 2.:
penalty = abs(b-1.)*10000
if b < 0.:
penalty = abs(2.-b)*10000
return a*x**b + c + penalty
def fitFuncLin(self, x,a,b):
return a*x + b
def calcQuadratic(self,data,func):
""" This function fits the specified function in 'fitFunc'
to the data, using the curve_fit package from scipy.optimize.
INPUT: A list of (m/z,int) tuples
OUTPUT: The parameters for the fitted function
"""
expected = []
observed = []
for i in data:
expected.append(i[0])
observed.append(i[1])
try:
if func == "PowerLaw":
z = curve_fit(self.fitFunc, observed, expected)#,maxfev=10000)
elif func == "Linear":
z = curve_fit(self.fitFuncLin,observed,expected)
name = self.inputFile.split(".")[0]
name = os.path.join(self.batchFolder,name)
#############
# Plot Code #
#############
minX = min(expected)-0.1*min(expected)
maxX = max(expected)+0.1*max(expected)
newX = numpy.linspace(minX,maxX,2500*(maxX-minX))
linY = newX
if func == "PowerLaw":
yNew = self.fitFunc(newX,*z[0])
minY = self.fitFunc(minX,*z[0])
maxY = self.fitFunc(maxX,*z[0])
elif func == "Linear":
yNew = self.fitFuncLin(newX,*z[0])
minY = self.fitFuncLin(minX,*z[0])
maxY = self.fitFuncLin(maxX,*z[0])
fig = plt.figure(figsize=(8,6))
ax = fig.add_subplot(111)
plt.scatter(expected,observed,c='b',label='Raw',alpha=0.5)
observedCalibrated = []
for index, j in enumerate(observed):
if func == "PowerLaw":
observedCalibrated.append(self.fitFunc(j,*z[0]))
elif func == "Linear":
observedCalibrated.append(self.fitFuncLin(j,*z[0]))
plt.scatter(expected,observedCalibrated,c='r',label='Calibrated',marker='s',alpha=0.5)
numbers = ["%.2f" % number for number in z[0]]
if func == "PowerLaw":
if float(numbers[2]) > 0.0:
plt.plot(newX,yNew,label="Fit, Function: "+str(numbers[0])+"x"+"$^{"+str(numbers[1])+"}$+"+str(numbers[2]),c='b')
else:
plt.plot(newX,yNew,label="Fit, Function: "+str(numbers[0])+"x"+"$^{"+str(numbers[1])+"}$"+str(numbers[2]),c='b')
elif func == "Linear":
if float(numbers[1]) > 0.0:
plt.plot(newX,yNew, label="Fit, Function: "+str(numbers[0])+"x+"+str(numbers[1]),c='b')
else:
plt.plot(newX,yNew, label="Fit, Function: "+str(numbers[0])+"x"+str(numbers[1]),c='b')
plt.plot(newX,linY,label='Target',c='r',linestyle='--')
plt.legend(loc='best')
plt.xlabel("Expected rt (s.)")
plt.ylabel("Observed rt (s.)")
plt.xlim(minX,maxX)
plt.ylim(minY,maxY)
fig.savefig(name,dpi=800)
plt.close()
###############
# end of plot #
###############
except RuntimeError:
z = None
return z
def dataPopup(self,master):
""" This function creates a popup window belonging to the HD5
data format. The window has a button where the user has to select
the location of his mzXML files, a checkbox indicating if the
mzXML files can be deleted afterwards and lastly a run button.
INPUT: None
OUTPUT: None
"""
if master.dataWindow == 1:
return
master.dataWindow = 1
self.folder = tk.StringVar()
self.ptFileName = tk.StringVar()
def close(self):
master.dataWindow = 0
top.destroy()
def batchButton():
master.openBatchFolder()
self.folder.set(master.batchFolder)
top = self.top = tk.Toplevel()
top.protocol( "WM_DELETE_WINDOW", lambda: close(self))
self.batchDir = tk.Button(top, text = "Batch Directory", width = 25, command = lambda: batchButton())
self.batchDir.grid(row = 0, column = 0, sticky = tk.W)
self.batch = tk.Label(top, textvariable = self.folder, width = 25)
self.batch.grid(row = 0, column = 1)
self.remove = tk.Checkbutton(top, text = "Remove mzXML files", variable = master.rmMZXML, onvalue = 1, offvalue = 0)
self.remove.grid(row = 1, column = 0, sticky = tk.W)
self.convertButton = tk.Button(top, text = "Batch Convert to pyTables", width = 25, command = lambda: master.batchConvert(master))
self.convertButton.grid(row = 2, column = 0,columnspan = 2)
def batchConvert(self,master):
""" TODO: COMMENT THIS FUNCTION PLEASE.
This function does x, using Y
INPUT: stuff
OUTPUT: stuff
"""
import time
start_time = time.time()
filenames = glob.glob(str(self.batchFolder)+"/*" + EXTENSION)
print ("Converting...")
filename = filenames[0]
array = []
self.inputFile = filename
self.readData(array,None)
nscans = len(array)
size = 0
for rt, spectrum in array:
size = max(size, len(spectrum))
SCAN_SIZE = int(size * 1.1)
try:
rawfile = tables.open_file(os.path.join(self.batchFolder, "pytables.h5"), "w", filters=tables.Filters(complevel=4, complib="blosc:lz4"))
except tables.HDF5ExtError:
print ("Error creating pyTables file")
raise
class Scan(tables.IsDescription):
sample = tables.Int64Col(pos=0)
scan = tables.Int64Col(pos=1)
rt = tables.Float64Col(pos=2)
art = tables.Float64Col(pos=3) # aligned retention time
idx = tables.Int64Col(pos=4)
size = tables.Int64Col(pos=5)
rawfile.create_vlarray('/', 'filenames', atom=tables.VLUnicodeAtom(), expectedrows=len(filenames))
rawfile.create_table('/', 'scans', description=Scan, expectedrows=len(filenames)*nscans)
rawfile.create_earray('/', 'mzs', atom=tables.Float64Atom((SCAN_SIZE,)), shape=(0,), chunkshape=(1,))
rawfile.create_earray('/', 'Is', atom=tables.Int64Atom((SCAN_SIZE,)), shape=(0,), chunkshape=(1,))
row = rawfile.root.scans.row
idx = 0
# main loop
for count, filename in enumerate(filenames):
self.inputFile = filename
if count >= 1:
array = []
self.readData(array,None)
mzs = numpy.zeros((len(array), SCAN_SIZE), numpy.float64)
Is = numpy.zeros((len(array), SCAN_SIZE), numpy.int64)
# loop over spectra
for scan, spectrum in enumerate(array):
rt, spectrum = spectrum
size = min(len(spectrum), SCAN_SIZE)
spectrum = numpy.array(spectrum).T
spectrum[0, 1:] = numpy.diff(spectrum[0])
mzs[scan, :size], Is[scan, :size] = spectrum[:, :size]
row['sample'] = count
row['scan'] = scan
row['rt'] = rt
row['idx'] = idx
row['size'] = size
row.append()
idx += 1
rawfile.root.mzs.append(mzs)
rawfile.root.Is.append(Is)
rawfile.root.filenames.append(filename)
if self.rmMZXML.get() == 1:
try:
os.remove(filename)
except:
raise
rawfile.close()
if self.log == True:
with open('LaCyTools.log', 'a') as flog:
flog.write(str(datetime.now())+ "\tFinished converting\n")
end_time = time.time()
print ("Batch conversion lasted for ", str((end_time - start_time) / 60.), "minutes, or", str((end_time - start_time) / len(filenames)), "seconds per sample.")
messagebox.showinfo("Status Message","Batch Convert finished on "+str(datetime.now()))
def batchProcess(self,master):
""" This is the main controller function for batch processing.
First, the function checks if any reference or alignment file
was selected, producing a message box if this is not the case.
Afterwards, the function checks whether or not it has to read
from HDF5 files or other accepted file formats. Subsequently,
it performs alignment if an alignment file is selected, followed
by quantitation (and calibration) if a reference list is
selected. Finally, it will combine all the individual results
into a summary file before cleaning up.
INPUT: None
OUTPUT: A summary file
"""
import time
start = time.time()
# Safety feature (prevents batchProcess from being started multiple times)
if self.batchProcessing == 1:
messagebox.showinfo("Error Message", "Batch Process already running")
return
self.batchProcessing = 1
#####################
# PROGRESS BAR CODE #
#####################
self.alPerc = tk.StringVar()
self.extPerc = tk.StringVar()
self.alPerc.set("0%")
self.extPerc.set("0%")
# barWindow = Tk()
barWindow = self.top = tk.Toplevel()
barWindow.title("Progress Bar")
al = tk.Label(barWindow, text="Alignment", padx=25)
al.grid(row=0, column=0, sticky=tk.W)
ft = ttk.Frame(barWindow)
ft.grid(row=1, columnspan=2)
perc1 = tk.Label(barWindow, textvariable=self.alPerc)
perc1.grid(row=0, column=1, padx=25)
progressbar = ttk.Progressbar(ft, length=100, mode='determinate')
progressbar.grid(row=1, columnspan=2)
ext = tk.Label(barWindow, text="Quantitation", padx=25)
ext.grid(row=2, column=0, sticky=tk.W)
ft2 = ttk.Frame(barWindow)
ft2.grid(row=3, columnspan=2)
perc2 = tk.Label(barWindow, textvariable=self.extPerc)
perc2.grid(row=2, column=1, padx=25)
progressbar2 = ttk.Progressbar(ft2, length=100, mode='determinate')
progressbar2.grid(row=3, columnspan=2)
###################
# END OF BAR CODE #
###################
# Check if reference or alignment file was selected
if self.refFile == "" and self.alFile == "" and self.calFile == "":
messagebox.showinfo("File Error","No reference or alignment file selected")
# Check for pytables file
if os.path.isfile(os.path.join(self.batchFolder,"pytables.h5")):
ptFileName = os.path.join(self.batchFolder,"pytables.h5")
if self.ptFile is None:
self.ptFile = tables.open_file(ptFileName, mode='a')
filenames = self.ptFile.root.filenames[:]
self.readData = self.readPTData
self.transform_mzXML = self.alignRTs
filenames2idx = dict([(filename, idx) for idx, filename in enumerate(filenames)])
print ('Found "pytables.h5" in batch folder.')
else:
filenames = glob.glob(os.path.join(str(self.batchFolder),"*"+EXTENSION))
filenames2idx = dict([(filename, idx) for idx, filename in enumerate(filenames)])
# ALIGNMENT
if self.alFile != "":
features = []
features = self.feature_reader(self.alFile)
features = sorted(features, key = lambda tup: tup[1])
# reset aligned rts to 0
if self.ptFile is not None and self.ptFile.isopen:
for scan in self.ptFile.root.scans:
scan['art'] = 0
scan.update()
self.ptFile.flush()
for index,file in enumerate(filenames):
self.alPerc.set(str(int( (float(index) / float(len(filenames) ) ) *100))+"%")
progressbar["value"] = int( (float(index) / float(len(filenames) ) ) *100)
progressbar.update()
array = []
timePairs = []
self.inputFile = file
self.inputFileIdx = filenames2idx[file]
readTimes = self.matchFeatureTimes(features)
self.readData(array,readTimes)
strippedFeatures = []
for i in features:
peakTime = 0
peakIntensity = 0
dataPoints = []
leftPoints = []
rightPoints = []
signalPoints = []
for j in array:
if j[0] > i[1] - 2*ALIGNMENT_TIME_WINDOW and j[0] < i[1] - ALIGNMENT_TIME_WINDOW:
dataPoints.append(self.feature_finder(j[1],i[0]-ALIGNMENT_MASS_WINDOW,i[0]+ALIGNMENT_MASS_WINDOW))
leftPoints.append(self.feature_finder(j[1],i[0]-ALIGNMENT_MASS_WINDOW,i[0]+ALIGNMENT_MASS_WINDOW))
if j[0] > i[1] + ALIGNMENT_TIME_WINDOW and j[0] < i[1] + 2*ALIGNMENT_TIME_WINDOW:
dataPoints.append(self.feature_finder(j[1],i[0]-ALIGNMENT_MASS_WINDOW,i[0]+ALIGNMENT_MASS_WINDOW))
rightPoints.append(self.feature_finder(j[1],i[0]-ALIGNMENT_MASS_WINDOW,i[0]+ALIGNMENT_MASS_WINDOW))
if j[0] < i[1] + ALIGNMENT_TIME_WINDOW and j[0] > i[1] - ALIGNMENT_TIME_WINDOW:
signalPoints.append(self.feature_finder(j[1],i[0]-ALIGNMENT_MASS_WINDOW,i[0]+ALIGNMENT_MASS_WINDOW))
if self.feature_finder(j[1],i[0]-ALIGNMENT_MASS_WINDOW,i[0]+ALIGNMENT_MASS_WINDOW) > peakIntensity:
peakIntensity = self.feature_finder(j[1],i[0]-ALIGNMENT_MASS_WINDOW,i[0]+ALIGNMENT_MASS_WINDOW)
peakTime = j[0]
############################################################################################
# Awesome cool method (with milk and cookies!) to determine background and noise in an EIC #
############################################################################################
sortedData = sorted(dataPoints)
startSize = int(0.25 * float(len(sortedData)))
currSize = startSize
currAverage = numpy.average(sortedData[0:currSize])
if self.noise == "MM":
currNoise = max(sortedData[0:currSize]) - min(sortedData[0:currSize])
elif self.noise == "RMS":
currNoise = numpy.std(sortedData[0:currSize])
directionFlag = 0
for k in range(0,len(sortedData)-(startSize+1)):
if sortedData[currSize+1] < currAverage + 3 * currNoise:
directionFlag = 1
currSize += 1
currAverage = numpy.average(sortedData[0:currSize])
if self.noise == "MM":
currNoise = max(sortedData[0:currSize]) - min(sortedData[0:currSize])
elif self.noise == "RMS":
currNoise = numpy.std(sortedData[0:currSize])
else:
if sortedData[currSize-1] > currAverage + 3 * currNoise and directionFlag == 0:
currSize -= 1
currAverage = numpy.average(sortedData[0:currSize])
if self.noise == "MM":
currNoise = max(sortedData[0:currSize]) - min(sortedData[0:currSize])
elif self.noise == "RMS":
currNoise = numpy.std(sortedData[0:currSize])
else:
break
background = currAverage
noise = currNoise
######################
# End of awesomeness #
######################
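# Worked sketch of the estimate above (assumed intensities): for a sorted EIC
# such as [1, 2, 2, 3, 3, 4, 50, 60] the window starts at the lowest 25% of
# the points and grows while the next point stays below average + 3 * noise,
# which keeps the chromatographic peak intensities (50, 60) out of the
# background estimate; 'background' is then the average of the retained
# points and 'noise' either their max-min spread ("MM") or their standard
# deviation ("RMS").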
#############
# Plot Code #
#############
"""plotPoints = leftPoints + signalPoints + rightPoints
fig = plt.figure()
ax = fig.add_subplot(111)
plt.plot(sorted(plotPoints))
#plt.plot(plotPoints)
plt.axhline(y=currAverage, color ='k')
plt.axhline(y=currAverage + 3* currNoise, color = 'r')
#plt.axvline(x=len(plotPoints)/3, color = 'r')
#plt.axvline(x=(len(plotPoints)/3)*2, color = 'r')
plt.show()"""
###############
# end of plot #
###############
if peakIntensity > background + ALIGNMENT_S_N_CUTOFF * noise:
timePairs.append((i[1],peakTime))
strippedFeatures.append(i)
else:
if self.log == True:
with open('LaCyTools.log', 'a') as flog:
flog.write(str(datetime.now())+"\tFeature: "+str(i)+" was not above alignment S/N cutoff: "+str(ALIGNMENT_S_N_CUTOFF)+" in file: "+str(file)+"\n")
# Make sure that enough features are used for alignment
if len(timePairs) >= ALIGNMENT_MIN_PEAK:
warnUser = False
# Attempt advanced alignment (PowerLaw)
alignFunction = self.calcQuadratic(timePairs,"PowerLaw")
# Fall back to basic alignment (Linear)
if alignFunction == None:
if self.log == True:
with open('LaCyTools.log', 'a') as flog:
flog.write(str(datetime.now())+ "\tAdvanced alignment failed on file: "+str(file)+", switching to basic alignment\n")
alignFunction = self.calcQuadratic(timePairs,"Linear")
if alignFunction == None:
if self.log == True:
with open('LaCyTools.log', 'a') as flog:
flog.write(str(datetime.now())+"\tFile: "+str(file)+" could not be aligned. Both advanced and basic alignment fits failed\n")
outFile = os.path.split(file)[-1]
outFile = "unaligned_"+outFile
outFile = os.path.join(self.batchFolder,outFile)
open(outFile,'w').close()
continue
# Bind correct fit function to fit (PowerLaw or Linear)
if len(alignFunction[0]) == 3:
fit = self.fitFunc
elif len(alignFunction[0]) == 2:
fit = self.fitFuncLin
# Create alignment output file
alignmentOutput = self.inputFile.split(".")[0]
alignmentOutput = alignmentOutput + ".alignment"
with open(alignmentOutput,'w') as falign:
lsq = 0
falign.write("Peak\tExpected RT\tOriginal RT\tAligned RT\n")
for index,timePair in enumerate(timePairs):
falign.write(str(strippedFeatures[index][0])+"\t"+str(timePair[0])+"\t"+str(timePair[1])+"\t"+str(fit(float(timePair[1]),*alignFunction[0]))+"\n")
lsq += float(strippedFeatures[index][0]) - fit(float(timePair[1]),*alignFunction[0])
self.transform_mzXML(file,fit,alignFunction[0])
else:
if self.log == True:
with open('LaCyTools.log', 'a') as flog:
flog.write(str(datetime.now())+ "\tFile not aligned due to lack of features\n")
outFile = os.path.split(file)[-1]
outFile = "unaligned_"+outFile
outFile = os.path.join(self.batchFolder,outFile)
open(outFile,'w').close()
self.alPerc.set("100%")
progressbar["value"] = 100
# (CALIBRATION AND) EXTRACTION
if self.refFile != "":
if self.analyteIntensity.get() == 0 and self.analyteRelIntensity.get() == 0 and self.analyteBackground.get() == 0 and self.analyteNoise.get() == 0 and self.alignmentQC.get() == 0 and self.qualityControl.get() == 0 and self.spectraQualityControl.get() == 0:
messagebox.showinfo("Output Error","No outputs selected")
self.initCompositionMasses(self.refFile)
ref = []
self.refParser(ref)
times = []
for i in ref:
times.append((i[4],i[5]))
chunks = collections.OrderedDict()
for i in times:
if '-'.join(i) not in chunks.keys():
chunks['%s' % '-'.join(i)] = []
for i in ref:
chunks['%s' % '-'.join((i[4],i[5]))].append(i)
if os.path.isfile(os.path.join(self.batchFolder,"pytables.h5")) == False:
filenames = glob.glob(os.path.join(str(self.batchFolder),EXTRACTION+"*"+EXTENSION))
filenames2idx = dict([(filename, idx) for idx, filename in enumerate(filenames)])
for index,file in enumerate(filenames):
self.extPerc.set(str(int( (float(index) / float(len(filenames) ) ) *100))+"%")
progressbar2["value"] = int( (float(index) / float(len(filenames) ) ) *100)
progressbar2.update()
results = []
self.inputFile = file
self.inputFileIdx = filenames2idx[file]
array = []
readTimes = self.matchAnalyteTimes(ref)
self.readData(array, readTimes)
for index,i in enumerate(chunks.keys()):
spectrum = self.sumSpectrum(i,array)
# Dirty hack to now get rid of the time window again
rt = tuple(i.split('-'))[0]
calibrants = []
# Calibrate the sum spectrum
if self.calFile.get() == 1:
for j in ref:
if j[6] == "True" and int(round(float(j[4]))) == int(round(float(rt))):
charge = j[0].split("_")[-2]
calibrants.append((float(j[1]),int(charge)))
measuredMaxima = self.getLocalMaxima(calibrants,spectrum)
presentCalibrants = self.getObservedCalibrants(measuredMaxima,calibrants)
measuredMaximaMZ = []
# Strip the m/z values from the maxima
for j in measuredMaxima:
measuredMaximaMZ.append(j[0])
# Perform 2d degree polynomial fit
if len(measuredMaximaMZ) >= CALIB_MIN_PEAK:
z = numpy.polyfit(measuredMaximaMZ,presentCalibrants,2) # This should be the correct one
else:
if self.log == True:
with open('LaCyTools.log', 'a') as flog:
flog.write(str(datetime.now())+ "\tUnable to calibrate the sum spectrum at "+str(i)+" seconds\n")
# Adjust filename
(old, new) = os.path.split(self.inputFile)
old = os.path.abspath(old)
new = os.path.splitext(new)[0]
new = "Uncalibrated_sumSpectrum_"+str(i)+"_"+str(new)+".xy"
new = os.path.join(old,new)
outFile = "\\\\?\\"+new
# Write
with open(outFile,'w') as fw:
fw.write("\n".join(str(j[0])+"\t"+str(j[1]) for j in spectrum))
continue
f = numpy.poly1d(z)
calOut = str(file.split(".")[0])+"_"+str(index)+".calibration"
with open(calOut,'w') as fw2:
for index,j in enumerate(measuredMaximaMZ):
fw2.write("accurate mass: "+str(presentCalibrants[index])+" measured at "+str(j) +" being calibrated to: "+str(f(j))+"\n")
mzList = []
intList = []
for j in spectrum:
mzList.append(float(j[0]))
intList.append(int(j[1]))
# Transform python list into numpy array
mzArray = numpy.array(mzList)
newArray = f(mzArray)
newSpectrum = []
for index,j in enumerate(newArray):
newSpectrum.append((j,intList[index]))
spectrum = newSpectrum
# Adjust filename
(old, new) = os.path.split(self.inputFile)
old = os.path.abspath(old)
new = os.path.splitext(new)[0]
new = "sumSpectrum_"+str(i)+"_"+str(new)+".xy"
new = os.path.join(old,new)
outFile = "\\\\?\\"+new
# Write
with open(outFile,'w') as fw:
fw.write("\n".join(str(j[0])+"\t"+str(j[1]) for j in spectrum))
else:
# Adjust filename
(old, new) = os.path.split(self.inputFile)
old = os.path.abspath(old)
new = os.path.splitext(new)[0]
new = "sumSpectrum_"+str(i)+"_"+str(new)+".xy"
new = os.path.join(old,new)
outFile = "\\\\?\\"+new
# Write
with open(outFile,'w') as fw:
fw.write("\n".join(str(j[0])+"\t"+str(j[1]) for j in spectrum))
self.extractData(chunks[i],spectrum,results)
self.writeResults(results,file)
# Wrap up stuff
self.extPerc.set("100%")
progressbar2["value"] = 100
barWindow.destroy()
self.combineResults()
if self.ptFile is not None:
self.ptFile.close()
self.batchProcessing = 0
end = time.time()
if self.log == True:
with open('LaCyTools.log', 'a') as flog:
flog.write(str(datetime.now())+ "\tBatch process lasted for "+str((end - start) / 60.)+"minutes\n")
messagebox.showinfo("Status Message","Batch Process finished on "+str(datetime.now()))
def writeCalibration(self,function,array):
""" This function creates a calibrated mzXML file. However, the
function is currently not being used and might be removed in the
future.
INPUT: Calibration function and the raw data in an array
OUTPUT: A calibrated mzXML file
"""
endian = "!"
started = False
with open(self.inputFile,'r') as fr:
name = os.path.split(str(self.inputFile))[-1]
name = name.split(".")[0]
name = "calibrated_"+name+".mzXML" # TODO: Make the extension dynamic
with open(name,'w') as fw:
counter = 0
# Ensure 'compression' is defined even for files without zlib compression
compression = False
mzList = []
intList = []
values = []
for line in fr:
if 'zlib' in line:
fw.write(line)
compression = True
elif 'byteOrder' in line:
fw.write(line)
byteOrder = line.split("byteOrder")[1]
byteOrder = byteOrder.split("\"")[1]
endian = "!"
if byteOrder == 'little':
endian = '<'
elif byteOrder == 'big':
endian = '>'
elif 'precision' in line:
fw.write(line)
precision = line.split("precision")[1]
precision = precision.split("\"")[1]
if int(precision) == 64:
precision = 'd'
else:
precision = 'f'
elif 'contentType="m/z-int">' in line:
mzList = []
intList = []
values = []
for i in array[counter][1]:
mzList.append(i[0])
intList.append(i[1])
mzArray = numpy.array(mzList)
newArray = function(mzArray)
for index, i in enumerate(newArray):
values.append(i)
values.append(intList[index])
format = str(endian)+str(len(values))+precision
data = struct.pack(format, *values)
if compression == True:
data = zlib.compress(data)
data = base64.b64encode(data)
fw.write('contentType="m/z-int">'+str(data)+'</peaks>\n')
counter += 1
else:
fw.write(line)
def getObservedCalibrants(self,maxima,potentialCalibrants):
""" This function compares the list of local maxima with the
expected calibrants. The function will perceive the observed
local maxima that is closest to a desired calibrant as being the
m/z where the calibrant was observed in the spectrum. The
function then appends the theoretical m/z value of a calibrants
that were actually observed to a list (actualCalibrants) which
is returned at the end of the function.
INPUT 1: A list of floats containg the observed local maxima (of
the spline fit within each inclusion range, assuming that they
were above user specified S/N cut off).
INPUT 2: A list of floats containing the theoretical m/z of all
calibrants.
OUTPUT: A list of floats containing the theoretical m/z of the
calibrants which were near an oberved local maxima.
"""
actualCalibrants = []
for i in maxima:
diff = 4.0
closest = 0
for j in potentialCalibrants:
if abs(float(j[0])-float(i[0])) < diff:
diff = abs(float(j[0])-float(i[0]))
closest = float(j[0])
actualCalibrants.append(closest)
return actualCalibrants
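# Illustrative sketch (assumed values): with observed maxima at m/z 999.98
# and 1200.40 and theoretical calibrants at m/z 1000.00 and 1200.50, the
# function returns [1000.00, 1200.50], i.e. for every observed maximum the
# theoretical m/z of the calibrant that lies closest to it (within 4 m/z).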
def getLocalMaxima(self,features,spectrum):
""" This function takes a list of potential calibrants and will
identify the m/z value that shows the maximum intensity. The
function will determine the accurate mass from a interpolated
univariate spline that is fitted through the data points,
yielding improved post calibration mass accuracy.
INPUT: A spectrum and a list of features (mass,charge)
OUTPUT: A containing (accurate mass, intensity) tuples for the
calibrants that passed the user specified S/N cutoff.
"""
maxima = []
for i in features:
mass, charge = i
window = CALIB_MASS_WINDOW / charge
lowMz = self.binarySearch(spectrum,float(mass)-float(window),len(spectrum)-1,'left')
highMz = self.binarySearch(spectrum,float(mass)+float(window),len(spectrum)-1,'right')
x_points = []
y_points = []
for j in spectrum[lowMz:highMz]:
x_points.append(j[0])
y_points.append(j[1])
newX = numpy.linspace(x_points[0],x_points[-1],2500*(x_points[-1]-x_points[0]))
maximum = (newX[int(len(newX)/2)],0)
try:
f = InterpolatedUnivariateSpline(x_points,y_points)
ySPLINE = f(newX)
for index, j in enumerate(ySPLINE):
if j > maximum[1]:
maximum = (newX[index],j)
except ValueError:
data = zip(x_points,y_points)
if self.log == True:
with open('LaCyTools.log', 'a') as flog:
flog.write(str(datetime.now())+ "\tGuassian Curve Fit failed for analyte: "+str(i[0])+", reverting to non fitted local maximum\n")
for j in data:
if j[1] > maximum[1]:
maximum = (j[0],j[1])
except:
print ("Analyte: "+str(i[0])+" is being troublesome, kill it")
# Plot Code (for testing purposes)
"""fig = plt.figure()
ax = fig.add_subplot(111)
plt.plot(x_points, y_points, 'b*')
#plt.plot(newX,newY, 'b--')
#plt.plot(newX,ySPLINE,'r--')
plt.plot(newX,yINTER,'r--')
plt.plot(newX,yUNIVAR,'g--')
#plt.legend(['Raw Data','Guassian (All Points)','Cubic Spline'], loc='best')
#plt.legend(['Raw Data','Cubic Spline'], loc='best')
plt.legend(['Raw Data','Interp1d','Univariate Spline'], loc='best')
plt.show()"""
# Check if maxima above S/N cut-off
values = self.getBackground(spectrum, maximum[0], charge, window)
background,noise = values[0], values[2]
if maximum[1] > background + CALIB_S_N_CUTOFF * noise:
maxima.append(maximum)
return maxima
def readCalibrationFeatures(self):
""" This function reads the calibration file and returns the
features in a tuple containing the lower time and upper time values,
followed by a list of the m/z coordinates.
INPUT: None
OUTPUT: Tuple containing (lowTime, highTime, [m/z coordinates])
"""
with open(self.calFile,'r') as fr:
firstLine = fr.readline()
lowTime, highTime = firstLine.strip().split("\t")
mz = []
for line in fr:
mz.append(float(line.strip()))
return (float(lowTime), float(highTime), mz)
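# Illustrative sketch of the expected calibration file (assumed values):
#
#   300.0\t900.0
#   1156.576
#   2342.128
#
# i.e. the first line holds the tab separated lower and upper retention time
# and every following line a single calibrant m/z.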
def sumSpectrum(self,time,array):
""" This function creates a summed spectrum and returns the
resulting spectrum back to the calling function.
INPUT: The retention time and time window, and an array containing
the entire measurement
OUTPUT: A sum spectrum in array form (m/z, intensity)
"""
time = tuple(time.split('-'))
# This is returning None's now
lowTime = self.binarySearch(array,float(time[0])-float(time[1]),len(array)-1,'left')
highTime = self.binarySearch(array,float(time[0])+float(time[1]),len(array)-1,'right')
LOW_MZ = 25000.0
HIGH_MZ = 0.0
for i in array[lowTime:highTime]:
if i[1][0][0] < LOW_MZ:
LOW_MZ = i[1][0][0]
if i[1][-1][0] > HIGH_MZ:
HIGH_MZ = i[1][-1][0]
# This should be dynamically determined
arraySize = (float(HIGH_MZ) - float(LOW_MZ)) * float(SUM_SPECTRUM_RESOLUTION)
combinedSpectra = numpy.zeros(shape=(int(arraySize+2),2))
bins = []
for index, i in enumerate(combinedSpectra):
i[0] = float(LOW_MZ) + index*(float(1)/float(SUM_SPECTRUM_RESOLUTION))
bins.append(float(LOW_MZ) + index*(float(1)/float(SUM_SPECTRUM_RESOLUTION)))
fullSet = []
mz = []
start = datetime.now()
for i in array[lowTime:highTime]:
for j in i[1]:
fullSet.append(j)
mz.append(j[0])
fullSet.sort(key = lambda tup: tup[0])
mz.sort()
mzArray = numpy.asarray(mz)
binsArray = numpy.asarray(bins)
test = numpy.searchsorted(binsArray,mzArray)
for index, i in enumerate(fullSet):
try:
combinedSpectra[test[index]][1] += i[1]
except:
# We ignore the data points at m/z edge, if they are important
# then the user should do a proper measurement.
pass
#from scipy.signal import savgol_filter
#new = savgol_filter(combinedSpectra,21,3)
#return new
return combinedSpectra
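# Worked sketch of the binning above (assumed values): with LOW_MZ = 1000.0,
# HIGH_MZ = 1010.0 and SUM_SPECTRUM_RESOLUTION = 100 the sum spectrum holds
# (1010 - 1000) * 100 + 2 = 1002 bins spaced 0.01 m/z apart, and every raw
# (m/z, intensity) point of the selected scans adds its intensity to the bin
# located via numpy.searchsorted.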
def findNearest(self,array,value):
""" A depracated function, will most likely be removed in the
near future.
"""
if value >= array[0][0] and value <= array[-1][0]:
diff = 1
# First Pass
a = 0
b = len(array)
while a < b:
mid = (a+b)//2
if array[mid][0] > value:
b = mid
else:
a = mid+1
if array[a][0] - value < diff:
diff = array[a][0] - value
index = a
# Second Pass
a = 0
b = len(array)
while a < b:
mid = (a+b)//2
if array[mid][0] < value:
a=mid+1
else:
b=mid
if array[a][0] - value < diff:
diff = array[a][0] - value
index = a
return a
def transform_mzXML(self,file,fit,alignFunction):
"""Reads the mzXML file and transforms the reported retention
time by the specified polynomial function.
INPUT: A filename, alignment function and fitting model
OUTPUT: An aligned mzXML file
"""
with open(file,'r') as fr:
outFile = os.path.split(file)[-1]
# Use a different prefix to indicate files that were aligned using the basic (linear) alignment
if len(alignFunction) == 3:
outFile = "aligned_"+outFile
elif len(alignFunction) == 2:
outFile = "alignedLin_"+outFile
outFile = os.path.join(self.batchFolder,outFile)
if self.log == True:
with open('LaCyTools.log', 'a') as flog:
flog.write(str(datetime.now())+ "\tWriting output file: "+outFile+"\n")
with open(outFile,'w') as fw:
for line in fr:
if 'retentionTime' in line:
time = line.strip()
time = time.split("\"")
for index,i in enumerate(time):
if 'retentionTime' in i:
time=time[index+1]
break
if time[0] == 'P':
time = time[2:-1]
# The below line is only to make it work with mzMine
if fit(float(time),*alignFunction) < 0:
newTime = str(0)
else:
newTime = str(fit(float(time),*alignFunction))
line = line.replace(time,newTime)
fw.write(line)
else:
fw.write(line)
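# Illustrative sketch (assumed values): a scan header holding
# retentionTime="PT63.21S" yields time = "63.21", which is replaced by
# str(fit(63.21, *alignFunction)) in the output line, with negative results
# clamped to 0 for mzMine compatibility (see the comment above).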
def alignRTs(self,file,polynomial):
"""Reads the mzXML file and transforms the reported retention
time by the specified polynomial function.
INPUT: A filename and the alignment function
OUTPUT: An aligned mzXML file
"""
if self.log == True:
with open('LaCyTools.log', 'a') as flog:
flog.write(str(datetime.now())+ "\tAligning file: "+str(self.inputFile)+"\n")
i = self.inputFileIdx
for row in self.ptFile.root.scans.where("sample == i"):
time = row['rt']
# The below line is only to make it work with mzMine
if self.fitFunc(time,*polynomial) > 0:
row['art'] = self.fitFunc(time,*polynomial)
row.update()
self.ptFile.flush()
def feature_finder(self,data,lowMass,highMass):
# Proper fix
intensity = 0
start = self.binarySearch(data,lowMass,len(data)-1,'left')
end = self.binarySearch(data,highMass,len(data)-1,'right')
for i in data[start:end]:
if i[1] > intensity:
intensity = i[1]
return intensity
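# Illustrative sketch (assumed values): feature_finder(spectrum, 999.9, 1000.1)
# returns the highest intensity among the data points whose m/z lies between
# 999.9 and 1000.1, or 0 if no such data point exists.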
def createHeader(self, compositions, reference, chargestate=None):
"""Creates a generic header for both combined and separate
charge states. The function uses the initial reference list,
the extracted compositions and the optional chargestate.
INPUT 1: A list of tuples (analyte composition, analyte
retention time)
INPUT 2: A list of analyte reference tuples (analyte, m/z,
relative area, m/z window, rt, rt window and
calibration)
INPUT 3: An integer
OUTPUT: A string containing the header for the final summary
"""
header = ""
for i in compositions:
header += "\t"+str(i[0])
header += "\n"
# List of theoretical areas
header += "Fraction"
for i in compositions:
sumInt = 0.
for j in reference:
analyte = "_".join(j[0].split("_")[:-2])
charge = j[0].split("_")[-2]
isotope = j[0].split("_")[-1]
time = j[4]
timewindow = j[5]
if chargestate == None:
if str(analyte) == str(i[0]) and float(time) == float(i[1]) and float(timewindow) == float(i[2]):
sumInt += float(j[2])
else:
if str(analyte) == str(i[0]) and float(time) == float(i[1]) and float(timewindow) == float(i[2]) and int(chargestate) == int(charge):
sumInt += float(j[2])
header += "\t"
if sumInt > 0.:
header += str(sumInt)
header += "\n"
# List of monoisotopic masses
header += "Monoisotopic Mass"
for i in compositions:
masses = []
current_relative_intensity = 0.
# Retrieve highest relative intensity
for j in reference:
analyte = "_".join(j[0].split("_")[:-2])
charge = j[0].split("_")[-2]
isotope = j[0].split("_")[-1]
relative_intensity = float(j[2])
time = j[4]
timewindow = j[5]
if str(analyte) == str(i[0]) and float(time) == float(i[1]) and float(timewindow) == float(i[2]) and relative_intensity >= current_relative_intensity:
current_relative_intensity = relative_intensity
# Get actual data
for j in reference:
analyte = "_".join(j[0].split("_")[:-2])
charge = j[0].split("_")[-2]
isotope = j[0].split("_")[-1]
relative_intensity = float(j[2])
time = j[4]
timewindow = j[5]
if chargestate == None:
if str(analyte) == str(i[0]) and float(time) == float(i[1]) and float(timewindow) == float(i[2]) and relative_intensity == current_relative_intensity:
masses.append(float(j[1]))
else:
if str(analyte) == str(i[0]) and float(time) == float(i[1]) and float(timewindow) == float(i[2]) and relative_intensity == current_relative_intensity and int(chargestate) == int(charge):
masses.append(float(j[1]))
if masses:
masses = "["+", ".join(map(str, masses))+"]"
else:
masses = ""
try:
header += "\t"+masses
except TypeError:
header += "\tNA"
header += "\n"
return header
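# Illustrative sketch of a resulting header block (analyte names and numbers
# are assumptions):
#
#   \tIgGI1H3N4F1\tIgGI1H4N4F1
#   Fraction\t0.95\t0.96
#   Monoisotopic Mass\t[1317.52]\t[1479.57]
#
# i.e. one column per analyte followed by the summed theoretical isotopic
# fraction and the monoisotopic mass rows used in the summary file.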
def combineResults(self):
""" This function reads all the raw files and creates the summary
output file.
INPUT: None
OUTPUT: A summary file
"""
total = []
ref = []
self.refParser(ref)
for file in glob.glob(os.path.join(str(self.batchFolder),"*.raw")):
compositions = []
trigger = 0
results = []
with open(file,'r') as fr:
name = str(file)
name = os.path.split(str(name))[-1]
current = None
for _ in range(2):
next(fr)
for line in fr:
if not line:
break
line = line.strip().split("\t")
if current:
if current.composition == line[0] and current.time == line[5] and current.timeWindow == line[8]:
foo = Isotope()
foo.isotope = line[2]
foo.mass = float(line[3])
foo.measMass = float(line[4])
foo.charge = line[1]
foo.obsInt = float(line[9])
foo.obsMax = float(line[13])
foo.expInt = float(line[6])
foo.background = float(line[10])
foo.backgroundPoint = float(line[11])
foo.noise = float(line[12])
current.isotopes.append(foo)
else:
results.append(current)
current = Analyte()
current.composition = line[0]
current.time = line[5]
current.timeWindow = line[8]
current.massWindow = line[7]
current.isotopes = []
foo = Isotope()
foo.isotope = line[2]
foo.mass = float(line[3])
foo.measMass = float(line[4])
foo.charge = line[1]
foo.obsInt = float(line[9])
foo.obsMax = float(line[13])
foo.expInt = float(line[6])
foo.background = float(line[10])
foo.backgroundPoint = float(line[11])
foo.noise = float(line[12])
current.isotopes.append(foo)
else:
current = Analyte()
current.composition = line[0]
current.time = line[5]
current.timeWindow = line[8]
current.massWindow = line[7]
current.isotopes = []
foo = Isotope()
foo.isotope = line[2]
foo.mass = float(line[3])
foo.measMass = float(line[4])
foo.charge = line[1]
foo.obsInt = float(line[9])
foo.obsMax = float(line[13])
foo.expInt = float(line[6])
foo.background = float(line[10])
foo.backgroundPoint = float(line[11])
foo.noise = float(line[12])
current.isotopes.append(foo)
results.append(current)
total.append((name,results))
for file in glob.glob(str(self.batchFolder)+"/unaligned*"+EXTENSION):
name = str(file)
name = os.path.split(str(name))[-1]
total.append((name,[]))
total.sort()
# Test chunk to see if class conversion worked
"""for i in total:
for j in i[1]:
print j.composition
if j.composition == "IgGI1H3N4F1":
for k in j.isotopes:
print k.isotope, k.charge, k.obsInt"""
#################################
# Generate the summaryFile name #
#################################
utc_datetime = datetime.utcnow()
s = utc_datetime.strftime("%Y-%m-%d-%H%MZ")
filename = s +"_"+OUTPUT
summaryFile = os.path.join(self.batchFolder,filename)
####################################################################
# Get list of analytes, required for correct alignment of clusters #
####################################################################
compositions = []
with open(self.refFile,'r') as fr:
for line in fr:
if line[0] == "#":
continue
parts=line.rstrip('\n').split('\t')
if not parts[3]:
parts[3] = TIME_WINDOW
compositions.append((parts[0],parts[1],parts[3]))
#############################
# Start writing the results #
#############################
with open(summaryFile,'w') as fw:
##############
# Parameters #
##############
fw.write("Parameter Settings\n")
fw.write("LaCyTools Version\t"+str(self.version)+"\n")
fw.write("LaCyTools Build\t"+str(self.build)+"\n")
if self.alFile != "":
fw.write("Alignment Parameters\n")
fw.write("ALIGNMENT_TIME_WINDOW\t"+str(ALIGNMENT_TIME_WINDOW)+"\n")
fw.write("ALIGNMENT_MASS_WINDOW\t"+str(ALIGNMENT_MASS_WINDOW)+"\n")
fw.write("ALIGNMENT_S_N_CUTOFF\t"+str(ALIGNMENT_S_N_CUTOFF)+"\n")
fw.write("ALIGNMENT_MIN_PEAK\t"+str(ALIGNMENT_MIN_PEAK)+"\n")
if self.calFile.get() == 1:
fw.write("Calibration Parameters\n")
fw.write("CALIB_MASS_WINDOW\t"+str(CALIB_MASS_WINDOW)+"\n")
fw.write("CALIB_S_N_CUTOFF\t"+str(CALIB_S_N_CUTOFF)+"\n")
fw.write("CALIB_MIN_PEAK\t"+str(CALIB_MIN_PEAK)+"\n")
if self.refFile != "":
fw.write("Extraction Parameters\n")
fw.write("SUM_SPECTRUM_RESOLUTION\t"+str(SUM_SPECTRUM_RESOLUTION)+"\n")
fw.write("MASS_WINDOW\t"+str(MASS_WINDOW)+"\n")
fw.write("TIME_WINDOW\t"+str(TIME_WINDOW)+"\n")
fw.write("MIN_CHARGE\t"+str(MIN_CHARGE)+"\n")
fw.write("MAX_CHARGE\t"+str(MAX_CHARGE)+"\n")
fw.write("MIN_TOTAL\t"+str(MIN_TOTAL)+"\n")
fw.write("BACKGROUND_WINDOW\t"+str(BACKGROUND_WINDOW)+"\n\n")
##############################
# Analyte Absolute Intensity #
##############################
if self.analyteIntensity.get() == 1 and self.analyteBckSub.get() == 0:
##########################
# Combined charge states #
##########################
if self.analytePerCharge.get() == 0:
# Header
header = "Absolute Intensity"+self.createHeader(compositions, ref)
fw.write(header)
# Actual data
for i in total:
fw.write(str(i[0]))
for j in compositions:
sumInt = 0
for k in i[1]:
try:
if k.composition == j[0] and float(k.time) == float(j[1]) and float(k.timeWindow) == float(j[2]):
for l in k.isotopes:
sumInt += max(0, l.obsInt)
except AttributeError:
pass
if sumInt > 0:
fw.write("\t"+str(sumInt))
else:
fw.write("\t")
fw.write("\n")
fw.write("\n")
####################
# Per charge state #
####################
if self.analytePerCharge.get() == 1:
minCharge = sys.maxsize
maxCharge = 0
for i in total:
for j in compositions:
for k in i[1]:
try:
if k.composition == j[0] and float(k.time) == float(j[1]) and float(k.timeWindow) == float(j[2]):
for l in k.isotopes:
if int(l.charge) < minCharge:
minCharge = int(l.charge)
elif int(l.charge) > maxCharge:
maxCharge = int(l.charge)
except AttributeError:
pass
for i in range(minCharge,maxCharge+1):
# This is a time intensive function
# Header
header = "Absolute Intensity ("+str(i)+"+)"+self.createHeader(compositions, ref, i)
fw.write(header)
# Actual data
for j in total:
fw.write(str(j[0]))
for k in compositions:
sumInt = 0
for l in j[1]:
try:
if l.composition == k[0] and float(l.time) == float(k[1]) and float(l.timeWindow) == float(k[2]):
for m in l.isotopes:
if int(m.charge) == i:
sumInt += max(0, m.obsInt)
except AttributeError:
pass
if sumInt > 0:
fw.write("\t"+str(sumInt))
else:
fw.write("\t")
fw.write("\n")
fw.write("\n")
######################################################
# Analyte Absolute Intensity (Background subtracted) #
######################################################
if self.analyteIntensity.get() == 1 and self.analyteBckSub.get() == 1:
#########################
# Combined charge state #
#########################
if self.analytePerCharge.get() == 0:
# Header
header = "Absolute Intensity (Background Subtracted)"+self.createHeader(compositions, ref)
fw.write(header)
# Actual data
for i in total:
fw.write(str(i[0]))
for j in compositions:
sumInt = 0
for k in i[1]:
try:
if k.composition == j[0] and float(k.time) == float(j[1]) and float(k.timeWindow) == float(j[2]):
for l in k.isotopes:
sumInt += max(0, l.obsInt - l.background)
except AttributeError:
pass
if sumInt > 0:
fw.write("\t"+str(sumInt))
else:
fw.write("\t")
fw.write("\n")
fw.write("\n")
####################
# Per charge state #
####################
if self.analytePerCharge.get() == 1:
minCharge = sys.maxsize
maxCharge = 0
for i in total:
for j in compositions:
for k in i[1]:
try:
if k.composition == j[0] and float(k.time) == float(j[1]) and float(k.timeWindow) == float(j[2]):
for l in k.isotopes:
if int(l.charge) < minCharge:
minCharge = int(l.charge)
elif int(l.charge) > maxCharge:
maxCharge = int(l.charge)
except AttributeError:
pass
for i in range(minCharge,maxCharge+1):
# This is a time intensive function
# Header
header = "Absolute Intensity (Background Subtracted, "+str(i)+"+)"+self.createHeader(compositions, ref, i)
fw.write(header)
# Actual data
for j in total:
fw.write(str(j[0]))
for k in compositions:
sumInt = 0
for l in j[1]:
try:
if l.composition == k[0] and float(l.time) == float(k[1]) and float(l.timeWindow) == float(k[2]):
for m in l.isotopes:
if int(m.charge) == i:
sumInt += max(0, m.obsInt - m.background)
except AttributeError:
pass
if sumInt > 0:
fw.write("\t"+str(sumInt))
else:
fw.write("\t")
fw.write("\n")
fw.write("\n")
####################################################
# Analyte Relative Intensity (Total Normalization) #
####################################################
if self.analyteRelIntensity.get() == 1 and self.analyteBckSub.get() == 0 and self.normalizeCluster.get() == 0:
#########################
# Combined charge state #
#########################
if self.analytePerCharge.get() == 0:
# Header
header = "Relative Intensity"+self.createHeader(compositions, ref)
fw.write(header)
# Actual data
for i in total:
fw.write(str(i[0]))
totalIntensity = 1
for j in compositions:
for k in i[1]:
try:
if k.composition == j[0] and float(k.time) == float(j[1]) and float(k.timeWindow) == float(j[2]):
for l in k.isotopes:
totalIntensity += max(0, l.obsInt)
except AttributeError:
pass
for j in compositions:
sumInt = 0
for k in i[1]:
try:
if k.composition == j[0] and float(k.time) == float(j[1]) and float(k.timeWindow) == float(j[2]):
for l in k.isotopes:
sumInt += max(0, l.obsInt)
except AttributeError:
pass
if sumInt > 0:
fw.write("\t"+str(float(sumInt)/float(totalIntensity)))
else:
fw.write("\t")
fw.write("\n")
fw.write("\n")
####################
# Per charge state #
####################
if self.analytePerCharge.get() == 1:
minCharge = sys.maxsize
maxCharge = 0
for i in total:
for j in compositions:
for k in i[1]:
try:
if k.composition == j[0] and float(k.time) == float(j[1]) and float(k.timeWindow) == float(j[2]):
for l in k.isotopes:
if int(l.charge) < minCharge:
minCharge = int(l.charge)
elif int(l.charge) > maxCharge:
maxCharge = int(l.charge)
except AttributeError:
pass
for i in range(minCharge,maxCharge+1):
# This is a time intensive function
# Header
header = "Relative Intensity ("+str(i)+"+)"+self.createHeader(compositions, ref)
fw.write(header)
# Actual data
for j in total:
fw.write(str(j[0]))
totalIntensity = 1
for k in compositions:
for l in j[1]:
try:
if l.composition == k[0] and float(l.time) == float(k[1]) and float(l.timeWindow) == float(k[2]):
for m in l.isotopes:
if int(m.charge) == i:
totalIntensity += max(0, m.obsInt)
except AttributeError:
pass
for k in compositions:
sumInt = 0
for l in j[1]:
try:
if l.composition == k[0] and float(l.time) == float(k[1]) and float(l.timeWindow) == float(k[2]):
for m in l.isotopes:
if int(m.charge) == i:
sumInt += max(0, m.obsInt)
except AttributeError:
pass
if sumInt > 0:
fw.write("\t"+str(float(sumInt)/float(totalIntensity)))
else:
fw.write("\t")
fw.write("\n")
fw.write("\n")
######################################################
# Analyte Relative Intensity (Cluster Normalization) #
################################################################################################
# TODO: Check if this can not be simplified now that we have timewindow in compositions as [2] #
################################################################################################
if self.analyteRelIntensity.get() == 1 and self.analyteBckSub.get() == 0 and self.normalizeCluster.get() == 1:
#########################
# Combined charge state #
#########################
if self.analytePerCharge.get() == 0:
# Header
header = "Relative Intensity (Cluster Normalization)"+self.createHeader(compositions, ref)
fw.write(header)
# Actual Data
clusters = []
for i in total:
for j in compositions:
for k in i[1]:
try:
currentCluster = "-".join((k.time, k.timeWindow))
if currentCluster not in clusters:
clusters.append(currentCluster)
except AttributeError:
continue
for i in total:
clusterValues = []
fw.write(str(i[0]))
for j in clusters:
clusterTime = float(j.split("-")[0])
clusterWindow = float(j.split("-")[1])
totalIntensity = 1
for k in compositions:
for l in i[1]:
try:
if l.composition == k[0] and float(l.time) == clusterTime and float(l.timeWindow) == clusterWindow and float(k[1]) == float(l.time) and float(k[2]) == float(l.timeWindow):
for m in l.isotopes:
totalIntensity += max(0, m.obsInt)
except AttributeError:
pass
clusterValues.append((clusterTime, clusterWindow, totalIntensity))
for j in compositions:
flag = 0
sumInt = 0
for k in i[1]:
for l in clusterValues:
try:
if k.composition == j[0] and float(k.time) == l[0] and float(k.timeWindow) == l[1] and float(j[1]) == float(k.time) and float(j[2]) == float(k.timeWindow):
flag = 1
for m in k.isotopes:
sumInt += max(0, m.obsInt)
if sumInt > 0:
fw.write("\t"+str(float(sumInt)/float(l[2])))
else:
fw.write("\t")
except AttributeError:
pass
if flag == 0:
fw.write("\t")
fw.write("\n")
fw.write("\n")
####################
# Per charge state #
####################
if self.analytePerCharge.get() == 1:
minCharge = sys.maxsize
maxCharge = 0
for i in total:
for j in compositions:
for k in i[1]:
try:
if k.composition == j[0] and float(k.time) == float(j[1]) and float(k.timeWindow) == float(j[2]):
for l in k.isotopes:
if int(l.charge) < minCharge:
minCharge = int(l.charge)
elif int(l.charge) > maxCharge:
maxCharge = int(l.charge)
except AttributeError:
pass
for i in range(minCharge,maxCharge+1):
# This is a time intensive function
# Header
header = "Relative Intensity (Cluster Normalization, "+str(i)+"+)"+self.createHeader(compositions, ref, i)
fw.write(header)
# Actual data
clusters = []
for j in total:
for k in compositions:
for l in j[1]:
try:
currentCluster = "-".join((l.time, l.timeWindow))
if currentCluster not in clusters:
clusters.append(currentCluster)
except AttributeError:
continue
for j in total:
clusterValues = []
fw.write(str(j[0]))
for k in clusters:
clusterTime = float(k.split("-")[0])
clusterWindow = float(k.split("-")[1])
totalIntensity = 1
for l in compositions:
for m in j[1]:
try:
if m.composition == l[0] and float(m.time) == clusterTime and float(m.timeWindow) == clusterWindow and float(l[1]) == float(m.time) and float(l[2]) == float(m.timeWindow):
for n in m.isotopes:
if int(n.charge) == i:
totalIntensity += max(0, n.obsInt)
except AttributeError:
pass
clusterValues.append((clusterTime, clusterWindow, totalIntensity))
for k in compositions:
flag = 0
sumInt = 0
for l in j[1]:
for m in clusterValues:
try:
if l.composition == k[0] and float(l.time) == m[0] and float(l.timeWindow) == m[1] and float(k[1]) == float(l.time) and float(k[2]) == float(l.timeWindow):
flag = 1
for n in l.isotopes:
if int(n.charge) == i:
sumInt += max(0, n.obsInt)
if sumInt > 0:
fw.write("\t"+str(float(sumInt)/float(m[2])))
else:
fw.write("\t")
except AttributeError:
pass
if flag == 0:
fw.write("\t")
fw.write("\n")
fw.write("\n")
##################################################################
# Background Subtracted Relative Intensity (Total Normalization) #
##################################################################
if self.analyteRelIntensity.get() == 1 and self.analyteBckSub.get() == 1 and self.normalizeCluster.get() == 0:
#########################
# Combined charge state #
#########################
if self.analytePerCharge.get() == 0:
# Header
header = "Relative Intensity (Background Subtracted)"+self.createHeader(compositions, ref)
fw.write(header)
# Actual data
for i in total:
fw.write(str(i[0]))
totalIntensity = 1
for j in compositions:
for k in i[1]:
try:
if k.composition == j[0] and float(k.time) == float(j[1]) and float(k.timeWindow) == float(j[2]):
for l in k.isotopes:
totalIntensity += max(0, l.obsInt - l.background)
except AttributeError:
pass
for j in compositions:
sumInt = 0
for k in i[1]:
try:
if k.composition == j[0] and float(k.time) == float(j[1]) and float(k.timeWindow) == float(j[2]):
for l in k.isotopes:
sumInt += max(0, l.obsInt - l.background)
except AttributeError:
pass
if sumInt > 0:
fw.write("\t"+str(float(sumInt)/float(totalIntensity)))
else:
fw.write("\t")
fw.write("\n")
fw.write("\n")
####################
# Per charge state #
####################
if self.analytePerCharge.get() == 1:
minCharge = sys.maxsize
maxCharge = 0
for i in total:
for j in compositions:
for k in i[1]:
try:
if k.composition == j[0] and float(k.time) == float(j[1]) and float(k.timeWindow) == float(j[2]):
for l in k.isotopes:
if int(l.charge) < minCharge:
minCharge = int(l.charge)
elif int(l.charge) > maxCharge:
maxCharge = int(l.charge)
except AttributeError:
pass
for i in range(minCharge,maxCharge+1):
# This is a time intensive function
# Header
header = "Relative Intensity (Background Subtracted, "+str(i)+"+)"+self.createHeader(compositions, ref, i)
fw.write(header)
# Actual data
for j in total:
fw.write(str(j[0]))
totalIntensity = 1
for k in compositions:
for l in j[1]:
try:
if l.composition == k[0] and float(l.time) == float(k[1]) and float(l.timeWindow) == float(k[2]):
for m in l.isotopes:
if int(m.charge) == i:
totalIntensity += max(0, m.obsInt - m.background)
except AttributeError:
pass
for k in compositions:
sumInt = 0
for l in j[1]:
try:
if l.composition == k[0] and float(l.time) == float(k[1]) and float(l.timeWindow) == float(k[2]):
for m in l.isotopes:
if int(m.charge) == i:
sumInt += max(0, m.obsInt - m.background)
except AttributeError:
pass
if sumInt > 0:
fw.write("\t"+str(float(sumInt)/float(totalIntensity)))
else:
fw.write("\t")
fw.write("\n")
fw.write("\n")
#####################################################################
# Background Subtracted Relative Intensity (Cluster Normalization) #
#####################################################################
if self.analyteRelIntensity.get() == 1 and self.analyteBckSub.get() == 1 and self.normalizeCluster.get() == 1:
#########################
# Combined charge state #
#########################
if self.analytePerCharge.get() == 0:
# Header
header = "Relative Intensity (Background Subtracted, Cluster Normalization)"+self.createHeader(compositions, ref)
fw.write(header)
# Actual Data
clusters = []
for i in total:
for j in compositions:
for k in i[1]:
try:
currentCluster = "-".join((k.time, k.timeWindow))
if currentCluster not in clusters:
clusters.append(currentCluster)
except AttributeError:
continue
for i in total:
fw.write(str(i[0]))
clusterValues = []
for j in clusters:
clusterTime = float(j.split("-")[0])
clusterWindow = float(j.split("-")[1])
totalIntensity = 1
for k in compositions:
for l in i[1]:
try:
if l.composition == k[0] and float(l.time) == clusterTime and float(l.timeWindow) == clusterWindow and float(k[1]) == float(l.time) and float(k[2]) == float(l.timeWindow):
for m in l.isotopes:
totalIntensity += max(0, m.obsInt - m.background)
except AttributeError:
pass
clusterValues.append((clusterTime, clusterWindow, totalIntensity))
for j in compositions:
flag = 0
sumInt = 0
for k in i[1]:
for l in clusterValues:
try:
if k.composition == j[0] and float(k.time) == l[0] and float(k.timeWindow) == l[1] and float(j[1]) == float(k.time) and float(j[2]) == float(k.timeWindow):
flag = 1
for m in k.isotopes:
sumInt += max(0, m.obsInt - m.background)
if sumInt > 0:
fw.write("\t"+str(float(sumInt)/float(l[2])))
else:
fw.write("\t")
except AttributeError:
pass
if flag == 0:
fw.write("\t")
fw.write("\n")
fw.write("\n")
####################
# Per charge state #
####################
if self.analytePerCharge.get() == 1:
minCharge = sys.maxsize
maxCharge = 0
for i in total:
for j in compositions:
for k in i[1]:
try:
if k.composition == j[0] and float(k.time) == float(j[1]) and float(k.timeWindow) == float(j[2]):
for l in k.isotopes:
if int(l.charge) < minCharge:
minCharge = int(l.charge)
elif int(l.charge) > maxCharge:
maxCharge = int(l.charge)
except AttributeError:
pass
for i in range(minCharge,maxCharge+1):
# This is a time intensive function
# Header
header = "Relative Intensity (Background Subtracted, Cluster Normalization, "+str(i)+"+)"+self.createHeader(compositions, ref, i)
fw.write(header)
# Actual data
clusters = []
for j in total:
for k in compositions:
for l in j[1]:
try:
currentCluster = "-".join((l.time, l.timeWindow))
if currentCluster not in clusters:
clusters.append(currentCluster)
except AttributeError:
continue
for j in total:
clusterValues = []
fw.write(str(j[0]))
for k in clusters:
clusterTime = float(k.split("-")[0])
clusterWindow = float(k.split("-")[1])
totalIntensity = 1
for l in compositions:
for m in j[1]:
try:
if m.composition == l[0] and float(m.time) == clusterTime and float(m.timeWindow) == clusterWindow and float(l[1]) == float(m.time) and float(l[2]) == float(m.timeWindow):
for n in m.isotopes:
if int(n.charge) == i:
totalIntensity += max(0, n.obsInt - n.background)
except AttributeError:
pass
clusterValues.append((clusterTime, clusterWindow, totalIntensity))
for k in compositions:
flag = 0
sumInt = 0
for l in j[1]:
for m in clusterValues:
try:
if l.composition == k[0] and float(l.time) == m[0] and float(l.timeWindow) == m[1] and float(k[1]) == float(l.time) and float(k[2]) == float(l.timeWindow):
flag = 1
for n in l.isotopes:
if int(n.charge) == i:
sumInt += max(0, n.obsInt - n.background)
if sumInt > 0:
fw.write("\t"+str(float(sumInt)/float(m[2])))
else:
fw.write("\t")
except AttributeError:
pass
if flag == 0:
fw.write("\t")
fw.write("\n")
fw.write("\n")
################################
# Analyte Background Intensity #
################################
if self.analyteBackground.get() == 1:
#########################
# Combined charge state #
#########################
if self.analytePerCharge.get() == 0:
# Header
header = "Background"+self.createHeader(compositions, ref)
fw.write(header)
# Actual data
for i in total:
fw.write(str(i[0]))
for j in compositions:
sumInt = 0
for k in i[1]:
try:
if k.composition == j[0] and float(k.time) == float(j[1]) and float(k.timeWindow) == float(j[2]):
for l in k.isotopes:
sumInt += l.background
except AttributeError:
pass
fw.write("\t"+str(sumInt))
fw.write("\n")
fw.write("\n")
####################
# Per charge state #
####################
if self.analytePerCharge.get() == 1:
minCharge = sys.maxsize
maxCharge = 0
for i in total:
for j in compositions:
for k in i[1]:
try:
if k.composition == j[0] and float(k.time) == float(j[1]) and float(k.timeWindow) == float(j[2]):
for l in k.isotopes:
if int(l.charge) < minCharge:
minCharge = int(l.charge)
elif int(l.charge) > maxCharge:
maxCharge = int(l.charge)
except AttributeError:
pass
for i in range(minCharge,maxCharge+1):
# This is a time intensive function
# Header
header = "Background ("+str(i)+"+)"+str(i)+"+)"+self.createHeader(compositions, ref, i)
fw.write(header)
# Actual data
for j in total:
fw.write(str(j[0]))
for k in compositions:
sumInt = 0
for l in j[1]:
try:
if l.composition == k[0] and float(l.time) == float(k[1]) and float(l.timeWindow) == float(k[2]):
for m in l.isotopes:
if int(m.charge) == i:
sumInt += m.background
except AttributeError:
pass
fw.write("\t"+str(sumInt))
fw.write("\n")
fw.write("\n")
#######################
# Analyte Noise Value #
#######################
if self.analyteNoise.get() == 1:
#########################
# Combined charge state #
#########################
if self.analytePerCharge.get() == 0:
# Header
header = "Noise"+self.createHeader(compositions, ref)
fw.write(header)
# Actual data
for i in total:
fw.write(str(i[0]))
for j in compositions:
sumInt = 0
for k in i[1]:
try:
if k.composition == j[0] and float(k.time) == float(j[1]) and float(k.timeWindow) == float(j[2]):
for l in k.isotopes:
sumInt += l.noise
except AttributeError:
pass
fw.write("\t"+str(sumInt))
fw.write("\n")
fw.write("\n")
####################
# Per charge state #
####################
if self.analytePerCharge.get() == 1:
minCharge = sys.maxsize
maxCharge = 0
for i in total:
for j in compositions:
for k in i[1]:
try:
if k.composition == j[0] and float(k.time) == float(j[1]) and float(k.timeWindow) == float(j[2]):
for l in k.isotopes:
if int(l.charge) < minCharge:
minCharge = int(l.charge)
elif int(l.charge) > maxCharge:
maxCharge = int(l.charge)
except AttributeError:
pass
for i in range(minCharge,maxCharge+1):
# This is a time intensive function
# Header
header = "Noise ("+str(i)+"+)"+self.createHeader(compositions, ref, i)
fw.write(header)
# Actual data
for j in total:
fw.write(str(j[0]))
for k in compositions:
sumInt = 0
for l in j[1]:
try:
if l.composition == k[0] and float(l.time) == float(k[1]) and float(l.timeWindow) == float(k[2]):
for m in l.isotopes:
if int(m.charge) == i:
sumInt += m.noise
except AttributeError:
pass
fw.write("\t"+str(sumInt))
fw.write("\n")
fw.write("\n")
#######################
# Alignment Residuals #
#######################
if self.alignmentQC.get() == 1:
# Get results
totalResults = []
for file in glob.glob(os.path.join(str(self.batchFolder),"*.alignment")):
resultBuffer = []
with open (file,'r') as fr:
for line in fr:
line = line.strip().split()
resultBuffer.append(line)
totalResults.append((file,resultBuffer))
# Header
header = []
for i in totalResults:
if len(i[1]) > len(header):
header = i[1][:]
fw.write("Alignment Features Residual")
for i in header[1:]:
fw.write("\t"+str(i[0]))
fw.write("\tRMS\n")
# Actual Data
for i in totalResults:
RMS = 0
fw.write(str(i[0]))
for j in header[1:]:
flag = 0
for k in i[1]:
if j[0] == k[0]:
fw.write("\t"+str(float(k[3])-float(k[1])))
RMS += (float(k[3])-float(k[1]))**2
flag = 1
if flag == 0:
fw.write("\t")
fw.write("\t"+str(math.sqrt(RMS))+"\n")
fw.write("\n")
#####################################
# Alignment Features Retention Time #
#####################################
if self.alignmentQC.get() == 1:
# Get results
totalResults = []
for file in glob.glob(os.path.join(str(self.batchFolder),"*.alignment")):
resultBuffer = []
with open (file,'r') as fr:
for line in fr:
line = line.strip().split()
resultBuffer.append(line)
totalResults.append((file,resultBuffer))
# Header
header = []
for i in totalResults:
if len(i[1]) > len(header):
header = i[1][:]
fw.write("Alignment Features Retention Time")
for i in header[1:]:
fw.write("\t"+str(i[0]))
# Actual Data
for i in totalResults:
fw.write(str(i[0]))
for j in header[1:]:
flag = 0
for k in i[1]:
if j[0] == k[0]:
fw.write("\t"+str(float(k[3])))
flag = 1
if flag == 0:
fw.write("\t")
fw.write("\n")
fw.write("\n")
##################################
# Analyte Mass Accuracy (in PPM) #
##################################
if self.qualityControl.get() == 1:
minCharge = sys.maxsize
maxCharge = 0
for i in total:
for j in compositions:
for k in i[1]:
try:
if k.composition == j[0] and float(k.time) == float(j[1]) and float(k.timeWindow) == float(j[2]):
for l in k.isotopes:
if int(l.charge) < minCharge:
minCharge = int(l.charge)
elif int(l.charge) > maxCharge:
maxCharge = int(l.charge)
except AttributeError:
pass
for i in range(minCharge,maxCharge+1):
# This is a time intensive function
# Header
header = "Mass Accuracy [ppm] ("+str(i)+"+)"+self.createHeader(compositions, ref, i)
fw.write(header)
# Actual Data
for j in total:
fw.write(str(j[0]))
for k in compositions:
relContribution = 0.0
targetMass = 0.0
actualMass = 0.0
for l in j[1]:
try:
if l.composition == k[0] and float(l.time) == float(k[1]) and float(l.timeWindow) == float(k[2]):
for m in l.isotopes:
if m.expInt > relContribution and int(m.charge) == i:
relContribution = m.expInt
targetMass = m.mass
actualMass = m.measMass
except AttributeError:
pass
try:
ppm = ((actualMass - targetMass) / targetMass) * 1000000
fw.write("\t"+str(ppm))
except ZeroDivisionError:
fw.write("\t")
fw.write("\n")
fw.write("\n")
############################
# Isotopic Pattern Quality #
############################
if self.qualityControl.get() == 1:
minCharge = sys.maxsize
maxCharge = 0
for i in total:
for j in compositions:
for k in i[1]:
try:
if k.composition == j[0] and float(k.time) == float(j[1]) and float(k.timeWindow) == float(j[2]):
for l in k.isotopes:
if int(l.charge) < minCharge:
minCharge = int(l.charge)
elif int(l.charge) > maxCharge:
maxCharge = int(l.charge)
except AttributeError:
pass
for i in range(minCharge,maxCharge+1):
# This is a time intensive function
# Header
header = "Isotopic Pattern Quality ("+str(i)+"+)"+self.createHeader(compositions, ref, i)
fw.write(header)
# Actual data
for j in total:
fw.write(str(j[0]))
for k in compositions:
sumInt = 0
totalExpInt = 0
qc = 0
for l in j[1]:
try:
if l.composition == k[0] and float(l.time) == float(k[1]) and float(l.timeWindow) == float(k[2]):
for m in l.isotopes:
if int(m.charge) == i:
sumInt += max(float(m.obsInt) - float(m.background),0)
totalExpInt += float(m.expInt)
for m in l.isotopes:
if int(m.charge) == i:
try:
maxIntensityBackCorrected = max(float(m.obsInt) - float(m.background),0)
qc += abs((maxIntensityBackCorrected / float(sumInt)) - (m.expInt/totalExpInt))
except ZeroDivisionError:
pass
except AttributeError:
pass
if qc > 0:
fw.write("\t"+str(qc))
else:
fw.write("\t")
fw.write("\n")
fw.write("\n")
#########################
# Signal to Noise ratio #
#########################
if self.qualityControl.get() == 1:
minCharge = sys.maxsize
maxCharge = 0
for i in total:
for j in compositions:
for k in i[1]:
try:
if k.composition == j[0] and float(k.time) == float(j[1]) and float(k.timeWindow) == float(j[2]):
for l in k.isotopes:
if int(l.charge) < minCharge:
minCharge = int(l.charge)
elif int(l.charge) > maxCharge:
maxCharge = int(l.charge)
except AttributeError:
pass
for i in range(minCharge,maxCharge+1):
# This is a time intensive function
# Header
header = "S/N ("+str(i)+"+)"+self.createHeader(compositions, ref, i)
fw.write(header)
# Actual data
for j in total:
fw.write(str(j[0]))
for k in compositions:
expInt = 0
SN = 0
for l in j[1]:
try:
if l.composition == k[0] and float(l.time) == float(k[1]) and float(l.timeWindow) == float(k[2]):
for m in l.isotopes:
if m.expInt > expInt and int(m.charge) == i:
try:
SN = (m.obsMax - m.backgroundPoint) / m.noise
except ZeroDivisionError:
pass
expInt = m.expInt
except AttributeError:
pass
if SN > 0:
fw.write("\t"+str(SN))
else:
fw.write("\t")
fw.write("\n")
fw.write("\n")
#########################################
# Fraction of analytes above S/N cutoff #
#########################################
if self.spectraQualityControl.get() == 1:
ref = []
self.refParser(ref)
times = []
for i in ref:
times.append((i[4],i[5]))
chunks = collections.OrderedDict()
for i in times:
if i not in chunks.keys():
chunks['%s' % '-'.join(i)] = []
for i in ref:
chunks['%s' % '-'.join((i[4],i[5]))].append(i)
# Header
fw.write("Fraction of analytes above S/N Cutoff")
for index,i in enumerate(chunks.keys()):
fw.write("\t"+str(i))
fw.write("\n")
#for index,i in enumerate(chunks.keys()):
# Actual data
for j in total:
fw.write(str(j[0]))
for index,i in enumerate(chunks.keys()):
numberTotal = 0
numberPass = 0
for k in compositions:
expInt = 0
SN = 0
for l in j[1]:
try:
if l.composition == k[0] and float(l.time) == float(i.split("-")[0]) and float(l.timeWindow) == float(i.split("-")[1]):
numberTotal += 1
for m in l.isotopes:
if m.expInt > expInt: # and int(m.charge) == i:
try:
SN = (m.obsMax - m.backgroundPoint) / m.noise
except ZeroDivisionError:
pass
expInt = m.expInt
if SN > S_N_CUTOFF:
numberPass += 1
except AttributeError:
pass
if numberTotal > 0:
fw.write("\t"+str(float(numberPass)/float(numberTotal)))
else:
fw.write("\t")
fw.write("\n")
fw.write("\n")
def writeResults(self,results,file):
""" This function writes the resultes per file away to a raw
file.
INPUT: A file name and a list of results
OUTPUT: A raw file per measurement
"""
outFile = os.path.split(file)[-1]
outFile = outFile.split(".")[0]
outFile = outFile+".raw"
outFile = os.path.join(self.batchFolder,outFile)
with open(outFile,'w') as fw:
fw.write(str(file)+"\n")
fw.write("Composition\tCharge\tIsotope\tExact Mass\tAccurate Mass\tTime\tTheor Area\tMass Window\tTime Window\tArea\tBackground Area\tBackground Point\tNoise\tMax Intensity\n")
for index,i in enumerate(results):
composition, charge, isotope = "_".join(i[4][0].split("_")[:-2]), i[4][0].split("_")[-2], i[4][0].split("_")[-1]
fw.write(str(composition)+"\t"+str(charge)+"\t"+str(isotope)+"\t"+str(i[4][1])+"\t"+str(i[2])+"\t"+str(i[4][4])+"\t"+str(i[4][2])+"\t"+str(i[4][3])+"\t"+str(i[4][5])+"\t"+str(i[0])+"\t"+str(i[1][1])+"\t"+str(i[1][0])+"\t"+str(i[1][2])+"\t"+str(i[3])+"\n")
def batchPopup(self,master):
""" This function creates a pop up box in which all the parameters
for a batch process can be set and visualized. The window can
access and set the masters alFile, refFile and batchFolder.
The window can also call the outputPopup function (to specify
the contents of final summary) and start the actual
batchProcess function.
INPUT: None
OUTPUT: None
"""
if master.batchWindow == 1:
return
master.batchWindow = 1
self.al = tk.StringVar()
self.ref = tk.StringVar()
self.folder = tk.StringVar()
if master.alFile:
self.al.set(master.alFile)
if master.refFile:
self.ref.set(master.refFile)
if master.batchFolder:
self.folder.set(master.batchFolder)
def alButton():
master.openAlFile()
self.al.set(master.alFile)
def refButton():
master.openRefFile()
self.ref.set(master.refFile)
def batchButton():
master.openBatchFolder()
self.folder.set(master.batchFolder)
def close(self):
master.batchWindow = 0
top.destroy()
def run():
master.batchWindow = 0
top.destroy()
master.batchProcess(master)
top = self.top = tk.Toplevel()
top.protocol( "WM_DELETE_WINDOW", lambda: close(self))
        self.aligns = tk.Button(top, text = "Alignment File", width = 25, command = lambda: alButton())
self.aligns.grid(row = 2, column = 0, sticky = tk.W)
self.alLabel = tk.Label(top, textvariable = self.al, width = 25)
self.alLabel.grid(row = 2, column = 1)
self.calibrate = tk.Checkbutton(top, text = "Calibration", variable = master.calFile, onvalue = 1, offvalue = 0)
self.calibrate.grid(row = 3, column = 0, sticky = tk.W)
self.compos = tk.Button(top, text = "Reference File", width = 25, command = lambda: refButton())
self.compos.grid(row = 4, column = 0, sticky = tk.W)
self.com = tk.Label(top, textvariable = self.ref, width = 25)
self.com.grid(row = 4, column = 1)
self.batchDir = tk.Button(top, text = "Batch Directory", width = 25, command = lambda: batchButton())
self.batchDir.grid(row = 5, column = 0, sticky = tk.W)
self.batch = tk.Label(top, textvariable = self.folder, width = 25)
self.batch.grid(row = 5, column = 1)
self.output = tk.Button(top, text = "Output Format", width = 25, command = lambda: master.outputPopup(master))
self.output.grid(row = 6, column = 0,columnspan = 2)
self.run = tk.Button(top, text = "Run Batch Process", width = 25, command = lambda: run())
self.run.grid(row = 7, column = 0, columnspan = 2)
#top.lift()
# Couple the attributes to button presses
top.attributes("-topmost", True)
def outputPopup(self,master):
""" This function creates a pop up box to specify what output
should be shown in the final summary. The default value for all
variables is off (0) and by ticking a box it is set to on (1).
INPUT: None
OUTPUT: None
"""
if master.outputWindow == 1:
return
master.outputWindow = 1
def select_all(self):
master.analyteIntensity.set(1)
master.analyteRelIntensity.set(1)
master.analyteBackground.set(1)
master.analyteNoise.set(1)
master.analytePerCharge.set(1)
master.analyteBckSub.set(1)
master.normalizeCluster.set(1)
master.alignmentQC.set(1)
master.qualityControl.set(1)
master.spectraQualityControl.set(1)
def select_none(self):
master.analyteIntensity.set(0)
master.analyteRelIntensity.set(0)
master.analyteBackground.set(0)
master.analyteNoise.set(0)
master.analytePerCharge.set(0)
master.analyteBckSub.set(0)
master.normalizeCluster.set(0)
master.alignmentQC.set(0)
master.qualityControl.set(0)
master.spectraQualityControl.set(0)
def close(self):
master.outputWindow = 0
top.destroy()
top = self.top = tk.Toplevel()
top.protocol( "WM_DELETE_WINDOW", lambda: close(self))
self.all = tk.Button(top, text = "Select All", command = lambda: select_all(self))
self.all.grid(row = 0, column = 0, sticky = tk.W)
self.none = tk.Button(top, text = "Select None", command = lambda: select_none(self))
self.none.grid(row = 0, column = 1, sticky = tk.E)
self.text1 = tk.Label(top, text = "Base Outputs", font="bold")
self.text1.grid(row = 1, column = 0, sticky = tk.W)
self.text2 = tk.Label(top, text = "Output Modifiers", font="bold")
self.text2.grid(row = 1, column = 1, sticky = tk.W)
# Analyte Intensity (*,#)
self.ai = tk.Checkbutton(top, text = u"Analyte Intensity\u00B9\u00B7\u00B2", variable = master.analyteIntensity, onvalue = 1, offvalue = 0)
self.ai.grid(row = 2, column = 0, sticky = tk.W)
self.ri = tk.Checkbutton(top, text = u"Relative Intensity\u00B9\u00B7\u00B2\u00B7\u00B3", variable = master.analyteRelIntensity, onvalue = 1, offvalue = 0)
self.ri.grid(row = 3, column = 0, sticky = tk.W)
self.back = tk.Checkbutton(top, text = u"Analyte Background\u00B9", variable = master.analyteBackground, onvalue = 1, offvalue = 0)
self.back.grid(row = 4, column = 0, sticky = tk.W)
self.analNoise = tk.Checkbutton(top, text = u"Analyte Noise\u00B9", variable = master.analyteNoise, onvalue = 1, offvalue = 0)
self.analNoise.grid(row = 5, column = 0, sticky = tk.W)
self.chargeState = tk.Checkbutton(top, text = u"\u00B9Intensities per Charge State", variable = master.analytePerCharge, onvalue = 1, offvalue = 0)
self.chargeState.grid(row = 2, column = 1, sticky = tk.W)
self.bckSub = tk.Checkbutton(top, text = u"\u00B2Background subtracted Intensities", variable = master.analyteBckSub, onvalue = 1, offvalue = 0)
self.bckSub.grid(row = 3, column = 1, sticky = tk.W)
self.norClus = tk.Checkbutton(top, text = u"\u00B3Normalization per cluster", variable = master.normalizeCluster, onvalue = 1, offvalue = 0)
self.norClus.grid(row = 4, column = 1, sticky = tk.W)
self.align = tk.Checkbutton(top, text="Alignment QC", variable=master.alignmentQC, onvalue=1, offvalue=0)
self.align.grid(row = 6, column=0, sticky=tk.W)
self.qc = tk.Checkbutton(top, text = "Analyte QC", variable = master.qualityControl, onvalue = 1, offvalue = 0)
self.qc.grid(row = 7, column = 0, sticky = tk.W)
self.specQC = tk.Checkbutton(top, text="Spectral QC", variable = master.spectraQualityControl, onvalue=1, offvalue=0)
self.specQC.grid(row = 8, column = 0, sticky = tk.W)
self.button = tk.Button(top,text='Ok',command = lambda: close(self))
self.button.grid(row = 9, column = 0, columnspan = 2)
top.lift()
def binarySearch(self, array, target, high, direction):
"""Returns element number directly to the left in array 'array'
of specified element 'target', assuming 'array[x][0]' is sorted,
if direction is set as 'left'.
The return value a is such that all elements in array[:a] have
element < target, and all e in array[a:] have element >= target.
Returns element number directly to the right in array 'array'
of specified element 'target', assuming 'array[x][0]' is sorted,
if direction is set as 'right'
The return value a is such that all elements in array[:a] have
element <= target, and all e in array[a:] have element > target.
former left"""
if target >= array[0][0] and target <= array[high][0]:
a = 0
b = high
while a < b:
mid=(a+b)//2
if direction == 'left':
if array[mid][0] < target:
a=mid+1
else:
b=mid
if direction == 'right':
if array[mid][0] > target:
b = mid
else:
a = mid+1
return a
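    # Illustrative usage, assuming a hypothetical (m/z, intensity) spectrum that
    # is sorted on m/z, as binarySearch expects:
    #
    #   spectrum = [(999.8, 10.0), (1000.0, 80.0), (1000.2, 30.0), (1000.6, 5.0)]
    #   low = self.binarySearch(spectrum, 1000.0 - 0.1, len(spectrum) - 1, 'left')
    #   high = self.binarySearch(spectrum, 1000.0 + 0.1, len(spectrum) - 1, 'right')
    #   window = spectrum[low:high]   # data points within +/- 0.1 Th of 1000.0
    #
    # When the target falls outside the spectrum range the function falls through
    # and implicitly returns None, which callers such as extractData test for.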
def openFile(self):
""" This function opens a Tkinter filedialog, asking the user
to select a file. The chosen file is then read (by the readData
function) and the read data is used to plot the selected spectrum
on the screen (by the plotData function).
INPUT: None
OUTPUT: None
"""
file_path = filedialog.askopenfilename()
if not file_path:
pass
else:
setattr(self,'inputFile',file_path)
def openCalFile(self):
""" This function opens a Tkinter filedialog, asking the user
to select a file. The chosen file is then set to the
self.calFile variable.
INPUT: None
OUTPUT: None
"""
file_path = filedialog.askopenfilename()
if not file_path:
pass
else:
setattr(self,'calFile',file_path)
def openBatchFolder(self):
""" This function opens a Tkinter filedialog, asking the user
to select a directory. The chosen directory is then set to the
self.batchFolder variable.
INPUT: None
OUTPUT: None
"""
folder_path = filedialog.askdirectory()
if not folder_path:
pass
else:
setattr(self,'batchFolder',folder_path)
def openRefFile(self):
""" This function opens a Tkinter filedialog, asking the user
to select a file. The chosen file is then set to the
self.refFile variable.
INPUT: None
OUTPUT: None
"""
file_path = filedialog.askopenfilename()
if not file_path:
pass
else:
setattr(self,'refFile',file_path)
def openAlFile(self):
""" This function opens a Tkinter filedialog, asking the user
to select a file. The chosen file is then set to the
self.alFile variable.
INPUT: None
OUTPUT: None
"""
file_path = filedialog.askopenfilename()
if not file_path:
pass
else:
setattr(self,'alFile',file_path)
def processBlock(self, block, array, readTimes):
""" This function processes a data block as taken from the input
file.
INPUT: A data block from the mzXML file
OUTPUT: None
"""
#if "scan num" in block:
# scan = block.split("scan num")[1]
# scan = scan.split("\"")[1]
if "retentionTime" in block:
rt = block.split("retentionTime")[1]
rt = rt.split("\"")[1]
if rt[0] == 'P':
rt = rt[2:-1]
#if "peaksCount" in block:
# peaks = block.split("peaksCount")[1]
# FIX not to catch zlib in encoded data
if '"zlib"' in block:
compression = True
# FIX for implicit no compression
else:
compression = False
if "byteOrder" in block:
byteOrder = block.split("byteOrder")[1]
byteOrder = byteOrder.split("\"")[1]
if "precision" in block:
precision = block.split("precision")[1]
precision = precision.split("\"")[1]
# FIX pairOrder is Bruker format bending
if "contentType" in block or "pairOrder" in block:
peaks = block.split('"m/z-int">')[1]
peaks = peaks.split("</peaks>")[0]
if peaks:
if readTimes:
flag = 0
for i in readTimes:
if float(rt) >= i[0] and float(rt) <= i[1]:
self.mzXMLDecoder(rt, peaks, precision, compression, byteOrder, array)
flag = 1
if flag == 0:
array.append((float(rt),None))
else:
self.mzXMLDecoder(rt, peaks, precision, compression, byteOrder, array)
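    # Illustrative note: processBlock receives one raw <scan> ... </scan> chunk.
    # A hypothetical, heavily trimmed example of the attributes it parses:
    #
    #   block = ('<scan num="42" retentionTime="PT600.5S" peaksCount="3">'
    #            '<peaks compressionType="zlib" precision="64" byteOrder="network"'
    #            ' contentType="m/z-int">...base64...</peaks></scan>')
    #
    # The retention time is stripped of its ISO-8601 "PT"/"S" wrapper and the
    # base64 payload is handed to mzXMLDecoder together with the precision,
    # compression and byte-order flags.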
######################################################
# START OF FUNCTIONS RELATED TO PARSING ANALYTE FILE #
######################################################
#def getChanceNetwork(self,(mass,carbons,hydrogens,nitrogens,oxygens17,oxygens18,sulfurs33,sulfurs34,sulfurs36)):
def getChanceNetwork(self, foo):
""" This function calculates the total chance network based on
all the individual distributions. The function multiplies all
the chances to get a single chance for a single option.
INPUT: A list containing the Analyte m/z followed by several
other lists (1 for each isotopic state).
OUTPUT: A list of float tuples (isotopic m/z, isotopic chance)
"""
mass,carbons,hydrogens,nitrogens,oxygens17,oxygens18,sulfurs33,sulfurs34,sulfurs36 = foo
totals = []
for x in itertools.product(carbons,hydrogens,nitrogens,oxygens17,oxygens18,sulfurs33,sulfurs34,sulfurs36):
i, j, k, l, m, n, o, p = x
totals.append((mass+i[0]+j[0]+k[0]+l[0]+m[0]+n[0]+o[0]+p[0],
i[1]*j[1]*k[1]*l[1]*m[1]*n[1]*o[1]*p[1]))
return totals
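    # Illustrative sketch with hypothetical numbers: getChanceNetwork takes the
    # Cartesian product of the per-element isotope distributions, summing the
    # mass shifts and multiplying the chances of each combination, e.g. for two
    # independent distributions:
    #
    #   carbons = [(0.0, 0.94), (1.00336, 0.06)]       # (mass shift, chance)
    #   oxygens18 = [(0.0, 0.996), (2.00425, 0.004)]
    #   network = [(c[0] + o[0], c[1] * o[1])
    #              for c in carbons for o in oxygens18]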
def mergeChances(self,totals):
""" This function merges all the isotopic chances based on the
specified resolution of the machine.
INPUT: A list of float tuples (isotopic m/z, isotopic chance)
OUTPUT: A sorted list of float tuples (isotopic m/z, isotopic
chance).
"""
results = []
newdata = {d: True for d in totals}
for k, v in totals:
if not newdata[(k,v)]: continue
newdata[(k,v)] = False
# use each piece of data only once
keys,values = [k*v],[v]
for kk, vv in [d for d in totals if newdata[d]]:
if abs(k-kk) < EPSILON:
keys.append(kk*vv)
values.append(vv)
newdata[(kk,vv)] = False
results.append((sum(keys)/sum(values),sum(values)))
return results
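    # Illustrative note: mergeChances collapses isotope combinations whose masses
    # differ by less than EPSILON into one intensity-weighted centroid,
    #     merged_mass = sum(m_i * p_i) / sum(p_i),  merged_chance = sum(p_i)
    # so, for example, [(1000.0000, 0.6), (1000.0004, 0.2)] with EPSILON = 0.001
    # merges to approximately [(1000.0001, 0.8)].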
def calcDistribution(self, element, number):
""" This function calculates the fraction of the total intensity
that is present in each isotope of the given element based on
a binomial distribution. The function takes the name of the
element and the number of atoms of said element as an input and
returns a list of (m/z,fraction) tuples. The number of isotopes
        that is returned is dependent on the distribution, once fractions
fall below 0.001 the function stops.
INPUT1: A string containing the code for the element (ie 33S)
INPUT2: An integer listing the number of atoms
OUTPUT: A list of float tuples (isotope m/z, isotope fraction).
"""
fractions = []
for i in element:
lastFraction = 0.
j = 0
while j <= number:
nCk = math.factorial(number) / (math.factorial(j) * math.factorial(number - j))
f = nCk * i[1]**j * (1 - i[1])**(number-j)
fractions.append((i[2]*j,f))
j+= 1
if f < 0.001 and f < lastFraction:
break
lastFraction = f
return fractions
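    # Illustrative note: the fraction computed above is the binomial term
    # P(k) = C(n, k) * p**k * (1 - p)**(n - k), with n the number of atoms,
    # p the abundance of the heavy isotope and k the number of heavy atoms.
    # A minimal standalone check, assuming ~1.07% 13C abundance:
    #
    #   import math
    #   n, k, p = 6, 1, 0.0107
    #   nCk = math.factorial(n) // (math.factorial(k) * math.factorial(n - k))
    #   nCk * p**k * (1 - p)**(n - k)   # ~0.061 for one 13C in a C6 skeleton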
def parseAnalyte(self,Analyte):
""" This function splits the Analyte input string into a parts
and calculates the total number of each element of interest per
Analyte. The function will then attach further elements based on
the user specified mass modifiers before calling the isotopic
distribution function. The function finally returns a list
containing the analyte mass and distribution lists for each
isotopic state.
INPUT: A string containing the Analyte (ie 'H4N4')
OUTPUT: A list containing the Analyte m/z followed by several
other lists (1 for each isotopic state).
"""
results = []
mass = 0
numCarbons = 0
numHydrogens = 0
numNitrogens = 0
numOxygens = 0
numSulfurs = 0
totalElements = 0
units = ["".join(x) for _,x in itertools.groupby(Analyte,key=str.isdigit)]
        # Calculate the base composition values
for index,j in enumerate(units):
present_flag = False
for k in UNITS:
if j == k:
try:
int(units[index+1])
except:
messagebox.showinfo(
"Error Message","There is no number "+
"specified for building block "+
str(j))
sys.exit()
mass += float(BLOCKS[k]['mass']) * float(units[index+1])
numCarbons += int(BLOCKS[k]['carbons']) * int(units[index+1])
numHydrogens += int(BLOCKS[k]['hydrogens']) * int(units[index+1])
numNitrogens += int(BLOCKS[k]['nitrogens']) * int(units[index+1])
numOxygens += int(BLOCKS[k]['oxygens']) * int(units[index+1])
numSulfurs += int(BLOCKS[k]['sulfurs']) * int(units[index+1])
present_flag = True
if present_flag == False and j.isalpha():
messagebox.showinfo(
"Error Message","The specified building block of "+
str(j)+" is unknown. Did you create the building "+
"block in the LaCyTools blocks directory?")
sys.exit()
# Attach the mass modifier values
for j in MASS_MODIFIERS:
mass += float(BLOCKS[j]['mass'])
numCarbons += float(BLOCKS[j]['carbons'])
numHydrogens += int(BLOCKS[j]['hydrogens'])
numNitrogens += int(BLOCKS[j]['nitrogens'])
numOxygens += int(BLOCKS[j]['oxygens'])
numSulfurs += int(BLOCKS[j]['sulfurs'])
# Calculate the distribution for the given value
carbons = self.calcDistribution(C,numCarbons)
hydrogens = self.calcDistribution(H,numHydrogens)
nitrogens = self.calcDistribution(N,numNitrogens)
oxygens17 = self.calcDistribution(O17,numOxygens)
oxygens18 = self.calcDistribution(O18,numOxygens)
sulfurs33 = self.calcDistribution(S33,numSulfurs)
sulfurs34 = self.calcDistribution(S34,numSulfurs)
sulfurs36 = self.calcDistribution(S36,numSulfurs)
return ((mass,carbons,hydrogens,nitrogens,oxygens17,oxygens18,sulfurs33,sulfurs34,sulfurs36))
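    # Illustrative note: the itertools.groupby call above splits an analyte
    # string into alternating letter/number units, e.g.
    #
    #   analyte = "H5N4"
    #   units = ["".join(x) for _, x in itertools.groupby(analyte, key=str.isdigit)]
    #   # -> ['H', '5', 'N', '4'], after which each block's mass and atom
    #   # counts are multiplied by its trailing number.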
def initCompositionMasses(self, file):
""" This function reads the composition file. Calculates the
masses for the compositions read from the composition file.
The function then calculates the mass and fraction of total
        ions that should theoretically be present. The final output
is a modified reference list containing each analyte's structure
and window followed by a list of isotope m/z and isotopic
fraction.
INPUT: A string containing the path of the composition file
OUTPUT: None
"""
lines = []
with open(file,'r') as fr:
for line in fr:
line = line.rstrip()
lines.append(line)
# Chop composition into sub units and get exact mass & carbon count
analyteFile = os.path.join(self.batchFolder,"analytes.ref")
if OVERWRITE_ANALYTES == False:
print ("USING EXISTING REFERENCE FILE")
return
elif OVERWRITE_ANALYTES == True:
if self.log == True:
with open('LaCyTools.log', 'a') as flog:
flog.write(str(datetime.now())+ "\tPRE-PROCESSING REFERENCE FILE\n")
with open(analyteFile,'w') as fw:
fw.write("# Peak\tm/z\tRel Area\twindow\trt\ttime window\tCalibration\n")
for i in lines:
try:
if i[0] == "#":
continue
except IndexError:
if self.log == True:
with open('LaCyTools.log', 'a') as flog:
flog.write(str(datetime.now())+ "\tIncorrect line observed in: "+str(analyteFile)+"\n")
except:
if self.log == True:
with open('LaCyTools.log', 'a') as flog:
flog.write(str(datetime.now())+ "\tUnexpected Error: "+str(sys.exc_info()[0])+"\n")
i = i.split("\t")
# Initialize variables
massWindow = MASS_WINDOW
timeWindow = TIME_WINDOW
minCharge = MIN_CHARGE
maxCharge = MAX_CHARGE
calibration = False
# Check optional variables
if len(i) >= 2:
time = float(i[1])
if len(i) > 2:
if i[2]:
massWindow = float(i[2])
if len(i) > 3:
if i[3]:
timeWindow = int(i[3])
if len(i) > 4:
if i[4]:
minCharge = int(i[4])
if len(i) > 5:
if i[5]:
maxCharge = int(i[5])
if len(i) > 6:
if i[6]:
calibration = True
# End of variable check
values = self.parseAnalyte(i[0])
totals = self.getChanceNetwork(values)
results = self.mergeChances(totals)
results = self.selectIsotopes(results)
# Write analyte file
for j in range(minCharge,maxCharge+1):
# Adding the padding windows to determine IPQ
for k in range(-EXTRACTION_PADDING, 0):
fw.write(str(i[0])+"_"+str(j)+"_"+str(k)+"\t"+str((results[0][0]+k*BLOCKS[CHARGE_CARRIER[0]]['mass']+j*BLOCKS[CHARGE_CARRIER[0]]['mass'])/j)+"\t"+str(0)+"\t"+str(massWindow)+"\t"+str(time)+"\t"+str(timeWindow)+"\tFalse\n")
maxIsotope = max(results,key=lambda tup:tup[1])[1]
for k in results:
if calibration == True and k[1] == maxIsotope:
fw.write(str(i[0])+"_"+str(j)+"_"+str(k[2])+"\t"+str((k[0]+j*BLOCKS[CHARGE_CARRIER[0]]['mass'])/j)+"\t"+str(k[1])+"\t"+str(massWindow)+"\t"+str(time)+"\t"+str(timeWindow)+"\tTrue\n")
else:
fw.write(str(i[0])+"_"+str(j)+"_"+str(k[2])+"\t"+str((k[0]+j*BLOCKS[CHARGE_CARRIER[0]]['mass'])/j)+"\t"+str(k[1])+"\t"+str(massWindow)+"\t"+str(time)+"\t"+str(timeWindow)+"\tFalse\n")
if self.log == True:
with open('LaCyTools.log', 'a') as flog:
flog.write(str(datetime.now())+ "\tPRE-PROCESSING COMPLETE\n")
else:
print ("Incorrect value for the OVERWRITE_ANALYTES parameter")
####################################################
# END OF FUNCTIONS RELATED TO PARSING ANALYTE FILE #
####################################################
def extractData(self,ref,array,results):
""" This is the controller function for quantitation of data.
INPUT: A ref file and input file
OUTPUT: A list of results consisting of (area, 'background',
accurate mass, maximum intensity point and the current
analyte)
"""
if self.refFile == "":
messagebox.showinfo("Error Message","No reference file selected")
return
if self.inputFile == "":
messagebox.showinfo("Error Message","No input file selected")
return
analyteBuffer = ""
for i in ref:
intensity = 0
x_points = []
y_points = []
maximum = (0,0)
maxInt = 0
# Not pretty but it fixes the charge not being taken into account
# with extraction and background determination
charge = int(i[0].split("_")[-2])
if '#' in i[0]:
pass
else:
lowMz = self.binarySearch(array,float(i[1])-float(i[3]),len(array)-1,'left')
highMz = self.binarySearch(array,float(i[1])+float(i[3]),len(array)-1,'right')
if "_".join(i[0].split("_")[:-1]) == analyteBuffer:
pass
else:
background = self.getBackground(array, float(i[1]), charge, float(i[3]))
analyteBuffer = "_".join(i[0].split("_")[:-1])
if lowMz and highMz:
try:
range(lowMz,highMz)
except TypeError:
print ("\nReference: "+str(i[0])+" has incorrect m/z parameters")
input("Press ENTER to exit")
sys.exit()
for k in range(lowMz, highMz):
# Get maximum point for S/N calculation
if int(array[k][1]) > maxInt:
maxInt = int(array[k][1])
if EXTRACTION_TYPE == 1:
if int(array[k][1]) > intensity:
intensity = int(array[k][1])
elif EXTRACTION_TYPE == 0:
intensity += int(array[k][1])
elif EXTRACTION_TYPE == 2:
intensity += array[k][1] * ((array[highMz][0] - array[lowMz][0]) / (highMz - lowMz))
# We need these to get the local maxima
x_points.append(array[k][0])
y_points.append(array[k][1])
######################################################################
# Only spend time on doing this if we actually wanted the PPM Errors #
# This is not being used yet, but should! #
######################################################################
if self.qualityControl.get() == 1:
try:
newX = numpy.linspace(x_points[0],x_points[-1],2500*(x_points[-1]-x_points[0]))
f = InterpolatedUnivariateSpline(x_points,y_points)
ySPLINE = f(newX)
for index, j in enumerate(ySPLINE):
if j > maximum[1]:
maximum = (newX[index],j)
except ValueError:
data = zip(x_points,y_points)
if self.log == True:
with open('LaCyTools.log', 'a') as flog:
flog.write(str(datetime.now())+ "\tGuassian Curve Fit failed for analyte: "+str(i[0])+", reverting to non fitted local maximum\n")
for j in data:
if j[1] > maximum[1]:
maximum = (j[0],j[1])
except:
print ("Analyte: "+str(i[0])+" is being troublesome, kill it")
else:
intensity = 0
background = (0,0,0)
maximum = (0,0)
# Check if maxima above S/N cut-off
results.append((intensity,background,maximum[0],maxInt,i))
if self.batchProcessing == 1:
return results
else:
for i in results:
print (i)
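    # Illustrative note: the EXTRACTION_TYPE switch above reduces the signal in
    # the m/z window to a single number, roughly:
    #
    #   1 -> peak height        intensity = max(y)
    #   0 -> summed intensity   intensity = sum(y)
    #   2 -> peak area          intensity = sum(y) * average m/z spacing
    #
    # The resulting (intensity, background, accurate mass, maximum intensity,
    # reference) tuples are later written per file by writeResults.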
def getBackground(self, array, target, charge, width):
""" This functin will determine the background and noise for a
given analyte.
INPUT: The spectrum in array form, the exact m/z of the analyte,
the charge of the analyte and the m/z window
OUTPUT: A list of (the average background, the background area
and the noise)
"""
        backgroundPoint = float('inf') # Start value, so the first window always becomes the minimum
totals = []
for i in numpy.arange(-BACKGROUND_WINDOW,BACKGROUND_WINDOW,1.0/charge):
windowAreas = []
windowIntensities = []
windowMz = []
begin = self.binarySearch(array,(float(target)-i*C[0][2])-float(width),len(array)-1,'left')
end = self.binarySearch(array,(float(target)-i*C[0][2])+float(width),len(array)-1,'right')
if begin == None or end == None:
print ("Specified m/z value of " +str((float(target)-i*C[0][2])-float(width)) + " or " + str((float(target)-i*C[0][2])+float(width))+ " outside of spectra range")
input("Press enter to exit")
sys.exit()
for j in array[begin:end]:
windowAreas.append(j[1] * ((array[end][0] - array[begin][0]) / (end - begin)))
windowIntensities.append(j[1])
windowMz.append(j[0])
totals.append((windowAreas,windowIntensities,windowMz))
# Find the set of 5 consecutive windows with lowest average intensity
if self.background == "MIN":
for i in range(0,(2*BACKGROUND_WINDOW)-4):
mix = totals[i][1]+totals[i+1][1]+totals[i+2][1]+totals[i+3][1]+totals[i+4][1]
avgBackground = numpy.average([sum(totals[i][0]),sum(totals[i+1][0]),sum(totals[i+2][0]),sum(totals[i+3][0]),sum(totals[i+4][0])])
dev = numpy.std(mix)
avg = numpy.average(mix)
if avg < backgroundPoint:
backgroundPoint = avg
backgroundArea = avgBackground
if self.noise == "RMS":
noise = dev
elif self.noise == "MM":
noise = max(mix) - min(mix)
# Find the set of 5 consecutive windows with median average intensity
elif self.background == "MEDIAN":
values = []
for i in range(0, (2*BACKGROUND_WINDOW)-4):
mix = totals[i][1]+totals[i+1][1]+totals[i+2][1]+totals[i+3][1]+totals[i+4][1]
avgBackground = numpy.average([sum(totals[i][0]), sum(totals[i+1][0]), sum(totals[i+2][0]), sum(totals[i+3][0]), sum(totals[i+4][0])])
dev = numpy.std(mix)
avg = numpy.average(mix)
if self.noise == "RMS":
noise = dev
elif self.noise == "MM":
noise = max(mix) - min(mix)
values.append((avg, avgBackground, noise))
sortedValues = sorted(values, key=lambda x: x[0])
a, b, c = zip(*sortedValues)
backgroundPoint = a[len(a)//2]
backgroundArea = b[len(b)//2]
noise = c[len(c)//2]
# NOBAN METHOD
elif self.background == "NOBAN":
dataPoints = []
for i in range(0, (2*BACKGROUND_WINDOW)):
dataPoints.extend(totals[i][1])
sortedData = sorted(dataPoints)
startSize = int(0.25 * float(len(sortedData)))
currSize = startSize
currAverage = numpy.average(sortedData[0:currSize])
if self.noise == "MM":
currNoise = max(sortedData[0:currSize]) - min(sortedData[0:currSize])
elif self.noise == "RMS":
currNoise = numpy.std(sortedData[0:currSize])
directionFlag = 0
for k in range(0,len(sortedData)-(startSize+1)):
if sortedData[currSize+1] < currAverage + 3 * currNoise:
                    directionFlag = 1
currSize += 1
currAverage = numpy.average(sortedData[0:currSize])
if self.noise == "MM":
currNoise = max(sortedData[0:currSize]) - min(sortedData[0:currSize])
elif self.noise == "RMS":
currNoise = numpy.std(sortedData[0:currSize])
else:
if sortedData[currSize-1] > currAverage + 3 * currNoise and directionFlag == 0:
currSize -= 1
currAverage = numpy.average(sortedData[0:currSize])
if self.noise == "MM":
currNoise = max(sortedData[0:currSize]) - min(sortedData[0:currSize])
elif self.noise == "RMS":
currNoise = numpy.std(sortedData[0:currSize])
else:
break
# Get Area
# Get length and spacing of window
windowLength = 0
for i in range(0, (2*BACKGROUND_WINDOW)):
if len(totals[i][1]) > windowLength:
windowLength = len(totals[i][1])
spacing = (max(totals[i][2])-min(totals[i][2])) / windowLength
currArea = windowLength * (currAverage * spacing)
# Assign values to generic names
backgroundPoint = currAverage
backgroundArea = currArea
noise = currNoise
return (backgroundPoint,backgroundArea,noise)
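    # Illustrative note: the three background modes above differ only in which
    # set of m/z windows is treated as background:
    #   MIN    -> the 5 consecutive windows with the lowest average intensity
    #   MEDIAN -> the 5 consecutive windows with the median average intensity
    #   NOBAN  -> an adaptive region grown from the lowest 25% of data points
    #             until points exceed average + 3 * noise
    # with the noise taken as either the standard deviation (RMS) or the
    # max - min spread (MM) of the selected points, e.g.
    #
    #   noise_rms = numpy.std(background_points)
    #   noise_mm = max(background_points) - min(background_points)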
def matchFeatureTimes(self, features):
""" This function takes a list of features/times and combines
them into a singe list, useful for reading only relevant
scans later in the program.
INPUT: A list of (m/z,rt) tuples
OUTPUT: A list of (rt,rt) tuples
"""
wanted = []
features = sorted(features, key=lambda x:x[1])
current = (float(features[0][1])-ALIGNMENT_BACKGROUND_MULTIPLIER*ALIGNMENT_TIME_WINDOW, float(features[0][1])+ALIGNMENT_BACKGROUND_MULTIPLIER*ALIGNMENT_TIME_WINDOW)
for i in features:
if float(i[1])-ALIGNMENT_BACKGROUND_MULTIPLIER*ALIGNMENT_TIME_WINDOW >= current[0] and float(i[1])-ALIGNMENT_BACKGROUND_MULTIPLIER*ALIGNMENT_TIME_WINDOW < current[1]:
if float(i[1])+ALIGNMENT_BACKGROUND_MULTIPLIER*ALIGNMENT_TIME_WINDOW > current[1]:
current = (current[0],float(i[1])+ALIGNMENT_BACKGROUND_MULTIPLIER*ALIGNMENT_TIME_WINDOW)
else:
wanted.append(current)
current = (float(i[1])-ALIGNMENT_BACKGROUND_MULTIPLIER*ALIGNMENT_TIME_WINDOW, float(i[1])+ALIGNMENT_BACKGROUND_MULTIPLIER*ALIGNMENT_TIME_WINDOW)
wanted.append(current)
return wanted
def matchAnalyteTimes(self, ref):
""" This function takes a list of references and creates a list
of time tuples, that is needed to read only relevant scans later
in the program.
INPUT: A list of references (name, mz, int, window and so forth)
OUTPUT: A list of (rt,rt) tuples
"""
wanted = []
ref = sorted(ref,key=lambda x:x[4])
for i in ref:
if (float(i[4])-float(i[5])) not in wanted:
wanted.append((float(i[4])-float(i[5]),float(i[4])+float(i[5])))
return list(self.merge_ranges(wanted))
def merge_ranges(self,ranges):
"""
Merge overlapping and adjacent ranges and yield the merged ranges
in order. The argument must be an iterable of pairs (start, stop).
>>> list(merge_ranges([(5,7), (3,5), (-1,3)]))
[(-1, 7)]
>>> list(merge_ranges([(5,6), (3,4), (1,2)]))
[(1, 2), (3, 4), (5, 6)]
>>> list(merge_ranges([]))
[]
Source = http://stackoverflow.com/questions/24130745/convert-generator-object-to-list-for-debugging
"""
        ranges = iter(sorted(ranges))
        try:
            current_start, current_stop = next(ranges)
        except StopIteration:
            return
for start, stop in ranges:
if start > current_stop:
# Gap between segments: output current segment and start a new one.
yield current_start, current_stop
current_start, current_stop = start, stop
else:
# Segments adjacent or overlapping: merge.
current_stop = max(current_stop, stop)
yield current_start, current_stop
def readData(self, array, readTimes):
""" This function reads mzXML files and has the scans decoded on
a per scan basis. The scans are identified by getting the line
number of the beginning and ending tag for a scan.
INPUT: file handle
OUTPUT: TBA
"""
header = True
started = False
block = ""
with open(self.inputFile,'r') as fr:
if self.log == True:
with open('LaCyTools.log', 'a') as flog:
flog.write(str(datetime.now())+ "\tProcessing "+str(self.inputFile)+"\n")
for number, line in enumerate(fr):
if '</dataProcessing>' in line:
header = False
if '<scan num="' in line and header == False:
started = True
if started == True:
block +=line
if '</scan>' in line and header == False and started == True:
self.processBlock(block, array, readTimes)
started = False
block = ""
#print "Finished processing "+str(self.inputFile)
def readPTData(self, array, readTimes):
""" TODO by Genadij Razdorov
"""
if self.log == True:
with open('LaCyTools.log', 'a') as flog:
flog.write(str(datetime.now())+ "\tProcessing "+str(self.inputFile)+"\n")
i = self.inputFileIdx
for row in self.ptFile.root.scans.where("sample == i"):
rt, art, idx, size = row[2:]
if art != 0:
rt = art
if readTimes:
for s, e in readTimes:
if rt >= s and rt <= e:
break
else:
array.append((rt, None))
continue
mzs = self.ptFile.root.mzs[idx][:size]
Is = self.ptFile.root.Is[idx][:size]
array.append((rt, numpy.vstack((numpy.cumsum(mzs), Is)).T))
def refParser(self, ref):
"""Reads the reference file and fills the list 'ref' with names
and parameters inherent to the chosen analytes to be integrated.
INPUT: An empty ref list
OUTPUT: A filled ref list
"""
with open(os.path.join(self.batchFolder,"analytes.ref"),'r') as fr:
for line in fr:
if line[0] == "#":
continue
parts=line.rstrip('\n').split('\t')
ref.append(parts)
def mzXMLDecoder(self, rt, peaks, precision, compression, byteOrder, array):
""" This function parses the encoded string from an mzXML file.
The decoded data is finally added to the data array containing
the entire measurement.
INPUT: An encoded string and the data array containing all of
        the measurement data that has been processed up to this point
        OUTPUT: A data array containing all of the measurement data that
        has been processed up to this point
"""
endian = ">"
if byteOrder == 'little':
endian = '<'
elif byteOrder == 'big':
endian = '>'
# get precision
if int(precision) == 64:
precision = '8'
else:
precision = '4'
# decode data
data = base64.b64decode(peaks)
# decompression
if compression == True:
data = zlib.decompress(data)
data = numpy.frombuffer(data, dtype=endian + 'f' + precision)
# format
new = numpy.vstack((data[::2], data[1::2])).T
# list notation
array.append((float(rt),new))
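    # Illustrative sketch of the decoding convention used above (base64, then
    # optional zlib, then interleaved m/z and intensity values), assuming
    # 64-bit big-endian data:
    #
    #   raw = zlib.decompress(base64.b64decode(peaks))
    #   values = numpy.frombuffer(raw, dtype='>f8')
    #   spectrum = numpy.vstack((values[::2], values[1::2])).T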
# Call the main app
root = tk.Tk()
app = App(root)
root.mainloop()
| apache-2.0 |