repo_name | path | copies | size | content | license
---|---|---|---|---|---
WangWenjun559/Weiss | summary/sumy/sklearn/ensemble/tests/test_weight_boosting.py | 32 | 15697 | """Testing for the boost module (sklearn.ensemble.boost)."""
import numpy as np
from sklearn.utils.testing import assert_array_equal, assert_array_less
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises, assert_raises_regexp
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import AdaBoostRegressor
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.svm import SVC, SVR
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.utils import shuffle
from sklearn import datasets
# Common random state
rng = np.random.RandomState(0)
# Toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y_class = ["foo", "foo", "foo", 1, 1, 1] # test string class labels
y_regr = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
y_t_class = ["foo", 1, 1]
y_t_regr = [-1, 1, 1]
# Load the iris dataset and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data, iris.target = shuffle(iris.data, iris.target, random_state=rng)
# Load the boston dataset and randomly permute it
boston = datasets.load_boston()
boston.data, boston.target = shuffle(boston.data, boston.target,
random_state=rng)
def test_classification_toy():
# Check classification on a toy dataset.
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, random_state=0)
clf.fit(X, y_class)
assert_array_equal(clf.predict(T), y_t_class)
assert_array_equal(np.unique(np.asarray(y_t_class)), clf.classes_)
assert_equal(clf.predict_proba(T).shape, (len(T), 2))
assert_equal(clf.decision_function(T).shape, (len(T),))
def test_regression_toy():
# Check regression on a toy dataset.
clf = AdaBoostRegressor(random_state=0)
clf.fit(X, y_regr)
assert_array_equal(clf.predict(T), y_t_regr)
def test_iris():
# Check consistency on dataset iris.
classes = np.unique(iris.target)
clf_samme = prob_samme = None
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(iris.data, iris.target)
assert_array_equal(classes, clf.classes_)
proba = clf.predict_proba(iris.data)
if alg == "SAMME":
clf_samme = clf
prob_samme = proba
assert_equal(proba.shape[1], len(classes))
assert_equal(clf.decision_function(iris.data).shape[1], len(classes))
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with algorithm %s and score = %f" % \
(alg, score)
# Somewhat hacky regression test: prior to
# ae7adc880d624615a34bafdb1d75ef67051b8200,
# predict_proba returned SAMME.R values for SAMME.
clf_samme.algorithm = "SAMME.R"
assert_array_less(0,
np.abs(clf_samme.predict_proba(iris.data) - prob_samme))
def test_boston():
# Check consistency on dataset boston house prices.
clf = AdaBoostRegressor(random_state=0)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert score > 0.85
def test_staged_predict():
# Check staged predictions.
rng = np.random.RandomState(0)
iris_weights = rng.randint(10, size=iris.target.shape)
boston_weights = rng.randint(10, size=boston.target.shape)
# AdaBoost classification
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, n_estimators=10)
clf.fit(iris.data, iris.target, sample_weight=iris_weights)
predictions = clf.predict(iris.data)
staged_predictions = [p for p in clf.staged_predict(iris.data)]
proba = clf.predict_proba(iris.data)
staged_probas = [p for p in clf.staged_predict_proba(iris.data)]
score = clf.score(iris.data, iris.target, sample_weight=iris_weights)
staged_scores = [
s for s in clf.staged_score(
iris.data, iris.target, sample_weight=iris_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_probas), 10)
assert_array_almost_equal(proba, staged_probas[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
# AdaBoost regression
clf = AdaBoostRegressor(n_estimators=10, random_state=0)
clf.fit(boston.data, boston.target, sample_weight=boston_weights)
predictions = clf.predict(boston.data)
staged_predictions = [p for p in clf.staged_predict(boston.data)]
score = clf.score(boston.data, boston.target, sample_weight=boston_weights)
staged_scores = [
s for s in clf.staged_score(
boston.data, boston.target, sample_weight=boston_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
def test_gridsearch():
# Check that base trees can be grid-searched.
# AdaBoost classification
boost = AdaBoostClassifier(base_estimator=DecisionTreeClassifier())
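# The 'base_estimator__max_depth' keys below use scikit-learn's double
# underscore convention to route grid-search parameters to the nested
# decision tree base estimator.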
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2),
'algorithm': ('SAMME', 'SAMME.R')}
clf = GridSearchCV(boost, parameters)
clf.fit(iris.data, iris.target)
# AdaBoost regression
boost = AdaBoostRegressor(base_estimator=DecisionTreeRegressor(),
random_state=0)
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2)}
clf = GridSearchCV(boost, parameters)
clf.fit(boston.data, boston.target)
def test_pickle():
# Check picklability.
import pickle
# Adaboost classifier
for alg in ['SAMME', 'SAMME.R']:
obj = AdaBoostClassifier(algorithm=alg)
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_equal(score, score2)
# Adaboost regressor
obj = AdaBoostRegressor(random_state=0)
obj.fit(boston.data, boston.target)
score = obj.score(boston.data, boston.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(boston.data, boston.target)
assert_equal(score, score2)
def test_importances():
# Check variable importances.
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=1)
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(X, y)
importances = clf.feature_importances_
assert_equal(importances.shape[0], 10)
assert_equal((importances[:3, np.newaxis] >= importances[3:]).all(),
True)
def test_error():
# Test that it gives proper exception on deficient input.
assert_raises(ValueError,
AdaBoostClassifier(learning_rate=-1).fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier(algorithm="foo").fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier().fit,
X, y_class, sample_weight=np.asarray([-1]))
def test_base_estimator():
# Test different base estimators.
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
# XXX doesn't work with y_class because RF doesn't support classes_
# Shouldn't AdaBoost run a LabelBinarizer?
clf = AdaBoostClassifier(RandomForestClassifier())
clf.fit(X, y_regr)
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
clf.fit(X, y_class)
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
clf = AdaBoostRegressor(RandomForestRegressor(), random_state=0)
clf.fit(X, y_regr)
clf = AdaBoostRegressor(SVR(), random_state=0)
clf.fit(X, y_regr)
# Check that an empty discrete ensemble fails in fit, not predict.
X_fail = [[1, 1], [1, 1], [1, 1], [1, 1]]
y_fail = ["foo", "bar", 1, 2]
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
assert_raises_regexp(ValueError, "worse than random",
clf.fit, X_fail, y_fail)
def test_sample_weight_missing():
from sklearn.linear_model import LinearRegression
from sklearn.cluster import KMeans
clf = AdaBoostClassifier(LinearRegression(), algorithm="SAMME")
assert_raises(ValueError, clf.fit, X, y_regr)
clf = AdaBoostRegressor(LinearRegression())
assert_raises(ValueError, clf.fit, X, y_regr)
clf = AdaBoostClassifier(KMeans(), algorithm="SAMME")
assert_raises(ValueError, clf.fit, X, y_regr)
clf = AdaBoostRegressor(KMeans())
assert_raises(ValueError, clf.fit, X, y_regr)
def test_sparse_classification():
# Check classification with sparse input.
class CustomSVC(SVC):
"""SVC variant that records the nature of the training set."""
def fit(self, X, y, sample_weight=None):
"""Modification on fit carries data type for later verification."""
super(CustomSVC, self).fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_multilabel_classification(n_classes=1, n_samples=15,
n_features=5,
return_indicator=True,
random_state=42)
# Flatten y to a 1d array
y = np.ravel(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train_sparse, y_train)
# Trained on dense format
dense_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
# decision_function
sparse_results = sparse_classifier.decision_function(X_test_sparse)
dense_results = dense_classifier.decision_function(X_test)
assert_array_equal(sparse_results, dense_results)
# predict_log_proba
sparse_results = sparse_classifier.predict_log_proba(X_test_sparse)
dense_results = dense_classifier.predict_log_proba(X_test)
assert_array_equal(sparse_results, dense_results)
# predict_proba
sparse_results = sparse_classifier.predict_proba(X_test_sparse)
dense_results = dense_classifier.predict_proba(X_test)
assert_array_equal(sparse_results, dense_results)
# score
sparse_results = sparse_classifier.score(X_test_sparse, y_test)
dense_results = dense_classifier.score(X_test, y_test)
assert_array_equal(sparse_results, dense_results)
# staged_decision_function
sparse_results = sparse_classifier.staged_decision_function(
X_test_sparse)
dense_results = dense_classifier.staged_decision_function(X_test)
for sparse_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sparse_res, dense_res)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
for sparse_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sparse_res, dense_res)
# staged_predict_proba
sparse_results = sparse_classifier.staged_predict_proba(X_test_sparse)
dense_results = dense_classifier.staged_predict_proba(X_test)
for sparse_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sparse_res, dense_res)
# staged_score
sparse_results = sparse_classifier.staged_score(X_test_sparse,
y_test)
dense_results = dense_classifier.staged_score(X_test, y_test)
for sparse_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sparse_res, dense_res)
# Verify sparsity of data is maintained during training
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([(t == csc_matrix or t == csr_matrix)
for t in types])
def test_sparse_regression():
# Check regression with sparse input.
class CustomSVR(SVR):
"""SVR variant that records the nature of the training set."""
def fit(self, X, y, sample_weight=None):
"""Modification on fit carries data type for later verification."""
super(CustomSVR, self).fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_regression(n_samples=15, n_features=50, n_targets=1,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostRegressor(
base_estimator=CustomSVR(),
random_state=1
).fit(X_train_sparse, y_train)
# Trained on dense format
dense_classifier = AdaBoostRegressor(
base_estimator=CustomSVR(),
random_state=1
).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
for sparse_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sparse_res, dense_res)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([(t == csc_matrix or t == csr_matrix)
for t in types])
| apache-2.0 |
vipullakhani/mi-instrument | mi/core/instrument/file_publisher.py | 5 | 4306 | """
@package mi.core.instrument.publisher
@file /mi-instrument/mi/core/instrument/file_publisher.py
@author Peter Cable
@brief Event file publisher
Release notes:
initial release
"""
import cPickle as pickle
import json
import numpy as np
import pandas as pd
import xarray as xr
from mi.core.instrument.publisher import Publisher
from mi.logging import log
class CountPublisher(Publisher):
def __init__(self, allowed):
super(CountPublisher, self).__init__(allowed)
self.total = 0
def _publish(self, events, headers):
for e in events:
try:
json.dumps(e)
except (ValueError, UnicodeDecodeError) as err:
log.exception('Unable to publish event: %r %r', e, err)
count = len(events)
self.total += count
log.info('Publish %d events (%d total)', count, self.total)
class FilePublisher(Publisher):
def __init__(self, *args, **kwargs):
super(FilePublisher, self).__init__(*args, **kwargs)
self.samples = {}
@staticmethod
def _flatten(sample):
values = sample.pop('values')
for each in values:
sample[each['value_id']] = each['value']
return sample
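# Illustration of the flattening above with a hypothetical particle (the
# field names are made up for this example):
#   {'stream_name': 'demo_stream', 'values': [{'value_id': 'temp', 'value': 10.1}]}
# becomes
#   {'stream_name': 'demo_stream', 'temp': 10.1}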
def _publish(self, events, headers):
for event in events:
# file publisher only applicable to particles
if event.get('type') != 'DRIVER_ASYNC_EVENT_SAMPLE':
continue
particle = event.get('value', {})
stream = particle.get('stream_name')
if stream:
particle = self._flatten(particle)
self.samples.setdefault(stream, []).append(particle)
def to_dataframes(self):
data_frames = {}
for particle_type in self.samples:
data_frames[particle_type] = self.fix_arrays(pd.DataFrame(self.samples[particle_type]))
return data_frames
def to_datasets(self):
datasets = {}
for particle_type in self.samples:
datasets[particle_type] = self.fix_arrays(pd.DataFrame(self.samples[particle_type]), return_as_xr=True)
return datasets
@staticmethod
def fix_arrays(data_frame, return_as_xr=False):
# round-trip the dataframe through xarray to get the multidimensional indexing correct
new_ds = xr.Dataset()
for each in data_frame:
if data_frame[each].dtype == 'object' and isinstance(data_frame[each].values[0], list):
data = np.array([np.array(x) for x in data_frame[each].values])
new_ds[each] = xr.DataArray(data)
else:
new_ds[each] = data_frame[each]
if return_as_xr:
return new_ds
return new_ds.to_dataframe()
def write(self):
log.info('Writing output files...')
self._write()
log.info('Done writing output files...')
def _write(self):
raise NotImplementedError
class CsvPublisher(FilePublisher):
def _write(self):
dataframes = self.to_dataframes()
for particle_type in dataframes:
file_path = '%s.csv' % particle_type
dataframes[particle_type].to_csv(file_path)
class PandasPublisher(FilePublisher):
def _write(self):
dataframes = self.to_dataframes()
for particle_type in dataframes:
# very large dataframes don't work with pickle
# split if too large
df = dataframes[particle_type]
max_size = 5000000
if len(df) > max_size:
num_slices = len(df) / max_size
slices = np.array_split(df, num_slices)
for index, df_slice in enumerate(slices):
file_path = '%s_%d.pd' % (particle_type, index)
df_slice.to_pickle(file_path)
else:
log.info('length of dataframe: %d', len(df))
file_path = '%s.pd' % particle_type
dataframes[particle_type].to_pickle(file_path)
class XarrayPublisher(FilePublisher):
def _write(self):
datasets = self.to_datasets()
for particle_type in datasets:
file_path = '%s.xr' % particle_type
with open(file_path, 'w') as fh:
pickle.dump(datasets[particle_type], fh, protocol=-1)
| bsd-2-clause |
18padx08/PPTex | PPTexEnv_x86_64/lib/python2.7/site-packages/matplotlib/backend_bases.py | 10 | 106046 | """
Abstract base classes define the primitives that renderers and
graphics contexts must implement to serve as a matplotlib backend
:class:`RendererBase`
An abstract base class to handle drawing/rendering operations.
:class:`FigureCanvasBase`
The abstraction layer that separates the
:class:`matplotlib.figure.Figure` from the backend specific
details like a user interface drawing area
:class:`GraphicsContextBase`
An abstract base class that provides color, line styles, etc...
:class:`Event`
The base class for all of the matplotlib event
handling. Derived classes such as :class:`KeyEvent` and
:class:`MouseEvent` store the meta data like keys and buttons
pressed, x and y locations in pixel and
:class:`~matplotlib.axes.Axes` coordinates.
:class:`ShowBase`
The base class for the Show class of each interactive backend;
the 'show' callable is then set to Show.__call__, inherited from
ShowBase.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange
import os
import sys
import warnings
import time
import io
import numpy as np
import matplotlib.cbook as cbook
import matplotlib.colors as colors
import matplotlib.transforms as transforms
import matplotlib.widgets as widgets
#import matplotlib.path as path
from matplotlib import rcParams
from matplotlib import is_interactive
from matplotlib import get_backend
from matplotlib._pylab_helpers import Gcf
from matplotlib.transforms import Bbox, TransformedBbox, Affine2D
import matplotlib.tight_bbox as tight_bbox
import matplotlib.textpath as textpath
from matplotlib.path import Path
from matplotlib.cbook import mplDeprecation
try:
from importlib import import_module
except:
# simple python 2.6 implementation (no relative imports)
def import_module(name):
__import__(name)
return sys.modules[name]
try:
from PIL import Image
_has_pil = True
except ImportError:
_has_pil = False
_default_filetypes = {
'ps': 'Postscript',
'eps': 'Encapsulated Postscript',
'pdf': 'Portable Document Format',
'pgf': 'PGF code for LaTeX',
'png': 'Portable Network Graphics',
'raw': 'Raw RGBA bitmap',
'rgba': 'Raw RGBA bitmap',
'svg': 'Scalable Vector Graphics',
'svgz': 'Scalable Vector Graphics'
}
_default_backends = {
'ps': 'matplotlib.backends.backend_ps',
'eps': 'matplotlib.backends.backend_ps',
'pdf': 'matplotlib.backends.backend_pdf',
'pgf': 'matplotlib.backends.backend_pgf',
'png': 'matplotlib.backends.backend_agg',
'raw': 'matplotlib.backends.backend_agg',
'rgba': 'matplotlib.backends.backend_agg',
'svg': 'matplotlib.backends.backend_svg',
'svgz': 'matplotlib.backends.backend_svg',
}
def register_backend(format, backend, description=None):
"""
Register a backend for saving to a given file format.
format : str
File extension
backend : module string or canvas class
Backend for handling file output
description : str, optional
Description of the file type. Defaults to an empty string
"""
if description is None:
description = ''
_default_backends[format] = backend
_default_filetypes[format] = description
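# Example usage (a sketch; this same registration is performed in
# FigureCanvasBase below when PIL is available)::
#
#     register_backend('jpg', 'matplotlib.backends.backend_agg',
#                      'Joint Photographic Experts Group')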
def get_registered_canvas_class(format):
"""
Return the registered default canvas for given file format.
Handles deferred import of required backend.
"""
if format not in _default_backends:
return None
backend_class = _default_backends[format]
if cbook.is_string_like(backend_class):
backend_class = import_module(backend_class).FigureCanvas
_default_backends[format] = backend_class
return backend_class
class ShowBase(object):
"""
Simple base class to generate a show() callable in backends.
Subclass must override mainloop() method.
"""
def __call__(self, block=None):
"""
Show all figures. If *block* is not None, then
it is a boolean that overrides all other factors
determining whether show blocks by calling mainloop().
The other factors are:
it does not block if run inside ipython's "%pylab" mode
it does not block in interactive mode.
"""
managers = Gcf.get_all_fig_managers()
if not managers:
return
for manager in managers:
manager.show()
if block is not None:
if block:
self.mainloop()
return
else:
return
# Hack: determine at runtime whether we are
# inside ipython in pylab mode.
from matplotlib import pyplot
try:
ipython_pylab = not pyplot.show._needmain
# IPython versions >= 0.10 tack the _needmain
# attribute onto pyplot.show, and always set
# it to False, when in %pylab mode.
ipython_pylab = ipython_pylab and get_backend() != 'WebAgg'
# TODO: The above is a hack to get the WebAgg backend
# working with ipython's `%pylab` mode until proper
# integration is implemented.
except AttributeError:
ipython_pylab = False
# Leave the following as a separate step in case we
# want to control this behavior with an rcParam.
if ipython_pylab:
return
if not is_interactive() or get_backend() == 'WebAgg':
self.mainloop()
def mainloop(self):
pass
class RendererBase(object):
"""An abstract base class to handle drawing/rendering operations.
The following methods must be implemented in the backend for full
functionality (though just implementing :meth:`draw_path` alone would
give a highly capable backend):
* :meth:`draw_path`
* :meth:`draw_image`
* :meth:`draw_gouraud_triangle`
The following methods *should* be implemented in the backend for
optimization reasons:
* :meth:`draw_text`
* :meth:`draw_markers`
* :meth:`draw_path_collection`
* :meth:`draw_quad_mesh`
"""
def __init__(self):
self._texmanager = None
self._text2path = textpath.TextToPath()
def open_group(self, s, gid=None):
"""
Open a grouping element with label *s*. If *gid* is given, use
*gid* as the id of the group. Is only currently used by
:mod:`~matplotlib.backends.backend_svg`.
"""
pass
def close_group(self, s):
"""
Close a grouping element with label *s*
Is only currently used by :mod:`~matplotlib.backends.backend_svg`
"""
pass
def draw_path(self, gc, path, transform, rgbFace=None):
"""
Draws a :class:`~matplotlib.path.Path` instance using the
given affine transform.
"""
raise NotImplementedError
def draw_markers(self, gc, marker_path, marker_trans, path,
trans, rgbFace=None):
"""
Draws a marker at each of the vertices in path. This includes
all vertices, including control points on curves. To avoid
that behavior, those vertices should be removed before calling
this function.
*gc*
the :class:`GraphicsContextBase` instance
*marker_trans*
is an affine transform applied to the marker.
*trans*
is an affine transform applied to the path.
This provides a fallback implementation of draw_markers that
makes multiple calls to :meth:`draw_path`. Some backends may
want to override this method in order to draw the marker only
once and reuse it multiple times.
"""
for vertices, codes in path.iter_segments(trans, simplify=False):
if len(vertices):
x, y = vertices[-2:]
self.draw_path(gc, marker_path,
marker_trans +
transforms.Affine2D().translate(x, y),
rgbFace)
def draw_path_collection(self, gc, master_transform, paths, all_transforms,
offsets, offsetTrans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds, urls,
offset_position):
"""
Draws a collection of paths selecting drawing properties from
the lists *facecolors*, *edgecolors*, *linewidths*,
*linestyles* and *antialiaseds*. *offsets* is a list of
offsets to apply to each of the paths. The offsets in
*offsets* are first transformed by *offsetTrans* before being
applied. *offset_position* may be either "screen" or "data"
depending on the space that the offsets are in.
This provides a fallback implementation of
:meth:`draw_path_collection` that makes multiple calls to
:meth:`draw_path`. Some backends may want to override this in
order to render each set of path data only once, and then
reference that path multiple times with the different offsets,
colors, styles etc. The generator methods
:meth:`_iter_collection_raw_paths` and
:meth:`_iter_collection` are provided to help with (and
standardize) the implementation across backends. It is highly
recommended to use those generators, so that changes to the
behavior of :meth:`draw_path_collection` can be made globally.
"""
path_ids = []
for path, transform in self._iter_collection_raw_paths(
master_transform, paths, all_transforms):
path_ids.append((path, transforms.Affine2D(transform)))
for xo, yo, path_id, gc0, rgbFace in self._iter_collection(
gc, master_transform, all_transforms, path_ids, offsets,
offsetTrans, facecolors, edgecolors, linewidths, linestyles,
antialiaseds, urls, offset_position):
path, transform = path_id
transform = transforms.Affine2D(
transform.get_matrix()).translate(xo, yo)
self.draw_path(gc0, path, transform, rgbFace)
def draw_quad_mesh(self, gc, master_transform, meshWidth, meshHeight,
coordinates, offsets, offsetTrans, facecolors,
antialiased, edgecolors):
"""
This provides a fallback implementation of
:meth:`draw_quad_mesh` that generates paths and then calls
:meth:`draw_path_collection`.
"""
from matplotlib.collections import QuadMesh
paths = QuadMesh.convert_mesh_to_paths(
meshWidth, meshHeight, coordinates)
if edgecolors is None:
edgecolors = facecolors
linewidths = np.array([gc.get_linewidth()], np.float_)
return self.draw_path_collection(
gc, master_transform, paths, [], offsets, offsetTrans, facecolors,
edgecolors, linewidths, [], [antialiased], [None], 'screen')
def draw_gouraud_triangle(self, gc, points, colors, transform):
"""
Draw a Gouraud-shaded triangle.
*points* is a 3x2 array of (x, y) points for the triangle.
*colors* is a 3x4 array of RGBA colors for each point of the
triangle.
*transform* is an affine transform to apply to the points.
"""
raise NotImplementedError
def draw_gouraud_triangles(self, gc, triangles_array, colors_array,
transform):
"""
Draws a series of Gouraud triangles.
*points* is an Nx3x2 array of (x, y) points for the triangles.
*colors* is a Nx3x4 array of RGBA colors for each point of the
triangles.
*transform* is an affine transform to apply to the points.
"""
transform = transform.frozen()
for tri, col in zip(triangles_array, colors_array):
self.draw_gouraud_triangle(gc, tri, col, transform)
def _iter_collection_raw_paths(self, master_transform, paths,
all_transforms):
"""
This is a helper method (along with :meth:`_iter_collection`) to make
it easier to write a space-efficient :meth:`draw_path_collection`
implementation in a backend.
This method yields all of the base path/transform
combinations, given a master transform, a list of paths and
list of transforms.
The arguments should be exactly what is passed in to
:meth:`draw_path_collection`.
The backend should take each yielded path and transform and
create an object that can be referenced (reused) later.
"""
Npaths = len(paths)
Ntransforms = len(all_transforms)
N = max(Npaths, Ntransforms)
if Npaths == 0:
return
transform = transforms.IdentityTransform()
for i in xrange(N):
path = paths[i % Npaths]
if Ntransforms:
transform = Affine2D(all_transforms[i % Ntransforms])
yield path, transform + master_transform
def _iter_collection_uses_per_path(self, paths, all_transforms,
offsets, facecolors, edgecolors):
"""
Compute how many times each raw path object returned by
_iter_collection_raw_paths would be used when calling
_iter_collection. This is intended for the backend to decide
on the tradeoff between using the paths in-line and storing
them once and reusing. Rounds up in case the number of uses
is not the same for every path.
"""
Npaths = len(paths)
if Npaths == 0 or (len(facecolors) == 0 and len(edgecolors) == 0):
return 0
Npath_ids = max(Npaths, len(all_transforms))
N = max(Npath_ids, len(offsets))
return (N + Npath_ids - 1) // Npath_ids
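# Worked example of the rounding above: with 3 paths, no extra transforms,
# 5 offsets and at least one face or edge color, Npath_ids = 3 and N = 5,
# so each raw path is used (5 + 3 - 1) // 3 = 2 times.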
def _iter_collection(self, gc, master_transform, all_transforms,
path_ids, offsets, offsetTrans, facecolors,
edgecolors, linewidths, linestyles,
antialiaseds, urls, offset_position):
"""
This is a helper method (along with
:meth:`_iter_collection_raw_paths`) to make it easier to write
a space-efficient :meth:`draw_path_collection` implementation in a
backend.
This method yields all of the path, offset and graphics
context combinations to draw the path collection. The caller
should already have looped over the results of
:meth:`_iter_collection_raw_paths` to draw this collection.
The arguments should be the same as that passed into
:meth:`draw_path_collection`, with the exception of
*path_ids*, which is a list of arbitrary objects that the
backend will use to reference one of the paths created in the
:meth:`_iter_collection_raw_paths` stage.
Each yielded result is of the form::
xo, yo, path_id, gc, rgbFace
where *xo*, *yo* is an offset; *path_id* is one of the elements of
*path_ids*; *gc* is a graphics context and *rgbFace* is a color to
use for filling the path.
"""
Ntransforms = len(all_transforms)
Npaths = len(path_ids)
Noffsets = len(offsets)
N = max(Npaths, Noffsets)
Nfacecolors = len(facecolors)
Nedgecolors = len(edgecolors)
Nlinewidths = len(linewidths)
Nlinestyles = len(linestyles)
Naa = len(antialiaseds)
Nurls = len(urls)
if (Nfacecolors == 0 and Nedgecolors == 0) or Npaths == 0:
return
if Noffsets:
toffsets = offsetTrans.transform(offsets)
gc0 = self.new_gc()
gc0.copy_properties(gc)
if Nfacecolors == 0:
rgbFace = None
if Nedgecolors == 0:
gc0.set_linewidth(0.0)
xo, yo = 0, 0
for i in xrange(N):
path_id = path_ids[i % Npaths]
if Noffsets:
xo, yo = toffsets[i % Noffsets]
if offset_position == 'data':
if Ntransforms:
transform = (
Affine2D(all_transforms[i % Ntransforms]) +
master_transform)
else:
transform = master_transform
xo, yo = transform.transform_point((xo, yo))
xp, yp = transform.transform_point((0, 0))
xo = -(xp - xo)
yo = -(yp - yo)
if not (np.isfinite(xo) and np.isfinite(yo)):
continue
if Nfacecolors:
rgbFace = facecolors[i % Nfacecolors]
if Nedgecolors:
if Nlinewidths:
gc0.set_linewidth(linewidths[i % Nlinewidths])
if Nlinestyles:
gc0.set_dashes(*linestyles[i % Nlinestyles])
fg = edgecolors[i % Nedgecolors]
if len(fg) == 4:
if fg[3] == 0.0:
gc0.set_linewidth(0)
else:
gc0.set_foreground(fg)
else:
gc0.set_foreground(fg)
if rgbFace is not None and len(rgbFace) == 4:
if rgbFace[3] == 0:
rgbFace = None
gc0.set_antialiased(antialiaseds[i % Naa])
if Nurls:
gc0.set_url(urls[i % Nurls])
yield xo, yo, path_id, gc0, rgbFace
gc0.restore()
def get_image_magnification(self):
"""
Get the factor by which to magnify images passed to :meth:`draw_image`.
Allows a backend to have images at a different resolution to other
artists.
"""
return 1.0
def draw_image(self, gc, x, y, im):
"""
Draw the image instance into the current axes;
*gc*
a GraphicsContext containing clipping information
*x*
is the distance in pixels from the left hand side of the canvas.
*y*
the distance from the origin. That is, if origin is
upper, y is the distance from top. If origin is lower, y
is the distance from bottom
*im*
the :class:`matplotlib._image.Image` instance
"""
raise NotImplementedError
def option_image_nocomposite(self):
"""
override this method for renderers that do not necessarily
want to rescale and composite raster images (like SVG).
"""
return False
def option_scale_image(self):
"""
override this method for renderers that support arbitrary
scaling of images (most of the vector backends).
"""
return False
def draw_tex(self, gc, x, y, s, prop, angle, ismath='TeX!', mtext=None):
"""
"""
self._draw_text_as_path(gc, x, y, s, prop, angle, ismath="TeX")
def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
"""
Draw the text instance
*gc*
the :class:`GraphicsContextBase` instance
*x*
the x location of the text in display coords
*y*
the y location of the text baseline in display coords
*s*
the text string
*prop*
a :class:`matplotlib.font_manager.FontProperties` instance
*angle*
the rotation angle in degrees
*mtext*
a :class:`matplotlib.text.Text` instance
**backend implementers note**
When you are trying to determine if you have gotten your bounding box
right (which is what enables the text layout/alignment to work
properly), it helps to change the line in text.py::
if 0: bbox_artist(self, renderer)
to if 1, and then the actual bounding box will be plotted along with
your text.
"""
self._draw_text_as_path(gc, x, y, s, prop, angle, ismath)
def _get_text_path_transform(self, x, y, s, prop, angle, ismath):
"""
return the text path and transform
*prop*
font property
*s*
text to be converted
*usetex*
If True, use matplotlib usetex mode.
*ismath*
If True, use mathtext parser. If "TeX", use *usetex* mode.
"""
text2path = self._text2path
fontsize = self.points_to_pixels(prop.get_size_in_points())
if ismath == "TeX":
verts, codes = text2path.get_text_path(prop, s, ismath=False,
usetex=True)
else:
verts, codes = text2path.get_text_path(prop, s, ismath=ismath,
usetex=False)
path = Path(verts, codes)
angle = angle / 180. * 3.141592
if self.flipy():
transform = Affine2D().scale(fontsize / text2path.FONT_SCALE,
fontsize / text2path.FONT_SCALE)
transform = transform.rotate(angle).translate(x, self.height - y)
else:
transform = Affine2D().scale(fontsize / text2path.FONT_SCALE,
fontsize / text2path.FONT_SCALE)
transform = transform.rotate(angle).translate(x, y)
return path, transform
def _draw_text_as_path(self, gc, x, y, s, prop, angle, ismath):
"""
draw the text by converting them to paths using textpath module.
*prop*
font property
*s*
text to be converted
*usetex*
If True, use matplotlib usetex mode.
*ismath*
If True, use mathtext parser. If "TeX", use *usetex* mode.
"""
path, transform = self._get_text_path_transform(
x, y, s, prop, angle, ismath)
color = gc.get_rgb()
gc.set_linewidth(0.0)
self.draw_path(gc, path, transform, rgbFace=color)
def get_text_width_height_descent(self, s, prop, ismath):
"""
get the width and height, and the offset from the bottom to the
baseline (descent), in display coords of the string s with
:class:`~matplotlib.font_manager.FontProperties` prop
"""
if ismath == 'TeX':
# todo: handle props
size = prop.get_size_in_points()
texmanager = self._text2path.get_texmanager()
fontsize = prop.get_size_in_points()
w, h, d = texmanager.get_text_width_height_descent(s, fontsize,
renderer=self)
return w, h, d
dpi = self.points_to_pixels(72)
if ismath:
dims = self._text2path.mathtext_parser.parse(s, dpi, prop)
return dims[0:3] # return width, height, descent
flags = self._text2path._get_hinting_flag()
font = self._text2path._get_font(prop)
size = prop.get_size_in_points()
font.set_size(size, dpi)
# the width and height of unrotated string
font.set_text(s, 0.0, flags=flags)
w, h = font.get_width_height()
d = font.get_descent()
w /= 64.0 # convert from subpixels
h /= 64.0
d /= 64.0
return w, h, d
def flipy(self):
"""
Return True if small y values are at the top of the canvas for this
renderer. Used for drawing text (:mod:`matplotlib.text`) and images
(:mod:`matplotlib.image`) only.
"""
return True
def get_canvas_width_height(self):
'return the canvas width and height in display coords'
return 1, 1
def get_texmanager(self):
"""
return the :class:`matplotlib.texmanager.TexManager` instance
"""
if self._texmanager is None:
from matplotlib.texmanager import TexManager
self._texmanager = TexManager()
return self._texmanager
def new_gc(self):
"""
Return an instance of a :class:`GraphicsContextBase`
"""
return GraphicsContextBase()
def points_to_pixels(self, points):
"""
Convert points to display units
*points*
a float or a numpy array of float
return points converted to pixels
You need to override this function (unless your backend
doesn't have a dpi, e.g., postscript or svg). Some imaging
systems assume some value for pixels per inch::
points to pixels = points * pixels_per_inch/72.0 * dpi/72.0
"""
return points
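# Worked example of the docstring formula above: with pixels_per_inch == 72
# and dpi == 144, 12 points map to 12 * 72/72.0 * 144/72.0 = 24 pixels.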
def strip_math(self, s):
return cbook.strip_math(s)
def start_rasterizing(self):
"""
Used in MixedModeRenderer. Switch to the raster renderer.
"""
pass
def stop_rasterizing(self):
"""
Used in MixedModeRenderer. Switch back to the vector renderer
and draw the contents of the raster renderer as an image on
the vector renderer.
"""
pass
def start_filter(self):
"""
Used in AggRenderer. Switch to a temporary renderer for image
filtering effects.
"""
pass
def stop_filter(self, filter_func):
"""
Used in AggRenderer. Switch back to the original renderer.
The contents of the temporary renderer is processed with the
*filter_func* and is drawn on the original renderer as an
image.
"""
pass
class GraphicsContextBase:
"""
An abstract base class that provides color, line styles, etc...
"""
# a mapping from dash styles to suggested offset, dash pairs
dashd = {
'solid': (None, None),
'dashed': (0, (6.0, 6.0)),
'dashdot': (0, (3.0, 5.0, 1.0, 5.0)),
'dotted': (0, (1.0, 3.0)),
}
def __init__(self):
self._alpha = 1.0
self._forced_alpha = False # if True, _alpha overrides A from RGBA
self._antialiased = 1 # use 0,1 not True, False for extension code
self._capstyle = 'butt'
self._cliprect = None
self._clippath = None
self._dashes = None, None
self._joinstyle = 'round'
self._linestyle = 'solid'
self._linewidth = 1
self._rgb = (0.0, 0.0, 0.0, 1.0)
self._orig_color = (0.0, 0.0, 0.0, 1.0)
self._hatch = None
self._url = None
self._gid = None
self._snap = None
self._sketch = None
def copy_properties(self, gc):
'Copy properties from gc to self'
self._alpha = gc._alpha
self._forced_alpha = gc._forced_alpha
self._antialiased = gc._antialiased
self._capstyle = gc._capstyle
self._cliprect = gc._cliprect
self._clippath = gc._clippath
self._dashes = gc._dashes
self._joinstyle = gc._joinstyle
self._linestyle = gc._linestyle
self._linewidth = gc._linewidth
self._rgb = gc._rgb
self._orig_color = gc._orig_color
self._hatch = gc._hatch
self._url = gc._url
self._gid = gc._gid
self._snap = gc._snap
self._sketch = gc._sketch
def restore(self):
"""
Restore the graphics context from the stack - needed only
for backends that save graphics contexts on a stack
"""
pass
def get_alpha(self):
"""
Return the alpha value used for blending - not supported on
all backends
"""
return self._alpha
def get_antialiased(self):
"Return true if the object should try to do antialiased rendering"
return self._antialiased
def get_capstyle(self):
"""
Return the capstyle as a string in ('butt', 'round', 'projecting')
"""
return self._capstyle
def get_clip_rectangle(self):
"""
Return the clip rectangle as a :class:`~matplotlib.transforms.Bbox`
instance
"""
return self._cliprect
def get_clip_path(self):
"""
Return the clip path in the form (path, transform), where path
is a :class:`~matplotlib.path.Path` instance, and transform is
an affine transform to apply to the path before clipping.
"""
if self._clippath is not None:
return self._clippath.get_transformed_path_and_affine()
return None, None
def get_dashes(self):
"""
Return the dash information as an offset dashlist tuple.
The dash list is an even-sized list that gives the ink on, ink
off in pixels.
See p. 107 of the PostScript `BLUEBOOK
<http://www-cdf.fnal.gov/offline/PostScript/BLUEBOOK.PDF>`_
for more info.
Default value is None
"""
return self._dashes
def get_forced_alpha(self):
"""
Return whether the value given by get_alpha() should be used to
override any other alpha-channel values.
"""
return self._forced_alpha
def get_joinstyle(self):
"""
Return the line join style as one of ('miter', 'round', 'bevel')
"""
return self._joinstyle
def get_linestyle(self, style):
"""
Return the linestyle: one of ('solid', 'dashed', 'dashdot',
'dotted').
"""
return self._linestyle
def get_linewidth(self):
"""
Return the line width in points as a scalar
"""
return self._linewidth
def get_rgb(self):
"""
returns a tuple of three or four floats from 0-1.
"""
return self._rgb
def get_url(self):
"""
returns a url if one is set, None otherwise
"""
return self._url
def get_gid(self):
"""
Return the object identifier if one is set, None otherwise.
"""
return self._gid
def get_snap(self):
"""
returns the snap setting which may be:
* True: snap vertices to the nearest pixel center
* False: leave vertices as-is
* None: (auto) If the path contains only rectilinear line
segments, round to the nearest pixel center
"""
return self._snap
def set_alpha(self, alpha):
"""
Set the alpha value used for blending - not supported on all backends.
If ``alpha=None`` (the default), the alpha components of the
foreground and fill colors will be used to set their respective
transparencies (where applicable); otherwise, ``alpha`` will override
them.
"""
if alpha is not None:
self._alpha = alpha
self._forced_alpha = True
else:
self._alpha = 1.0
self._forced_alpha = False
self.set_foreground(self._orig_color)
def set_antialiased(self, b):
"""
True if object should be drawn with antialiased rendering
"""
# use 0, 1 to make life easier on extension code trying to read the gc
if b:
self._antialiased = 1
else:
self._antialiased = 0
def set_capstyle(self, cs):
"""
Set the capstyle as a string in ('butt', 'round', 'projecting')
"""
if cs in ('butt', 'round', 'projecting'):
self._capstyle = cs
else:
raise ValueError('Unrecognized cap style. Found %s' % cs)
def set_clip_rectangle(self, rectangle):
"""
Set the clip rectangle with sequence (left, bottom, width, height)
"""
self._cliprect = rectangle
def set_clip_path(self, path):
"""
Set the clip path and transformation. Path should be a
:class:`~matplotlib.transforms.TransformedPath` instance.
"""
assert path is None or isinstance(path, transforms.TransformedPath)
self._clippath = path
def set_dashes(self, dash_offset, dash_list):
"""
Set the dash style for the gc.
*dash_offset*
is the offset (usually 0).
*dash_list*
specifies the on-off sequence as points.
``(None, None)`` specifies a solid line
"""
if dash_list is not None:
dl = np.asarray(dash_list)
if np.any(dl <= 0.0):
raise ValueError("All values in the dash list must be positive")
self._dashes = dash_offset, dash_list
def set_foreground(self, fg, isRGBA=False):
"""
Set the foreground color. fg can be a MATLAB format string, a
html hex color string, an rgb or rgba unit tuple, or a float between 0
and 1. In the latter case, grayscale is used.
If you know fg is rgba, set ``isRGBA=True`` for efficiency.
"""
self._orig_color = fg
if self._forced_alpha:
self._rgb = colors.colorConverter.to_rgba(fg, self._alpha)
elif isRGBA:
self._rgb = fg
else:
self._rgb = colors.colorConverter.to_rgba(fg)
def set_graylevel(self, frac):
"""
Set the foreground color to be a gray level with *frac*
"""
self._orig_color = frac
self._rgb = (frac, frac, frac, self._alpha)
def set_joinstyle(self, js):
"""
Set the join style to be one of ('miter', 'round', 'bevel')
"""
if js in ('miter', 'round', 'bevel'):
self._joinstyle = js
else:
raise ValueError('Unrecognized join style. Found %s' % js)
def set_linewidth(self, w):
"""
Set the linewidth in points
"""
self._linewidth = w
def set_linestyle(self, style):
"""
Set the linestyle to be one of ('solid', 'dashed', 'dashdot',
'dotted'). One may specify customized dash styles by providing
a tuple of (offset, dash pairs). For example, the predefined
linestyles have the following values:
'dashed' : (0, (6.0, 6.0)),
'dashdot' : (0, (3.0, 5.0, 1.0, 5.0)),
'dotted' : (0, (1.0, 3.0)),
"""
if style in self.dashd:
offset, dashes = self.dashd[style]
elif isinstance(style, tuple):
offset, dashes = style
else:
raise ValueError('Unrecognized linestyle: %s' % str(style))
self._linestyle = style
self.set_dashes(offset, dashes)
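# Example usage (values taken from the dashd table above)::
#
#     gc.set_linestyle('dashed')  # stores the style and calls
#                                 # set_dashes(0, (6.0, 6.0))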
def set_url(self, url):
"""
Sets the url for links in compatible backends
"""
self._url = url
def set_gid(self, id):
"""
Sets the id.
"""
self._gid = id
def set_snap(self, snap):
"""
Sets the snap setting which may be:
* True: snap vertices to the nearest pixel center
* False: leave vertices as-is
* None: (auto) If the path contains only rectilinear line
segments, round to the nearest pixel center
"""
self._snap = snap
def set_hatch(self, hatch):
"""
Sets the hatch style for filling
"""
self._hatch = hatch
def get_hatch(self):
"""
Gets the current hatch style
"""
return self._hatch
def get_hatch_path(self, density=6.0):
"""
Returns a Path for the current hatch.
"""
if self._hatch is None:
return None
return Path.hatch(self._hatch, density)
def get_sketch_params(self):
"""
Returns the sketch parameters for the artist.
Returns
-------
sketch_params : tuple or `None`
A 3-tuple with the following elements:
* `scale`: The amplitude of the wiggle perpendicular to the
source line.
* `length`: The length of the wiggle along the line.
* `randomness`: The scale factor by which the length is
shrunken or expanded.
May return `None` if no sketch parameters were set.
"""
return self._sketch
def set_sketch_params(self, scale=None, length=None, randomness=None):
"""
Sets the sketch parameters.
Parameters
----------
scale : float, optional
The amplitude of the wiggle perpendicular to the source
line, in pixels. If scale is `None`, or not provided, no
sketch filter will be provided.
length : float, optional
The length of the wiggle along the line, in pixels
(default 128.0)
randomness : float, optional
The scale factor by which the length is shrunken or
expanded (default 16.0)
"""
if scale is None:
self._sketch = None
else:
self._sketch = (scale, length or 128.0, randomness or 16.0)
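# For example, set_sketch_params(scale=1.0) stores (1.0, 128.0, 16.0),
# filling in the documented defaults for length and randomness.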
class TimerBase(object):
'''
A base class for providing timer events, useful for things like animations.
Backends need to implement a few specific methods in order to use their
own timing mechanisms so that the timer events are integrated into their
event loops.
Mandatory functions that must be implemented:
* `_timer_start`: Contains backend-specific code for starting
the timer
* `_timer_stop`: Contains backend-specific code for stopping
the timer
Optional overrides:
* `_timer_set_single_shot`: Code for setting the timer to
single shot operating mode, if supported by the timer
object. If not, the `Timer` class itself will store the flag
and the `_on_timer` method should be overridden to support
such behavior.
* `_timer_set_interval`: Code for setting the interval on the
timer, if there is a method for doing so on the timer
object.
* `_on_timer`: This is the internal function that any timer
object should call, which will handle the task of running
all callbacks that have been set.
Attributes:
* `interval`: The time between timer events in
milliseconds. Default is 1000 ms.
* `single_shot`: Boolean flag indicating whether this timer
should operate as single shot (run once and then
stop). Defaults to `False`.
* `callbacks`: Stores list of (func, args) tuples that will be
called upon timer events. This list can be manipulated
directly, or the functions `add_callback` and
`remove_callback` can be used.
'''
def __init__(self, interval=None, callbacks=None):
# Initialize empty callbacks list and set up default settings if necessary
if callbacks is None:
self.callbacks = []
else:
self.callbacks = callbacks[:] # Create a copy
if interval is None:
self._interval = 1000
else:
self._interval = interval
self._single = False
# Default attribute for holding the GUI-specific timer object
self._timer = None
def __del__(self):
'Need to stop timer and possibly disconnect timer.'
self._timer_stop()
def start(self, interval=None):
'''
Start the timer object. `interval` is optional and will be used
to reset the timer interval first if provided.
'''
if interval is not None:
self._set_interval(interval)
self._timer_start()
def stop(self):
'''
Stop the timer.
'''
self._timer_stop()
def _timer_start(self):
pass
def _timer_stop(self):
pass
def _get_interval(self):
return self._interval
def _set_interval(self, interval):
# Force to int since none of the backends actually support fractional
# milliseconds, and some error or give warnings.
interval = int(interval)
self._interval = interval
self._timer_set_interval()
interval = property(_get_interval, _set_interval)
def _get_single_shot(self):
return self._single
def _set_single_shot(self, ss=True):
self._single = ss
self._timer_set_single_shot()
single_shot = property(_get_single_shot, _set_single_shot)
def add_callback(self, func, *args, **kwargs):
'''
Register `func` to be called by timer when the event fires. Any
additional arguments provided will be passed to `func`.
'''
self.callbacks.append((func, args, kwargs))
def remove_callback(self, func, *args, **kwargs):
'''
Remove `func` from list of callbacks. `args` and `kwargs` are optional
and used to distinguish between copies of the same function registered
to be called with different arguments.
'''
if args or kwargs:
self.callbacks.remove((func, args, kwargs))
else:
funcs = [c[0] for c in self.callbacks]
if func in funcs:
self.callbacks.pop(funcs.index(func))
def _timer_set_interval(self):
'Used to set interval on underlying timer object.'
pass
def _timer_set_single_shot(self):
'Used to set single shot on underlying timer object.'
pass
def _on_timer(self):
'''
Runs all functions that have been registered as callbacks. Functions
can return False (or 0) if they should not be called any more. If there
are no callbacks, the timer is automatically stopped.
'''
for func, args, kwargs in self.callbacks:
ret = func(*args, **kwargs)
# docstring above explains why we use `if ret == False` here,
# instead of `if not ret`.
if ret == False:
self.callbacks.remove((func, args, kwargs))
if len(self.callbacks) == 0:
self.stop()
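# A minimal sketch (illustrative only, not part of this module's API) of how
# a concrete backend could satisfy the mandatory TimerBase hooks described
# above. It uses threading.Timer purely for demonstration; real backends
# integrate with their GUI toolkit's event loop instead.
import threading


class _SketchThreadingTimer(TimerBase):
    """Illustrative TimerBase subclass; `interval` is in milliseconds."""

    def _timer_start(self):
        # threading.Timer fires once after a delay in seconds, so convert
        # from the milliseconds stored by TimerBase.
        self._timer = threading.Timer(self._interval / 1000.0, self._on_timer)
        self._timer.start()

    def _timer_stop(self):
        if self._timer is not None:
            self._timer.cancel()
            self._timer = None

    def _on_timer(self):
        # Run the registered callbacks, then re-arm unless single shot.
        TimerBase._on_timer(self)
        if not self._single and self.callbacks:
            self._timer_start()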
class Event:
"""
A matplotlib event. Attach additional attributes as defined in
:meth:`FigureCanvasBase.mpl_connect`. The following attributes
are defined and shown with their default values
*name*
the event name
*canvas*
the FigureCanvas instance generating the event
*guiEvent*
the GUI event that triggered the matplotlib event
"""
def __init__(self, name, canvas, guiEvent=None):
self.name = name
self.canvas = canvas
self.guiEvent = guiEvent
class IdleEvent(Event):
"""
An event triggered by the GUI backend when it is idle -- useful
for passive animation
"""
pass
class DrawEvent(Event):
"""
An event triggered by a draw operation on the canvas
In addition to the :class:`Event` attributes, the following event
attributes are defined:
*renderer*
the :class:`RendererBase` instance for the draw event
"""
def __init__(self, name, canvas, renderer):
Event.__init__(self, name, canvas)
self.renderer = renderer
class ResizeEvent(Event):
"""
An event triggered by a canvas resize
In addition to the :class:`Event` attributes, the following event
attributes are defined:
*width*
width of the canvas in pixels
*height*
height of the canvas in pixels
"""
def __init__(self, name, canvas):
Event.__init__(self, name, canvas)
self.width, self.height = canvas.get_width_height()
class CloseEvent(Event):
"""
An event triggered by a figure being closed
In addition to the :class:`Event` attributes, the following event
attributes are defined:
"""
def __init__(self, name, canvas, guiEvent=None):
Event.__init__(self, name, canvas, guiEvent)
class LocationEvent(Event):
"""
An event that has a screen location
The following additional attributes are defined and shown with
their default values.
In addition to the :class:`Event` attributes, the following
event attributes are defined:
*x*
x position - pixels from left of canvas
*y*
y position - pixels from bottom of canvas
*inaxes*
the :class:`~matplotlib.axes.Axes` instance if mouse is over axes
*xdata*
x coord of mouse in data coords
*ydata*
y coord of mouse in data coords
"""
x = None # x position - pixels from left of canvas
y = None # y position - pixels from bottom of canvas
inaxes = None # the Axes instance if mouse is over axes
xdata = None # x coord of mouse in data coords
ydata = None # y coord of mouse in data coords
# the last event that was triggered before this one
lastevent = None
def __init__(self, name, canvas, x, y, guiEvent=None):
"""
*x*, *y* in figure coords, 0,0 = bottom, left
"""
Event.__init__(self, name, canvas, guiEvent=guiEvent)
self.x = x
self.y = y
if x is None or y is None:
# cannot check if event was in axes if no x,y info
self.inaxes = None
self._update_enter_leave()
return
# Find all axes containing the mouse
if self.canvas.mouse_grabber is None:
axes_list = [a for a in self.canvas.figure.get_axes()
if a.in_axes(self)]
else:
axes_list = [self.canvas.mouse_grabber]
if len(axes_list) == 0: # None found
self.inaxes = None
self._update_enter_leave()
return
elif (len(axes_list) > 1): # Overlap, get the highest zorder
axes_list.sort(key=lambda x: x.zorder)
self.inaxes = axes_list[-1] # Use the highest zorder
else: # Just found one hit
self.inaxes = axes_list[0]
try:
trans = self.inaxes.transData.inverted()
xdata, ydata = trans.transform_point((x, y))
except ValueError:
self.xdata = None
self.ydata = None
else:
self.xdata = xdata
self.ydata = ydata
self._update_enter_leave()
def _update_enter_leave(self):
'process the figure/axes enter leave events'
if LocationEvent.lastevent is not None:
last = LocationEvent.lastevent
if last.inaxes != self.inaxes:
# process axes enter/leave events
try:
if last.inaxes is not None:
last.canvas.callbacks.process('axes_leave_event', last)
except:
pass
# See ticket 2901582.
# I think this is a valid exception to the rule
# against catching all exceptions; if anything goes
# wrong, we simply want to move on and process the
# current event.
if self.inaxes is not None:
self.canvas.callbacks.process('axes_enter_event', self)
else:
# process a figure enter event
if self.inaxes is not None:
self.canvas.callbacks.process('axes_enter_event', self)
LocationEvent.lastevent = self
class MouseEvent(LocationEvent):
"""
A mouse event ('button_press_event',
'button_release_event',
'scroll_event',
'motion_notify_event').
In addition to the :class:`Event` and :class:`LocationEvent`
attributes, the following attributes are defined:
*button*
button pressed None, 1, 2, 3, 'up', 'down' (up and down are used
for scroll events)
*key*
the key depressed when the mouse event triggered (see
:class:`KeyEvent`)
*step*
number of scroll steps (positive for 'up', negative for 'down')
Example usage::
def on_press(event):
print('you pressed', event.button, event.xdata, event.ydata)
cid = fig.canvas.mpl_connect('button_press_event', on_press)
"""
x = None # x position - pixels from left of canvas
y = None # y position - pixels from bottom of canvas
button = None # button pressed None, 1, 2, 3
dblclick = None # whether or not the event is the result of a double click
inaxes = None # the Axes instance if mouse is over axes
xdata = None # x coord of mouse in data coords
ydata = None # y coord of mouse in data coords
step = None # scroll steps for scroll events
def __init__(self, name, canvas, x, y, button=None, key=None,
step=0, dblclick=False, guiEvent=None):
"""
x, y in figure coords, 0,0 = bottom, left
button pressed None, 1, 2, 3, 'up', 'down'
"""
LocationEvent.__init__(self, name, canvas, x, y, guiEvent=guiEvent)
self.button = button
self.key = key
self.step = step
self.dblclick = dblclick
def __str__(self):
return ("MPL MouseEvent: xy=(%d,%d) xydata=(%s,%s) button=%s " +
"dblclick=%s inaxes=%s") % (self.x, self.y, self.xdata,
self.ydata, self.button,
self.dblclick, self.inaxes)
class PickEvent(Event):
"""
a pick event, fired when the user picks a location on the canvas
sufficiently close to an artist.
Attrs: all the :class:`Event` attributes plus
*mouseevent*
the :class:`MouseEvent` that generated the pick
*artist*
the :class:`~matplotlib.artist.Artist` picked
other
extra class dependent attrs -- e.g., a
:class:`~matplotlib.lines.Line2D` pick may define different
extra attributes than a
:class:`~matplotlib.collections.PatchCollection` pick event
Example usage::
line, = ax.plot(rand(100), 'o', picker=5) # 5 points tolerance
def on_pick(event):
thisline = event.artist
xdata, ydata = thisline.get_data()
ind = event.ind
print('on pick line:', zip(xdata[ind], ydata[ind]))
cid = fig.canvas.mpl_connect('pick_event', on_pick)
"""
def __init__(self, name, canvas, mouseevent, artist,
guiEvent=None, **kwargs):
Event.__init__(self, name, canvas, guiEvent)
self.mouseevent = mouseevent
self.artist = artist
self.__dict__.update(kwargs)
class KeyEvent(LocationEvent):
"""
A key event (key press, key release).
Attach additional attributes as defined in
:meth:`FigureCanvasBase.mpl_connect`.
In addition to the :class:`Event` and :class:`LocationEvent`
attributes, the following attributes are defined:
*key*
the key(s) pressed. Could be **None**, a single case sensitive ascii
character ("g", "G", "#", etc.), a special key
("control", "shift", "f1", "up", etc.) or a
combination of the above (e.g., "ctrl+alt+g", "ctrl+alt+G").
.. note::
Modifier keys will be prefixed to the pressed key and will be in the
order "ctrl", "alt", "super". The exception to this rule is when the
pressed key is itself a modifier key, therefore "ctrl+alt" and
"alt+control" can both be valid key values.
Example usage::
def on_key(event):
print('you pressed', event.key, event.xdata, event.ydata)
cid = fig.canvas.mpl_connect('key_press_event', on_key)
"""
def __init__(self, name, canvas, key, x=0, y=0, guiEvent=None):
LocationEvent.__init__(self, name, canvas, x, y, guiEvent=guiEvent)
self.key = key
class FigureCanvasBase(object):
"""
The canvas the figure renders into.
Public attributes
*figure*
A :class:`matplotlib.figure.Figure` instance
"""
events = [
'resize_event',
'draw_event',
'key_press_event',
'key_release_event',
'button_press_event',
'button_release_event',
'scroll_event',
'motion_notify_event',
'pick_event',
'idle_event',
'figure_enter_event',
'figure_leave_event',
'axes_enter_event',
'axes_leave_event',
'close_event'
]
supports_blit = True
fixed_dpi = None
filetypes = _default_filetypes
if _has_pil:
# JPEG support
register_backend('jpg', 'matplotlib.backends.backend_agg',
'Joint Photographic Experts Group')
register_backend('jpeg', 'matplotlib.backends.backend_agg',
'Joint Photographic Experts Group')
# TIFF support
register_backend('tif', 'matplotlib.backends.backend_agg',
'Tagged Image File Format')
register_backend('tiff', 'matplotlib.backends.backend_agg',
'Tagged Image File Format')
def __init__(self, figure):
figure.set_canvas(self)
self.figure = figure
# a dictionary from event name to a dictionary that maps cid->func
self.callbacks = cbook.CallbackRegistry()
self.widgetlock = widgets.LockDraw()
self._button = None # the button pressed
self._key = None # the key pressed
self._lastx, self._lasty = None, None
self.button_pick_id = self.mpl_connect('button_press_event', self.pick)
self.scroll_pick_id = self.mpl_connect('scroll_event', self.pick)
self.mouse_grabber = None # the axes currently grabbing mouse
self.toolbar = None # NavigationToolbar2 will set me
self._is_saving = False
if False:
## highlight the artists that are hit
self.mpl_connect('motion_notify_event', self.onHilite)
## delete the artists that are clicked on
#self.mpl_disconnect(self.button_pick_id)
#self.mpl_connect('button_press_event',self.onRemove)
def is_saving(self):
"""
Returns `True` when the renderer is in the process of saving
to a file, rather than rendering for an on-screen buffer.
"""
return self._is_saving
def onRemove(self, ev):
"""
Mouse event processor which removes the top artist
        under the cursor. Connect this to the 'button_press_event'
        using::
            canvas.mpl_connect('button_press_event', canvas.onRemove)
"""
def sort_artists(artists):
# This depends on stable sort and artists returned
# from get_children in z order.
L = [(h.zorder, h) for h in artists]
L.sort()
return [h for zorder, h in L]
# Find the top artist under the cursor
under = sort_artists(self.figure.hitlist(ev))
h = None
if under:
h = under[-1]
# Try deleting that artist, or its parent if you
# can't delete the artist
while h:
if h.remove():
self.draw_idle()
break
parent = None
for p in under:
if h in p.get_children():
parent = p
break
h = parent
def onHilite(self, ev):
"""
Mouse event processor which highlights the artists
under the cursor. Connect this to the 'motion_notify_event'
using::
canvas.mpl_connect('motion_notify_event',canvas.onHilite)
"""
if not hasattr(self, '_active'):
self._active = dict()
under = self.figure.hitlist(ev)
enter = [a for a in under if a not in self._active]
leave = [a for a in self._active if a not in under]
#print "within:"," ".join([str(x) for x in under])
#print "entering:",[str(a) for a in enter]
#print "leaving:",[str(a) for a in leave]
# On leave restore the captured colour
for a in leave:
if hasattr(a, 'get_color'):
a.set_color(self._active[a])
elif hasattr(a, 'get_edgecolor'):
a.set_edgecolor(self._active[a][0])
a.set_facecolor(self._active[a][1])
del self._active[a]
# On enter, capture the color and repaint the artist
# with the highlight colour. Capturing colour has to
# be done first in case the parent recolouring affects
# the child.
for a in enter:
if hasattr(a, 'get_color'):
self._active[a] = a.get_color()
elif hasattr(a, 'get_edgecolor'):
self._active[a] = (a.get_edgecolor(), a.get_facecolor())
else:
self._active[a] = None
for a in enter:
if hasattr(a, 'get_color'):
a.set_color('red')
elif hasattr(a, 'get_edgecolor'):
a.set_edgecolor('red')
a.set_facecolor('lightblue')
else:
self._active[a] = None
self.draw_idle()
def pick(self, mouseevent):
if not self.widgetlock.locked():
self.figure.pick(mouseevent)
def blit(self, bbox=None):
"""
blit the canvas in bbox (default entire canvas)
"""
pass
def resize(self, w, h):
"""
set the canvas size in pixels
"""
pass
def draw_event(self, renderer):
"""
        This method will call all functions connected to the
'draw_event' with a :class:`DrawEvent`
"""
s = 'draw_event'
event = DrawEvent(s, self, renderer)
self.callbacks.process(s, event)
def resize_event(self):
"""
        This method will call all functions connected to the
'resize_event' with a :class:`ResizeEvent`
"""
s = 'resize_event'
event = ResizeEvent(s, self)
self.callbacks.process(s, event)
def close_event(self, guiEvent=None):
"""
        This method will call all functions connected to the
'close_event' with a :class:`CloseEvent`
"""
s = 'close_event'
try:
event = CloseEvent(s, self, guiEvent=guiEvent)
self.callbacks.process(s, event)
except (TypeError, AttributeError):
pass
# Suppress the TypeError when the python session is being killed.
# It may be that a better solution would be a mechanism to
# disconnect all callbacks upon shutdown.
# AttributeError occurs on OSX with qt4agg upon exiting
# with an open window; 'callbacks' attribute no longer exists.
def key_press_event(self, key, guiEvent=None):
"""
        This method will call all functions connected to the
'key_press_event' with a :class:`KeyEvent`
"""
self._key = key
s = 'key_press_event'
event = KeyEvent(
s, self, key, self._lastx, self._lasty, guiEvent=guiEvent)
self.callbacks.process(s, event)
def key_release_event(self, key, guiEvent=None):
"""
        This method will call all functions connected to the
'key_release_event' with a :class:`KeyEvent`
"""
s = 'key_release_event'
event = KeyEvent(
s, self, key, self._lastx, self._lasty, guiEvent=guiEvent)
self.callbacks.process(s, event)
self._key = None
def pick_event(self, mouseevent, artist, **kwargs):
"""
        This method will be called by artists that are picked and will
        fire a :class:`PickEvent` to all registered listeners
"""
s = 'pick_event'
event = PickEvent(s, self, mouseevent, artist, **kwargs)
self.callbacks.process(s, event)
def scroll_event(self, x, y, step, guiEvent=None):
"""
Backend derived classes should call this function on any
scroll wheel event. x,y are the canvas coords: 0,0 is lower,
left. button and key are as defined in MouseEvent.
        This method will call all functions connected to the
'scroll_event' with a :class:`MouseEvent` instance.
"""
if step >= 0:
self._button = 'up'
else:
self._button = 'down'
s = 'scroll_event'
mouseevent = MouseEvent(s, self, x, y, self._button, self._key,
step=step, guiEvent=guiEvent)
self.callbacks.process(s, mouseevent)
def button_press_event(self, x, y, button, dblclick=False, guiEvent=None):
"""
Backend derived classes should call this function on any mouse
button press. x,y are the canvas coords: 0,0 is lower, left.
button and key are as defined in :class:`MouseEvent`.
        This method will call all functions connected to the
'button_press_event' with a :class:`MouseEvent` instance.
"""
self._button = button
s = 'button_press_event'
mouseevent = MouseEvent(s, self, x, y, button, self._key,
dblclick=dblclick, guiEvent=guiEvent)
self.callbacks.process(s, mouseevent)
def button_release_event(self, x, y, button, guiEvent=None):
"""
Backend derived classes should call this function on any mouse
button release.
*x*
the canvas coordinates where 0=left
*y*
the canvas coordinates where 0=bottom
*guiEvent*
the native UI event that generated the mpl event
        This method will call all functions connected to the
'button_release_event' with a :class:`MouseEvent` instance.
"""
s = 'button_release_event'
event = MouseEvent(s, self, x, y, button, self._key, guiEvent=guiEvent)
self.callbacks.process(s, event)
self._button = None
def motion_notify_event(self, x, y, guiEvent=None):
"""
Backend derived classes should call this function on any
motion-notify-event.
*x*
the canvas coordinates where 0=left
*y*
the canvas coordinates where 0=bottom
*guiEvent*
the native UI event that generated the mpl event
        This method will call all functions connected to the
'motion_notify_event' with a :class:`MouseEvent` instance.
"""
self._lastx, self._lasty = x, y
s = 'motion_notify_event'
event = MouseEvent(s, self, x, y, self._button, self._key,
guiEvent=guiEvent)
self.callbacks.process(s, event)
def leave_notify_event(self, guiEvent=None):
"""
Backend derived classes should call this function when leaving
canvas
*guiEvent*
the native UI event that generated the mpl event
"""
self.callbacks.process('figure_leave_event', LocationEvent.lastevent)
LocationEvent.lastevent = None
self._lastx, self._lasty = None, None
def enter_notify_event(self, guiEvent=None, xy=None):
"""
Backend derived classes should call this function when entering
canvas
*guiEvent*
the native UI event that generated the mpl event
*xy*
the coordinate location of the pointer when the canvas is
entered
"""
if xy is not None:
x, y = xy
self._lastx, self._lasty = x, y
event = Event('figure_enter_event', self, guiEvent)
self.callbacks.process('figure_enter_event', event)
def idle_event(self, guiEvent=None):
"""Called when GUI is idle."""
s = 'idle_event'
event = IdleEvent(s, self, guiEvent=guiEvent)
self.callbacks.process(s, event)
def grab_mouse(self, ax):
"""
Set the child axes which are currently grabbing the mouse events.
Usually called by the widgets themselves.
It is an error to call this if the mouse is already grabbed by
another axes.
"""
if self.mouse_grabber not in (None, ax):
            raise RuntimeError(
                'two different axes attempted to grab mouse input')
self.mouse_grabber = ax
def release_mouse(self, ax):
"""
Release the mouse grab held by the axes, ax.
Usually called by the widgets.
        It is ok to call this even if *ax* doesn't have the mouse
grab currently.
"""
if self.mouse_grabber is ax:
self.mouse_grabber = None
def draw(self, *args, **kwargs):
"""
Render the :class:`~matplotlib.figure.Figure`
"""
pass
def draw_idle(self, *args, **kwargs):
"""
        :meth:`draw` only if idle; defaults to draw but backends can override
"""
self.draw(*args, **kwargs)
def draw_cursor(self, event):
"""
Draw a cursor in the event.axes if inaxes is not None. Use
native GUI drawing for efficiency if possible
"""
pass
def get_width_height(self):
"""
Return the figure width and height in points or pixels
(depending on the backend), truncated to integers
"""
return int(self.figure.bbox.width), int(self.figure.bbox.height)
@classmethod
def get_supported_filetypes(cls):
"""Return dict of savefig file formats supported by this backend"""
return cls.filetypes
@classmethod
def get_supported_filetypes_grouped(cls):
"""Return a dict of savefig file formats supported by this backend,
where the keys are a file type name, such as 'Joint Photographic
Experts Group', and the values are a list of filename extensions used
for that filetype, such as ['jpg', 'jpeg']."""
groupings = {}
for ext, name in six.iteritems(cls.filetypes):
groupings.setdefault(name, []).append(ext)
groupings[name].sort()
return groupings
def _get_output_canvas(self, format):
"""Return a canvas that is suitable for saving figures to a specified
file format. If necessary, this function will switch to a registered
backend that supports the format.
"""
method_name = 'print_%s' % format
# check if this canvas supports the requested format
if hasattr(self, method_name):
return self
# check if there is a default canvas for the requested format
canvas_class = get_registered_canvas_class(format)
if canvas_class:
return self.switch_backends(canvas_class)
# else report error for unsupported format
formats = sorted(self.get_supported_filetypes())
raise ValueError('Format "%s" is not supported.\n'
'Supported formats: '
'%s.' % (format, ', '.join(formats)))
def print_figure(self, filename, dpi=None, facecolor='w', edgecolor='w',
orientation='portrait', format=None, **kwargs):
"""
Render the figure to hardcopy. Set the figure patch face and edge
colors. This is useful because some of the GUIs have a gray figure
face color background and you'll probably want to override this on
hardcopy.
Arguments are:
*filename*
can also be a file object on image backends
*orientation*
only currently applies to PostScript printing.
*dpi*
the dots per inch to save the figure in; if None, use savefig.dpi
*facecolor*
the facecolor of the figure
*edgecolor*
the edgecolor of the figure
*orientation*
            'landscape' | 'portrait' (not supported on all backends)
*format*
when set, forcibly set the file format to save to
*bbox_inches*
Bbox in inches. Only the given portion of the figure is
saved. If 'tight', try to figure out the tight bbox of
the figure. If None, use savefig.bbox
*pad_inches*
Amount of padding around the figure when bbox_inches is
'tight'. If None, use savefig.pad_inches
*bbox_extra_artists*
A list of extra artists that will be considered when the
tight bbox is calculated.
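        Example usage (a minimal sketch; ``fig`` is an assumed, existing
        figure -- normally you would call ``fig.savefig``, which delegates
        here)::
            fig.canvas.print_figure('output.png', dpi=150)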
"""
if format is None:
# get format from filename, or from backend's default filetype
if cbook.is_string_like(filename):
format = os.path.splitext(filename)[1][1:]
if format is None or format == '':
format = self.get_default_filetype()
if cbook.is_string_like(filename):
filename = filename.rstrip('.') + '.' + format
format = format.lower()
# get canvas object and print method for format
canvas = self._get_output_canvas(format)
print_method = getattr(canvas, 'print_%s' % format)
if dpi is None:
dpi = rcParams['savefig.dpi']
origDPI = self.figure.dpi
origfacecolor = self.figure.get_facecolor()
origedgecolor = self.figure.get_edgecolor()
self.figure.dpi = dpi
self.figure.set_facecolor(facecolor)
self.figure.set_edgecolor(edgecolor)
bbox_inches = kwargs.pop("bbox_inches", None)
if bbox_inches is None:
bbox_inches = rcParams['savefig.bbox']
if bbox_inches:
# call adjust_bbox to save only the given area
if bbox_inches == "tight":
# when bbox_inches == "tight", it saves the figure
# twice. The first save command is just to estimate
# the bounding box of the figure. A stringIO object is
# used as a temporary file object, but it causes a
# problem for some backends (ps backend with
# usetex=True) if they expect a filename, not a
# file-like object. As I think it is best to change
# the backend to support file-like object, i'm going
# to leave it as it is. However, a better solution
# than stringIO seems to be needed. -JJL
#result = getattr(self, method_name)
result = print_method(
io.BytesIO(),
dpi=dpi,
facecolor=facecolor,
edgecolor=edgecolor,
orientation=orientation,
dryrun=True,
**kwargs)
renderer = self.figure._cachedRenderer
bbox_inches = self.figure.get_tightbbox(renderer)
bbox_artists = kwargs.pop("bbox_extra_artists", None)
if bbox_artists is None:
bbox_artists = self.figure.get_default_bbox_extra_artists()
bbox_filtered = []
for a in bbox_artists:
bbox = a.get_window_extent(renderer)
if a.get_clip_on():
clip_box = a.get_clip_box()
if clip_box is not None:
bbox = Bbox.intersection(bbox, clip_box)
clip_path = a.get_clip_path()
if clip_path is not None and bbox is not None:
clip_path = clip_path.get_fully_transformed_path()
bbox = Bbox.intersection(bbox,
clip_path.get_extents())
if bbox is not None and (bbox.width != 0 or
bbox.height != 0):
bbox_filtered.append(bbox)
if bbox_filtered:
_bbox = Bbox.union(bbox_filtered)
trans = Affine2D().scale(1.0 / self.figure.dpi)
bbox_extra = TransformedBbox(_bbox, trans)
bbox_inches = Bbox.union([bbox_inches, bbox_extra])
pad = kwargs.pop("pad_inches", None)
if pad is None:
pad = rcParams['savefig.pad_inches']
bbox_inches = bbox_inches.padded(pad)
restore_bbox = tight_bbox.adjust_bbox(self.figure, bbox_inches,
canvas.fixed_dpi)
_bbox_inches_restore = (bbox_inches, restore_bbox)
else:
_bbox_inches_restore = None
self._is_saving = True
try:
#result = getattr(self, method_name)(
result = print_method(
filename,
dpi=dpi,
facecolor=facecolor,
edgecolor=edgecolor,
orientation=orientation,
bbox_inches_restore=_bbox_inches_restore,
**kwargs)
finally:
if bbox_inches and restore_bbox:
restore_bbox()
self.figure.dpi = origDPI
self.figure.set_facecolor(origfacecolor)
self.figure.set_edgecolor(origedgecolor)
self.figure.set_canvas(self)
self._is_saving = False
#self.figure.canvas.draw() ## seems superfluous
return result
@classmethod
def get_default_filetype(cls):
"""
Get the default savefig file format as specified in rcParam
``savefig.format``. Returned string excludes period. Overridden
in backends that only support a single file type.
"""
return rcParams['savefig.format']
def get_window_title(self):
"""
Get the title text of the window containing the figure.
Return None if there is no window (e.g., a PS backend).
"""
if hasattr(self, "manager"):
return self.manager.get_window_title()
def set_window_title(self, title):
"""
Set the title text of the window containing the figure. Note that
this has no effect if there is no window (e.g., a PS backend).
"""
if hasattr(self, "manager"):
self.manager.set_window_title(title)
def get_default_filename(self):
"""
Return a string, which includes extension, suitable for use as
a default filename.
"""
default_filename = self.get_window_title() or 'image'
default_filename = default_filename.lower().replace(' ', '_')
return default_filename + '.' + self.get_default_filetype()
def switch_backends(self, FigureCanvasClass):
"""
Instantiate an instance of FigureCanvasClass
This is used for backend switching, e.g., to instantiate a
        FigureCanvasPS from a FigureCanvasGTK. Note that deep copying is
        not done, so any changes to one of the instances (e.g., setting
        figure size or line props) will be reflected in the other
"""
newCanvas = FigureCanvasClass(self.figure)
newCanvas._is_saving = self._is_saving
return newCanvas
def mpl_connect(self, s, func):
"""
Connect event with string *s* to *func*. The signature of *func* is::
def func(event)
where event is a :class:`matplotlib.backend_bases.Event`. The
following events are recognized
- 'button_press_event'
- 'button_release_event'
- 'draw_event'
- 'key_press_event'
- 'key_release_event'
- 'motion_notify_event'
- 'pick_event'
- 'resize_event'
- 'scroll_event'
- 'figure_enter_event',
- 'figure_leave_event',
- 'axes_enter_event',
- 'axes_leave_event'
- 'close_event'
For the location events (button and key press/release), if the
mouse is over the axes, the variable ``event.inaxes`` will be
        set to the :class:`~matplotlib.axes.Axes` the event occurred
        over, and additionally, the variables ``event.xdata`` and
``event.ydata`` will be defined. This is the mouse location
in data coords. See
:class:`~matplotlib.backend_bases.KeyEvent` and
:class:`~matplotlib.backend_bases.MouseEvent` for more info.
Return value is a connection id that can be used with
:meth:`~matplotlib.backend_bases.Event.mpl_disconnect`.
Example usage::
def on_press(event):
print('you pressed', event.button, event.xdata, event.ydata)
cid = canvas.mpl_connect('button_press_event', on_press)
"""
return self.callbacks.connect(s, func)
def mpl_disconnect(self, cid):
"""
Disconnect callback id cid
Example usage::
cid = canvas.mpl_connect('button_press_event', on_press)
#...later
canvas.mpl_disconnect(cid)
"""
return self.callbacks.disconnect(cid)
def new_timer(self, *args, **kwargs):
"""
Creates a new backend-specific subclass of
:class:`backend_bases.Timer`. This is useful for getting periodic
events through the backend's native event loop. Implemented only for
backends with GUIs.
optional arguments:
*interval*
Timer interval in milliseconds
*callbacks*
Sequence of (func, args, kwargs) where func(*args, **kwargs) will
be executed by the timer every *interval*.
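        Example usage (illustrative; assumes a figure ``fig`` and a
        callable ``update``)::
            timer = fig.canvas.new_timer(interval=100)
            timer.add_callback(update)
            timer.start()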
"""
return TimerBase(*args, **kwargs)
def flush_events(self):
"""
Flush the GUI events for the figure. Implemented only for
backends with GUIs.
"""
raise NotImplementedError
def start_event_loop(self, timeout):
"""
Start an event loop. This is used to start a blocking event
loop so that interactive functions, such as ginput and
waitforbuttonpress, can wait for events. This should not be
confused with the main GUI event loop, which is always running
and has nothing to do with this.
This is implemented only for backends with GUIs.
"""
raise NotImplementedError
def stop_event_loop(self):
"""
Stop an event loop. This is used to stop a blocking event
loop so that interactive functions, such as ginput and
waitforbuttonpress, can wait for events.
This is implemented only for backends with GUIs.
"""
raise NotImplementedError
def start_event_loop_default(self, timeout=0):
"""
Start an event loop. This is used to start a blocking event
loop so that interactive functions, such as ginput and
waitforbuttonpress, can wait for events. This should not be
confused with the main GUI event loop, which is always running
and has nothing to do with this.
This function provides default event loop functionality based
on time.sleep that is meant to be used until event loop
functions for each of the GUI backends can be written. As
        such, it raises a deprecation warning.
Call signature::
start_event_loop_default(self,timeout=0)
This call blocks until a callback function triggers
stop_event_loop() or *timeout* is reached. If *timeout* is
<=0, never timeout.
"""
str = "Using default event loop until function specific"
str += " to this GUI is implemented"
warnings.warn(str, mplDeprecation)
if timeout <= 0:
timeout = np.inf
timestep = 0.01
counter = 0
self._looping = True
while self._looping and counter * timestep < timeout:
self.flush_events()
time.sleep(timestep)
counter += 1
def stop_event_loop_default(self):
"""
Stop an event loop. This is used to stop a blocking event
loop so that interactive functions, such as ginput and
waitforbuttonpress, can wait for events.
Call signature::
stop_event_loop_default(self)
"""
self._looping = False
def key_press_handler(event, canvas, toolbar=None):
"""
Implement the default mpl key bindings for the canvas and toolbar
described at :ref:`key-event-handling`
*event*
a :class:`KeyEvent` instance
*canvas*
a :class:`FigureCanvasBase` instance
*toolbar*
a :class:`NavigationToolbar2` instance
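    Example usage (a sketch of how a custom backend might wire this up;
    ``canvas`` is an assumed :class:`FigureCanvasBase` instance)::
        canvas.mpl_connect('key_press_event',
                           lambda event: key_press_handler(event, canvas,
                                                           canvas.toolbar))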
"""
# these bindings happen whether you are over an axes or not
if event.key is None:
return
# Load key-mappings from your matplotlibrc file.
fullscreen_keys = rcParams['keymap.fullscreen']
home_keys = rcParams['keymap.home']
back_keys = rcParams['keymap.back']
forward_keys = rcParams['keymap.forward']
pan_keys = rcParams['keymap.pan']
zoom_keys = rcParams['keymap.zoom']
save_keys = rcParams['keymap.save']
quit_keys = rcParams['keymap.quit']
grid_keys = rcParams['keymap.grid']
toggle_yscale_keys = rcParams['keymap.yscale']
toggle_xscale_keys = rcParams['keymap.xscale']
all = rcParams['keymap.all_axes']
# toggle fullscreen mode (default key 'f')
if event.key in fullscreen_keys:
canvas.manager.full_screen_toggle()
    # quit the figure (default key 'ctrl+w')
if event.key in quit_keys:
Gcf.destroy_fig(canvas.figure)
if toolbar is not None:
# home or reset mnemonic (default key 'h', 'home' and 'r')
if event.key in home_keys:
toolbar.home()
# forward / backward keys to enable left handed quick navigation
# (default key for backward: 'left', 'backspace' and 'c')
elif event.key in back_keys:
toolbar.back()
# (default key for forward: 'right' and 'v')
elif event.key in forward_keys:
toolbar.forward()
# pan mnemonic (default key 'p')
elif event.key in pan_keys:
toolbar.pan()
toolbar._set_cursor(event)
# zoom mnemonic (default key 'o')
elif event.key in zoom_keys:
toolbar.zoom()
toolbar._set_cursor(event)
# saving current figure (default key 's')
elif event.key in save_keys:
toolbar.save_figure()
if event.inaxes is None:
return
# these bindings require the mouse to be over an axes to trigger
# switching on/off a grid in current axes (default key 'g')
if event.key in grid_keys:
event.inaxes.grid()
canvas.draw()
    # toggle scaling of y-axis between 'log' and 'linear' (default key 'l')
elif event.key in toggle_yscale_keys:
ax = event.inaxes
scale = ax.get_yscale()
if scale == 'log':
ax.set_yscale('linear')
ax.figure.canvas.draw()
elif scale == 'linear':
ax.set_yscale('log')
ax.figure.canvas.draw()
    # toggle scaling of x-axis between 'log' and 'linear' (default key 'k')
elif event.key in toggle_xscale_keys:
ax = event.inaxes
scalex = ax.get_xscale()
if scalex == 'log':
ax.set_xscale('linear')
ax.figure.canvas.draw()
elif scalex == 'linear':
ax.set_xscale('log')
ax.figure.canvas.draw()
elif (event.key.isdigit() and event.key != '0') or event.key in all:
        # keys in the 'all' list enable all axes (default key 'a');
        # otherwise, if the key is a number, enable only the axes
        # in which the event was raised
if not (event.key in all):
n = int(event.key) - 1
for i, a in enumerate(canvas.figure.get_axes()):
# consider axes, in which the event was raised
# FIXME: Why only this axes?
if event.x is not None and event.y is not None \
and a.in_axes(event):
if event.key in all:
a.set_navigate(True)
else:
a.set_navigate(i == n)
class NonGuiException(Exception):
pass
class FigureManagerBase(object):
"""
Helper class for pyplot mode, wraps everything up into a neat bundle
    Public attributes:
*canvas*
A :class:`FigureCanvasBase` instance
*num*
The figure number
"""
def __init__(self, canvas, num):
self.canvas = canvas
canvas.manager = self # store a pointer to parent
self.num = num
self.key_press_handler_id = self.canvas.mpl_connect('key_press_event',
self.key_press)
"""
The returned id from connecting the default key handler via
        :meth:`FigureCanvasBase.mpl_connect`.
To disable default key press handling::
manager, canvas = figure.canvas.manager, figure.canvas
canvas.mpl_disconnect(manager.key_press_handler_id)
"""
def show(self):
"""
For GUI backends, show the figure window and redraw.
For non-GUI backends, raise an exception to be caught
by :meth:`~matplotlib.figure.Figure.show`, for an
optional warning.
"""
raise NonGuiException()
def destroy(self):
pass
def full_screen_toggle(self):
pass
def resize(self, w, h):
""""For gui backends, resize the window (in pixels)."""
pass
def key_press(self, event):
"""
Implement the default mpl key bindings defined at
:ref:`key-event-handling`
"""
key_press_handler(event, self.canvas, self.canvas.toolbar)
def show_popup(self, msg):
"""
Display message in a popup -- GUI only
"""
pass
def get_window_title(self):
"""
Get the title text of the window containing the figure.
Return None for non-GUI backends (e.g., a PS backend).
"""
return 'image'
def set_window_title(self, title):
"""
Set the title text of the window containing the figure. Note that
this has no effect for non-GUI backends (e.g., a PS backend).
"""
pass
class Cursors:
# this class is only used as a simple namespace
HAND, POINTER, SELECT_REGION, MOVE = list(range(4))
cursors = Cursors()
class NavigationToolbar2(object):
"""
    Base class for the navigation cursor, version 2.
    Backends must implement a canvas that handles connections for
'button_press_event' and 'button_release_event'. See
:meth:`FigureCanvasBase.mpl_connect` for more information
They must also define
:meth:`save_figure`
save the current figure
:meth:`set_cursor`
if you want the pointer icon to change
:meth:`_init_toolbar`
create your toolbar widget
:meth:`draw_rubberband` (optional)
draw the zoom to rect "rubberband" rectangle
:meth:`press` (optional)
whenever a mouse button is pressed, you'll be notified with
the event
:meth:`release` (optional)
whenever a mouse button is released, you'll be notified with
the event
:meth:`dynamic_update` (optional)
dynamically update the window while navigating
:meth:`set_message` (optional)
display message
:meth:`set_history_buttons` (optional)
you can change the history back / forward buttons to
indicate disabled / enabled state.
That's it, we'll do the rest!
"""
# list of toolitems to add to the toolbar, format is:
# (
# text, # the text of the button (often not visible to users)
# tooltip_text, # the tooltip shown on hover (where possible)
# image_file, # name of the image for the button (without the extension)
# name_of_method, # name of the method in NavigationToolbar2 to call
# )
toolitems = (
('Home', 'Reset original view', 'home', 'home'),
('Back', 'Back to previous view', 'back', 'back'),
('Forward', 'Forward to next view', 'forward', 'forward'),
(None, None, None, None),
('Pan', 'Pan axes with left mouse, zoom with right', 'move', 'pan'),
('Zoom', 'Zoom to rectangle', 'zoom_to_rect', 'zoom'),
(None, None, None, None),
('Subplots', 'Configure subplots', 'subplots', 'configure_subplots'),
('Save', 'Save the figure', 'filesave', 'save_figure'),
)
def __init__(self, canvas):
self.canvas = canvas
canvas.toolbar = self
# a dict from axes index to a list of view limits
self._views = cbook.Stack()
self._positions = cbook.Stack() # stack of subplot positions
self._xypress = None # the location and axis info at the time
# of the press
self._idPress = None
self._idRelease = None
self._active = None
self._lastCursor = None
self._init_toolbar()
self._idDrag = self.canvas.mpl_connect(
'motion_notify_event', self.mouse_move)
self._ids_zoom = []
self._zoom_mode = None
self._button_pressed = None # determined by the button pressed
# at start
self.mode = '' # a mode string for the status bar
self.set_history_buttons()
def set_message(self, s):
"""Display a message on toolbar or in status bar"""
pass
def back(self, *args):
"""move back up the view lim stack"""
self._views.back()
self._positions.back()
self.set_history_buttons()
self._update_view()
def dynamic_update(self):
pass
def draw_rubberband(self, event, x0, y0, x1, y1):
"""Draw a rectangle rubberband to indicate zoom limits"""
pass
def forward(self, *args):
"""Move forward in the view lim stack"""
self._views.forward()
self._positions.forward()
self.set_history_buttons()
self._update_view()
def home(self, *args):
"""Restore the original view"""
self._views.home()
self._positions.home()
self.set_history_buttons()
self._update_view()
def _init_toolbar(self):
"""
This is where you actually build the GUI widgets (called by
__init__). The icons ``home.xpm``, ``back.xpm``, ``forward.xpm``,
``hand.xpm``, ``zoom_to_rect.xpm`` and ``filesave.xpm`` are standard
across backends (there are ppm versions in CVS also).
You just need to set the callbacks
home : self.home
back : self.back
forward : self.forward
hand : self.pan
zoom_to_rect : self.zoom
filesave : self.save_figure
You only need to define the last one - the others are in the base
class implementation.
"""
raise NotImplementedError
def _set_cursor(self, event):
if not event.inaxes or not self._active:
if self._lastCursor != cursors.POINTER:
self.set_cursor(cursors.POINTER)
self._lastCursor = cursors.POINTER
else:
if self._active == 'ZOOM':
if self._lastCursor != cursors.SELECT_REGION:
self.set_cursor(cursors.SELECT_REGION)
self._lastCursor = cursors.SELECT_REGION
elif (self._active == 'PAN' and
self._lastCursor != cursors.MOVE):
self.set_cursor(cursors.MOVE)
self._lastCursor = cursors.MOVE
def mouse_move(self, event):
self._set_cursor(event)
if event.inaxes and event.inaxes.get_navigate():
try:
s = event.inaxes.format_coord(event.xdata, event.ydata)
except (ValueError, OverflowError):
pass
else:
if len(self.mode):
self.set_message('%s, %s' % (self.mode, s))
else:
self.set_message(s)
else:
self.set_message(self.mode)
def pan(self, *args):
"""Activate the pan/zoom tool. pan with left button, zoom with right"""
# set the pointer icon and button press funcs to the
# appropriate callbacks
if self._active == 'PAN':
self._active = None
else:
self._active = 'PAN'
if self._idPress is not None:
self._idPress = self.canvas.mpl_disconnect(self._idPress)
self.mode = ''
if self._idRelease is not None:
self._idRelease = self.canvas.mpl_disconnect(self._idRelease)
self.mode = ''
if self._active:
self._idPress = self.canvas.mpl_connect(
'button_press_event', self.press_pan)
self._idRelease = self.canvas.mpl_connect(
'button_release_event', self.release_pan)
self.mode = 'pan/zoom'
self.canvas.widgetlock(self)
else:
self.canvas.widgetlock.release(self)
for a in self.canvas.figure.get_axes():
a.set_navigate_mode(self._active)
self.set_message(self.mode)
def press(self, event):
"""Called whenver a mouse button is pressed."""
pass
def press_pan(self, event):
"""the press mouse button in pan/zoom mode callback"""
if event.button == 1:
self._button_pressed = 1
elif event.button == 3:
self._button_pressed = 3
else:
self._button_pressed = None
return
x, y = event.x, event.y
# push the current view to define home if stack is empty
if self._views.empty():
self.push_current()
self._xypress = []
for i, a in enumerate(self.canvas.figure.get_axes()):
if (x is not None and y is not None and a.in_axes(event) and
a.get_navigate() and a.can_pan()):
a.start_pan(x, y, event.button)
self._xypress.append((a, i))
self.canvas.mpl_disconnect(self._idDrag)
self._idDrag = self.canvas.mpl_connect('motion_notify_event',
self.drag_pan)
self.press(event)
def press_zoom(self, event):
"""the press mouse button in zoom to rect mode callback"""
# If we're already in the middle of a zoom, pressing another
# button works to "cancel"
if self._ids_zoom != []:
for zoom_id in self._ids_zoom:
self.canvas.mpl_disconnect(zoom_id)
self.release(event)
self.draw()
self._xypress = None
self._button_pressed = None
self._ids_zoom = []
return
if event.button == 1:
self._button_pressed = 1
elif event.button == 3:
self._button_pressed = 3
else:
self._button_pressed = None
return
x, y = event.x, event.y
# push the current view to define home if stack is empty
if self._views.empty():
self.push_current()
self._xypress = []
for i, a in enumerate(self.canvas.figure.get_axes()):
if (x is not None and y is not None and a.in_axes(event) and
a.get_navigate() and a.can_zoom()):
self._xypress.append((x, y, a, i, a.viewLim.frozen(),
a.transData.frozen()))
id1 = self.canvas.mpl_connect('motion_notify_event', self.drag_zoom)
id2 = self.canvas.mpl_connect('key_press_event',
self._switch_on_zoom_mode)
id3 = self.canvas.mpl_connect('key_release_event',
self._switch_off_zoom_mode)
self._ids_zoom = id1, id2, id3
self._zoom_mode = event.key
self.press(event)
def _switch_on_zoom_mode(self, event):
self._zoom_mode = event.key
self.mouse_move(event)
def _switch_off_zoom_mode(self, event):
self._zoom_mode = None
self.mouse_move(event)
def push_current(self):
"""push the current view limits and position onto the stack"""
lims = []
pos = []
for a in self.canvas.figure.get_axes():
xmin, xmax = a.get_xlim()
ymin, ymax = a.get_ylim()
lims.append((xmin, xmax, ymin, ymax))
# Store both the original and modified positions
pos.append((
a.get_position(True).frozen(),
a.get_position().frozen()))
self._views.push(lims)
self._positions.push(pos)
self.set_history_buttons()
def release(self, event):
"""this will be called whenever mouse button is released"""
pass
def release_pan(self, event):
"""the release mouse button callback in pan/zoom mode"""
if self._button_pressed is None:
return
self.canvas.mpl_disconnect(self._idDrag)
self._idDrag = self.canvas.mpl_connect(
'motion_notify_event', self.mouse_move)
for a, ind in self._xypress:
a.end_pan()
if not self._xypress:
return
self._xypress = []
self._button_pressed = None
self.push_current()
self.release(event)
self.draw()
def drag_pan(self, event):
"""the drag callback in pan/zoom mode"""
for a, ind in self._xypress:
            # safer to use the button recorded at the press than the current
            # button: multiple buttons can get pressed during motion...
a.drag_pan(self._button_pressed, event.key, event.x, event.y)
self.dynamic_update()
def drag_zoom(self, event):
"""the drag callback in zoom mode"""
if self._xypress:
x, y = event.x, event.y
lastx, lasty, a, ind, lim, trans = self._xypress[0]
            # clip x, lastx and y, lasty to the axes bounding box
x1, y1, x2, y2 = a.bbox.extents
x, lastx = max(min(x, lastx), x1), min(max(x, lastx), x2)
y, lasty = max(min(y, lasty), y1), min(max(y, lasty), y2)
if self._zoom_mode == "x":
x1, y1, x2, y2 = a.bbox.extents
y, lasty = y1, y2
elif self._zoom_mode == "y":
x1, y1, x2, y2 = a.bbox.extents
x, lastx = x1, x2
self.draw_rubberband(event, x, y, lastx, lasty)
def release_zoom(self, event):
"""the release mouse button callback in zoom to rect mode"""
for zoom_id in self._ids_zoom:
self.canvas.mpl_disconnect(zoom_id)
self._ids_zoom = []
if not self._xypress:
return
last_a = []
for cur_xypress in self._xypress:
x, y = event.x, event.y
lastx, lasty, a, ind, lim, trans = cur_xypress
# ignore singular clicks - 5 pixels is a threshold
if abs(x - lastx) < 5 or abs(y - lasty) < 5:
self._xypress = None
self.release(event)
self.draw()
return
x0, y0, x1, y1 = lim.extents
# zoom to rect
inverse = a.transData.inverted()
lastx, lasty = inverse.transform_point((lastx, lasty))
x, y = inverse.transform_point((x, y))
Xmin, Xmax = a.get_xlim()
Ymin, Ymax = a.get_ylim()
# detect twinx,y axes and avoid double zooming
twinx, twiny = False, False
if last_a:
for la in last_a:
if a.get_shared_x_axes().joined(a, la):
twinx = True
if a.get_shared_y_axes().joined(a, la):
twiny = True
last_a.append(a)
if twinx:
x0, x1 = Xmin, Xmax
else:
if Xmin < Xmax:
if x < lastx:
x0, x1 = x, lastx
else:
x0, x1 = lastx, x
if x0 < Xmin:
x0 = Xmin
if x1 > Xmax:
x1 = Xmax
else:
if x > lastx:
x0, x1 = x, lastx
else:
x0, x1 = lastx, x
if x0 > Xmin:
x0 = Xmin
if x1 < Xmax:
x1 = Xmax
if twiny:
y0, y1 = Ymin, Ymax
else:
if Ymin < Ymax:
if y < lasty:
y0, y1 = y, lasty
else:
y0, y1 = lasty, y
if y0 < Ymin:
y0 = Ymin
if y1 > Ymax:
y1 = Ymax
else:
if y > lasty:
y0, y1 = y, lasty
else:
y0, y1 = lasty, y
if y0 > Ymin:
y0 = Ymin
if y1 < Ymax:
y1 = Ymax
if self._button_pressed == 1:
if self._zoom_mode == "x":
a.set_xlim((x0, x1))
elif self._zoom_mode == "y":
a.set_ylim((y0, y1))
else:
a.set_xlim((x0, x1))
a.set_ylim((y0, y1))
elif self._button_pressed == 3:
if a.get_xscale() == 'log':
alpha = np.log(Xmax / Xmin) / np.log(x1 / x0)
rx1 = pow(Xmin / x0, alpha) * Xmin
rx2 = pow(Xmax / x0, alpha) * Xmin
else:
alpha = (Xmax - Xmin) / (x1 - x0)
rx1 = alpha * (Xmin - x0) + Xmin
rx2 = alpha * (Xmax - x0) + Xmin
if a.get_yscale() == 'log':
alpha = np.log(Ymax / Ymin) / np.log(y1 / y0)
ry1 = pow(Ymin / y0, alpha) * Ymin
ry2 = pow(Ymax / y0, alpha) * Ymin
else:
alpha = (Ymax - Ymin) / (y1 - y0)
ry1 = alpha * (Ymin - y0) + Ymin
ry2 = alpha * (Ymax - y0) + Ymin
if self._zoom_mode == "x":
a.set_xlim((rx1, rx2))
elif self._zoom_mode == "y":
a.set_ylim((ry1, ry2))
else:
a.set_xlim((rx1, rx2))
a.set_ylim((ry1, ry2))
self.draw()
self._xypress = None
self._button_pressed = None
self._zoom_mode = None
self.push_current()
self.release(event)
def draw(self):
"""Redraw the canvases, update the locators"""
for a in self.canvas.figure.get_axes():
xaxis = getattr(a, 'xaxis', None)
yaxis = getattr(a, 'yaxis', None)
locators = []
if xaxis is not None:
locators.append(xaxis.get_major_locator())
locators.append(xaxis.get_minor_locator())
if yaxis is not None:
locators.append(yaxis.get_major_locator())
locators.append(yaxis.get_minor_locator())
for loc in locators:
loc.refresh()
self.canvas.draw_idle()
def _update_view(self):
"""Update the viewlim and position from the view and
position stack for each axes
"""
lims = self._views()
if lims is None:
return
pos = self._positions()
if pos is None:
return
for i, a in enumerate(self.canvas.figure.get_axes()):
xmin, xmax, ymin, ymax = lims[i]
a.set_xlim((xmin, xmax))
a.set_ylim((ymin, ymax))
# Restore both the original and modified positions
a.set_position(pos[i][0], 'original')
a.set_position(pos[i][1], 'active')
self.canvas.draw_idle()
def save_figure(self, *args):
"""Save the current figure"""
raise NotImplementedError
def set_cursor(self, cursor):
"""
Set the current cursor to one of the :class:`Cursors`
        enum values
"""
pass
def update(self):
"""Reset the axes stack"""
self._views.clear()
self._positions.clear()
self.set_history_buttons()
def zoom(self, *args):
"""Activate zoom to rect mode"""
if self._active == 'ZOOM':
self._active = None
else:
self._active = 'ZOOM'
if self._idPress is not None:
self._idPress = self.canvas.mpl_disconnect(self._idPress)
self.mode = ''
if self._idRelease is not None:
self._idRelease = self.canvas.mpl_disconnect(self._idRelease)
self.mode = ''
if self._active:
self._idPress = self.canvas.mpl_connect('button_press_event',
self.press_zoom)
self._idRelease = self.canvas.mpl_connect('button_release_event',
self.release_zoom)
self.mode = 'zoom rect'
self.canvas.widgetlock(self)
else:
self.canvas.widgetlock.release(self)
for a in self.canvas.figure.get_axes():
a.set_navigate_mode(self._active)
self.set_message(self.mode)
def set_history_buttons(self):
"""Enable or disable back/forward button"""
pass
| mit |
costypetrisor/scikit-learn | examples/mixture/plot_gmm_sin.py | 248 | 2747 | """
=================================
Gaussian Mixture Model Sine Curve
=================================
This example highlights the advantages of the Dirichlet Process:
complexity control and dealing with sparse data. The dataset is formed
by 100 points loosely spaced following a noisy sine curve. The fit by
the GMM class, using the expectation-maximization algorithm to fit a
mixture of 10 Gaussian components, finds too-small components and very
little structure. The fits by the Dirichlet process, however, show
that the model can either learn a global structure for the data (small
alpha) or easily interpolate to finding relevant local structure
(large alpha), never falling into the problems shown by the GMM class.
"""
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
from sklearn.externals.six.moves import xrange
# Number of samples per component
n_samples = 100
# Generate random sample following a sine curve
np.random.seed(0)
X = np.zeros((n_samples, 2))
step = 4 * np.pi / n_samples
for i in xrange(X.shape[0]):
x = i * step - 6
X[i, 0] = x + np.random.normal(0, 0.1)
X[i, 1] = 3 * (np.sin(x) + np.random.normal(0, .2))
color_iter = itertools.cycle(['r', 'g', 'b', 'c', 'm'])
for i, (clf, title) in enumerate([
(mixture.GMM(n_components=10, covariance_type='full', n_iter=100),
"Expectation-maximization"),
(mixture.DPGMM(n_components=10, covariance_type='full', alpha=0.01,
n_iter=100),
"Dirichlet Process,alpha=0.01"),
(mixture.DPGMM(n_components=10, covariance_type='diag', alpha=100.,
n_iter=100),
"Dirichlet Process,alpha=100.")]):
clf.fit(X)
splot = plt.subplot(3, 1, 1 + i)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(
clf.means_, clf._get_covars(), color_iter)):
v, w = linalg.eigh(covar)
u = w[0] / linalg.norm(w[0])
# as the DP will not use every component it has access to
# unless it needs it, we shouldn't plot the redundant
# components.
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
plt.xlim(-6, 4 * np.pi - 6)
plt.ylim(-5, 5)
plt.title(title)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
cpcloud/ibis | ibis/expr/window.py | 1 | 15482 | """Encapsulation of SQL window clauses."""
import functools
from typing import NamedTuple, Union
import numpy as np
import pandas as pd
import ibis.common.exceptions as com
import ibis.expr.operations as ops
import ibis.expr.types as ir
import ibis.util as util
def _sequence_to_tuple(x):
return tuple(x) if util.is_iterable(x) else x
RowsWithMaxLookback = NamedTuple('RowsWithMaxLookback',
[('rows', Union[int, np.integer]),
('max_lookback', ir.IntervalValue)]
)
def _choose_non_empty_val(first, second):
if isinstance(first, (int, np.integer)) and first:
non_empty_value = first
elif not isinstance(first, (int, np.integer)) and first is not None:
non_empty_value = first
else:
non_empty_value = second
return non_empty_value
def _determine_how(preceding):
offset_type = type(_get_preceding_value(preceding))
if issubclass(offset_type, (int, np.integer)):
how = 'rows'
elif issubclass(offset_type, ir.IntervalScalar):
how = 'range'
else:
raise TypeError(
'Type {} is not supported for row- or range- based trailing '
'window operations'.format(offset_type)
)
return how
@functools.singledispatch
def _get_preceding_value(preceding):
raise TypeError(
"Type {} is not a valid type for 'preceding' "
"parameter".format(type(preceding))
)
@_get_preceding_value.register(tuple)
def _get_preceding_value_tuple(preceding):
start, end = preceding
if start is None:
preceding_value = end
else:
preceding_value = start
return preceding_value
@_get_preceding_value.register(int)
@_get_preceding_value.register(np.integer)
@_get_preceding_value.register(ir.IntervalScalar)
def _get_preceding_value_simple(preceding):
return preceding
@_get_preceding_value.register(RowsWithMaxLookback)
def _get_preceding_value_mlb(preceding):
preceding_value = preceding.rows
if not isinstance(preceding_value, (int, np.integer)):
raise TypeError("'Rows with max look-back' only supports integer "
"row-based indexing.")
return preceding_value
class Window:
"""Class to encapsulate the details of a window frame.
Notes
-----
This class is patterned after SQL window clauses.
Using None for preceding or following currently indicates unbounded. Use 0
for ``CURRENT ROW``.
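    A minimal sketch (assuming a table expression ``t`` with columns ``g``,
    ``ts`` and ``amount``)::
        w = Window(group_by=[t.g], order_by=[t.ts], preceding=2, following=0)
        expr = t.amount.sum().over(w)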
"""
def __init__(
self,
group_by=None,
order_by=None,
preceding=None,
following=None,
max_lookback=None,
how='rows',
):
if group_by is None:
group_by = []
if order_by is None:
order_by = []
self._group_by = util.promote_list(group_by)
self._order_by = []
for x in util.promote_list(order_by):
if isinstance(x, ir.SortExpr):
pass
elif isinstance(x, ir.Expr):
x = ops.SortKey(x).to_expr()
self._order_by.append(x)
if isinstance(preceding, RowsWithMaxLookback):
# the offset interval is used as the 'preceding' value of a window
# while 'rows' is used to adjust the window created using offset
self.preceding = preceding.max_lookback
self.max_lookback = preceding.rows
else:
self.preceding = _sequence_to_tuple(preceding)
self.max_lookback = max_lookback
self.following = _sequence_to_tuple(following)
self.how = how
self._validate_frame()
def __hash__(self) -> int:
return hash(
(
tuple(gb.op() for gb in self._group_by),
tuple(ob.op() for ob in self._order_by),
self.preceding,
self.following,
self.how,
)
)
def _validate_frame(self):
preceding_tuple = has_preceding = False
following_tuple = has_following = False
if self.preceding is not None:
preceding_tuple = isinstance(self.preceding, tuple)
has_preceding = True
if self.following is not None:
following_tuple = isinstance(self.following, tuple)
has_following = True
if (preceding_tuple and has_following) or (
following_tuple and has_preceding
):
raise com.IbisInputError(
'Can only specify one window side when you want an '
'off-center window'
)
elif preceding_tuple:
start, end = self.preceding
if end is None:
raise com.IbisInputError("preceding end point cannot be None")
if end < 0:
raise com.IbisInputError(
"preceding end point must be non-negative"
)
if start is not None:
if start < 0:
raise com.IbisInputError(
"preceding start point must be non-negative"
)
if start <= end:
raise com.IbisInputError(
"preceding start must be greater than preceding end"
)
elif following_tuple:
start, end = self.following
if start is None:
raise com.IbisInputError(
"following start point cannot be None"
)
if start < 0:
raise com.IbisInputError(
"following start point must be non-negative"
)
if end is not None:
if end < 0:
raise com.IbisInputError(
"following end point must be non-negative"
)
if start >= end:
raise com.IbisInputError(
"following start must be less than following end"
)
else:
if not isinstance(self.preceding, ir.Expr):
if has_preceding and self.preceding < 0:
raise com.IbisInputError(
"'preceding' must be positive, got {}".format(
self.preceding
)
)
if not isinstance(self.following, ir.Expr):
if has_following and self.following < 0:
raise com.IbisInputError(
"'following' must be positive, got {}".format(
self.following
)
)
if self.how not in {'rows', 'range'}:
raise com.IbisInputError(
"'how' must be 'rows' or 'range', got {}".format(self.how)
)
if self.max_lookback is not None:
if not isinstance(
self.preceding, (ir.IntervalValue, pd.Timedelta)):
raise com.IbisInputError(
"'max_lookback' must be specified as an interval "
"or pandas.Timedelta object"
)
def bind(self, table):
# Internal API, ensure that any unresolved expr references (as strings,
# say) are bound to the table being windowed
groups = table._resolve(self._group_by)
sorts = [ops.to_sort_key(table, k) for k in self._order_by]
return self._replace(group_by=groups, order_by=sorts)
def combine(self, window):
if self.how != window.how:
raise com.IbisInputError(
(
"Window types must match. "
"Expecting '{}' Window, got '{}'"
).format(self.how.upper(), window.how.upper())
)
kwds = dict(
preceding=_choose_non_empty_val(self.preceding, window.preceding),
following=_choose_non_empty_val(self.following, window.following),
max_lookback=self.max_lookback or window.max_lookback,
group_by=self._group_by + window._group_by,
order_by=self._order_by + window._order_by,
)
return Window(**kwds)
def group_by(self, expr):
new_groups = self._group_by + util.promote_list(expr)
return self._replace(group_by=new_groups)
def _replace(self, **kwds):
new_kwds = dict(
group_by=kwds.get('group_by', self._group_by),
order_by=kwds.get('order_by', self._order_by),
preceding=kwds.get('preceding', self.preceding),
following=kwds.get('following', self.following),
max_lookback=kwds.get('max_lookback', self.max_lookback),
how=kwds.get('how', self.how),
)
return Window(**new_kwds)
def order_by(self, expr):
new_sorts = self._order_by + util.promote_list(expr)
return self._replace(order_by=new_sorts)
def equals(self, other, cache=None):
if cache is None:
cache = {}
if self is other:
cache[self, other] = True
return True
if not isinstance(other, Window):
cache[self, other] = False
return False
try:
return cache[self, other]
except KeyError:
pass
if len(self._group_by) != len(other._group_by) or not ops.all_equal(
self._group_by, other._group_by, cache=cache
):
cache[self, other] = False
return False
if len(self._order_by) != len(other._order_by) or not ops.all_equal(
self._order_by, other._order_by, cache=cache
):
cache[self, other] = False
return False
equal = ops.all_equal(
self.preceding, other.preceding, cache=cache
) and ops.all_equal(
self.following, other.following, cache=cache
) and ops.all_equal(
self.max_lookback, other.max_lookback, cache=cache
)
cache[self, other] = equal
return equal
def rows_with_max_lookback(rows, max_lookback):
"""Create a bound preceding value for use with trailing window functions"""
return RowsWithMaxLookback(rows, max_lookback)
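# Illustrative usage (names assumed): a trailing frame of at most 3 rows,
# additionally capped at 7 days of look-back along the ordering column:
#   mlb = rows_with_max_lookback(3, ibis.interval(days=7))
#   w = trailing_window(mlb, order_by=t.event_time)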
def window(preceding=None, following=None, group_by=None, order_by=None):
"""Create a window clause for use with window functions.
This ROW window clause aggregates adjacent rows based on differences in row
number.
All window frames / ranges are inclusive.
Parameters
----------
preceding : int, tuple, or None, default None
        Specify None for unbounded, 0 to include the current row; use a
        tuple for an off-center window
following : int, tuple, or None, default None
        Specify None for unbounded, 0 to include the current row; use a
        tuple for an off-center window
group_by : expressions, default None
Either specify here or with TableExpr.group_by
order_by : expressions, default None
For analytic functions requiring an ordering, specify here, or let Ibis
determine the default ordering (for functions like rank)
Returns
-------
Window
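    Examples
    --------
    A minimal sketch (assuming a table expression ``t``)::
        w = ibis.window(group_by=t.g, order_by=t.ts, preceding=5, following=0)
        expr = t.amount.mean().over(w)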
"""
return Window(
preceding=preceding,
following=following,
group_by=group_by,
order_by=order_by,
how='rows',
)
def range_window(preceding=None, following=None, group_by=None, order_by=None):
"""Create a range-based window clause for use with window functions.
This RANGE window clause aggregates rows based upon differences in the
value of the order-by expression.
All window frames / ranges are inclusive.
Parameters
----------
preceding : int, tuple, or None, default None
        Specify None for unbounded, 0 to include the current row; use a
        tuple for an off-center window
following : int, tuple, or None, default None
        Specify None for unbounded, 0 to include the current row; use a
        tuple for an off-center window
group_by : expressions, default None
Either specify here or with TableExpr.group_by
order_by : expressions, default None
For analytic functions requiring an ordering, specify here, or let Ibis
determine the default ordering (for functions like rank)
Returns
-------
Window
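    Examples
    --------
    A minimal sketch (assuming a table expression ``t`` with a numeric
    ``year`` column)::
        w = ibis.range_window(preceding=2, following=0, order_by=t.year)
        expr = t.amount.sum().over(w)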
"""
return Window(
preceding=preceding,
following=following,
group_by=group_by,
order_by=order_by,
how='range',
)
def cumulative_window(group_by=None, order_by=None):
"""Create a cumulative window for use with aggregate window functions.
All window frames / ranges are inclusive.
Parameters
----------
group_by : expressions, default None
Either specify here or with TableExpr.group_by
order_by : expressions, default None
For analytic functions requiring an ordering, specify here, or let Ibis
determine the default ordering (for functions like rank)
Returns
-------
Window
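    Examples
    --------
    A minimal sketch (assuming a table expression ``t``)::
        w = ibis.cumulative_window(group_by=t.g, order_by=t.ts)
        expr = t.amount.sum().over(w)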
"""
return Window(
preceding=None, following=0, group_by=group_by, order_by=order_by
)
def trailing_window(preceding, group_by=None, order_by=None):
"""Create a trailing window for use with aggregate window functions.
Parameters
----------
preceding : int, float or expression of intervals, i.e.
ibis.interval(days=1) + ibis.interval(hours=5)
Int indicates number of trailing rows to include;
0 includes only the current row.
Interval indicates a trailing range window.
group_by : expressions, default None
Either specify here or with TableExpr.group_by
order_by : expressions, default None
For analytic functions requiring an ordering, specify here, or let Ibis
determine the default ordering (for functions like rank)
Returns
-------
Window
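    Examples
    --------
    A minimal sketch (assuming a table expression ``t``)::
        w = ibis.trailing_window(3, order_by=t.ts)
        expr = t.amount.sum().over(w)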
"""
how = _determine_how(preceding)
return Window(
preceding=preceding,
following=0,
group_by=group_by,
order_by=order_by,
how=how
)
def trailing_range_window(preceding, order_by, group_by=None):
"""Create a trailing time window for use with aggregate window functions.
Parameters
----------
preceding : float or expression of intervals, i.e.
ibis.interval(days=1) + ibis.interval(hours=5)
order_by : expressions, default None
For analytic functions requiring an ordering, specify here, or let Ibis
determine the default ordering (for functions like rank)
group_by : expressions, default None
Either specify here or with TableExpr.group_by
Returns
-------
Window
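    Examples
    --------
    A minimal sketch (assuming a table expression ``t`` with a timestamp
    column ``ts``)::
        w = ibis.trailing_range_window(ibis.interval(hours=1), order_by=t.ts)
        expr = t.amount.sum().over(w)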
"""
return Window(
preceding=preceding,
following=0,
group_by=group_by,
order_by=order_by,
how='range',
)
def propagate_down_window(expr, window):
    # Recursively push the window spec down the expression tree, wrapping
    # any analytic operations that are not already windowed in a WindowOp.
op = expr.op()
clean_args = []
unchanged = True
for arg in op.args:
if isinstance(arg, ir.Expr) and not isinstance(op, ops.WindowOp):
new_arg = propagate_down_window(arg, window)
if isinstance(new_arg.op(), ops.AnalyticOp):
new_arg = ops.WindowOp(new_arg, window).to_expr()
if arg is not new_arg:
unchanged = False
arg = new_arg
clean_args.append(arg)
if unchanged:
return expr
else:
return type(op)(*clean_args).to_expr()
| apache-2.0 |
econpy/google-ngrams | getngrams.py | 2 | 6725 | #!/usr/bin/env python
# -*- coding: utf-8 -*
from ast import literal_eval
from pandas import DataFrame # http://github.com/pydata/pandas
import re
import requests # http://github.com/kennethreitz/requests
import subprocess
import sys
corpora = dict(eng_us_2012=17, eng_us_2009=5, eng_gb_2012=18, eng_gb_2009=6,
chi_sim_2012=23, chi_sim_2009=11, eng_2012=15, eng_2009=0,
eng_fiction_2012=16, eng_fiction_2009=4, eng_1m_2009=1,
fre_2012=19, fre_2009=7, ger_2012=20, ger_2009=8, heb_2012=24,
heb_2009=9, spa_2012=21, spa_2009=10, rus_2012=25, rus_2009=12,
ita_2012=22)
def getNgrams(query, corpus, startYear, endYear, smoothing, caseInsensitive):
params = dict(content=query, year_start=startYear, year_end=endYear,
corpus=corpora[corpus], smoothing=smoothing,
case_insensitive=caseInsensitive)
if params['case_insensitive'] is False:
params.pop('case_insensitive')
if '?' in params['content']:
params['content'] = params['content'].replace('?', '*')
if '@' in params['content']:
params['content'] = params['content'].replace('@', '=>')
req = requests.get('http://books.google.com/ngrams/graph', params=params)
res = re.findall('var data = (.*?);\\n', req.text)
if res:
data = {qry['ngram']: qry['timeseries']
for qry in literal_eval(res[0])}
df = DataFrame(data)
df.insert(0, 'year', list(range(startYear, endYear + 1)))
else:
df = DataFrame()
return req.url, params['content'], df
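# A hypothetical direct call to getNgrams (requires network access; the exact
# columns of the returned DataFrame depend on Google's response):
#
#   url, content, df = getNgrams('Albert Einstein', 'eng_2012', 1900, 2000, 3, False)
#   print(df.head())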
def runQuery(argumentString):
arguments = argumentString.split()
query = ' '.join([arg for arg in arguments if not arg.startswith('-')])
if '?' in query:
query = query.replace('?', '*')
if '@' in query:
query = query.replace('@', '=>')
params = [arg for arg in arguments if arg.startswith('-')]
corpus, startYear, endYear, smoothing = 'eng_2012', 1800, 2000, 3
printHelp, caseInsensitive, allData = False, False, False
toSave, toPrint, toPlot = True, True, False
# parsing the query parameters
for param in params:
if '-nosave' in param:
toSave = False
elif '-noprint' in param:
toPrint = False
elif '-plot' in param:
toPlot = True
elif '-corpus' in param:
corpus = param.split('=')[1].strip()
elif '-startYear' in param:
startYear = int(param.split('=')[1])
elif '-endYear' in param:
endYear = int(param.split('=')[1])
elif '-smoothing' in param:
smoothing = int(param.split('=')[1])
elif '-caseInsensitive' in param:
caseInsensitive = True
elif '-alldata' in param:
allData = True
elif '-help' in param:
printHelp = True
else:
print(('Did not recognize the following argument: %s' % param))
if printHelp:
print('See README file.')
else:
if '*' in query and caseInsensitive is True:
caseInsensitive = False
notifyUser = True
warningMessage = "*NOTE: Wildcard and case-insensitive " + \
"searches can't be combined, so the " + \
"case-insensitive option was ignored."
elif '_INF' in query and caseInsensitive is True:
caseInsensitive = False
notifyUser = True
warningMessage = "*NOTE: Inflected form and case-insensitive " + \
"searches can't be combined, so the " + \
"case-insensitive option was ignored."
else:
notifyUser = False
url, urlquery, df = getNgrams(query, corpus, startYear, endYear,
smoothing, caseInsensitive)
if not allData:
if caseInsensitive is True:
for col in df.columns:
if col.count('(All)') == 1:
df[col.replace(' (All)', '')] = df.pop(col)
elif col.count(':chi_') == 1 or corpus.startswith('chi_'):
pass
elif col.count(':ger_') == 1 or corpus.startswith('ger_'):
pass
elif col.count(':heb_') == 1 or corpus.startswith('heb_'):
pass
elif col.count('(All)') == 0 and col != 'year':
if col not in urlquery.split(','):
df.pop(col)
if '_INF' in query:
for col in df.columns:
if '_INF' in col:
df.pop(col)
if '*' in query:
for col in df.columns:
if '*' in col:
df.pop(col)
if toPrint:
print((','.join(df.columns.tolist())))
for row in df.iterrows():
try:
print(('%d,' % int(row[1].values[0]) +
','.join(['%.12f' % s for s in row[1].values[1:]])))
except:
print((','.join([str(s) for s in row[1].values])))
queries = ''.join(urlquery.replace(',', '_').split())
if '*' in queries:
queries = queries.replace('*', 'WILDCARD')
if caseInsensitive is True:
word_case = 'caseInsensitive'
else:
word_case = 'caseSensitive'
filename = '%s-%s-%d-%d-%d-%s.csv' % (queries, corpus, startYear,
endYear, smoothing, word_case)
if toSave:
for col in df.columns:
if '>' in col:
df[col.replace('>', '>')] = df.pop(col)
df.to_csv(filename, index=False)
print(('Data saved to %s' % filename))
if toPlot:
try:
subprocess.call(['python', 'xkcd.py', filename])
except:
if not toSave:
print(('Currently, if you want to create a plot you ' +
'must also save the data. Rerun your query, ' +
'removing the -nosave option.'))
else:
print(('Plotting Failed: %s' % filename))
if notifyUser:
print(warningMessage)
if __name__ == '__main__':
argumentString = ' '.join(sys.argv[1:])
if argumentString == '':
        argumentString = input('Enter query (or -help):')
else:
try:
runQuery(argumentString)
except:
print('An error occurred.')
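# Example command-line invocations (illustrative; the flags are the ones parsed
# in runQuery above):
#   python getngrams.py railroad,radio,television -corpus=eng_2012 -startYear=1900 -endYear=2000
#   python getngrams.py Einstein -caseInsensitive -plot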
| mit |
gregreen/legacypipe | py/legacypipe/write_initial_catalog.py | 1 | 3639 | from __future__ import print_function
if __name__ == '__main__':
import matplotlib
matplotlib.use('Agg')
import os  # used below for the $BOSS_PHOTOOBJ / $PHOTO_RESOLVE environment checks
import numpy as np
from common import *
from tractor import *
if __name__ == '__main__':
import optparse
parser = optparse.OptionParser()
parser.add_option('-b', '--brick', type=int, help='Brick ID to run: default %default',
default=377306)
parser.add_option('-s', '--sed-matched', action='store_true', default=False,
help='Run SED-matched filter?')
parser.add_option('--bands', default='grz', help='Bands to retrieve')
parser.add_option('-o', '--output', help='Output filename for catalog',
default='initial-cat.fits')
parser.add_option('--threads', type=int, help='Run multi-threaded')
parser.add_option('-W', type=int, default=3600, help='Target image width (default %default)')
parser.add_option('-H', type=int, default=3600, help='Target image height (default %default)')
if not (('BOSS_PHOTOOBJ' in os.environ) and ('PHOTO_RESOLVE' in os.environ)):
print('''$BOSS_PHOTOOBJ and $PHOTO_RESOLVE not set -- on NERSC, you can do:
export BOSS_PHOTOOBJ=/project/projectdirs/cosmo/data/sdss/pre13/eboss/photoObj.v5b
export PHOTO_RESOLVE=/project/projectdirs/cosmo/data/sdss/pre13/eboss/resolve/2013-07-29
To read SDSS files from the local filesystem rather than downloading them.
''')
opt,args = parser.parse_args()
brickid = opt.brick
bands = opt.bands
if opt.threads and opt.threads > 1:
from astrometry.util.multiproc import multiproc
mp = multiproc(opt.threads)
else:
mp = multiproc()
ps = None
plots = False
decals = Decals()
brick = decals.get_brick(brickid)
print('Chosen brick:')
brick.about()
targetwcs = wcs_for_brick(brick, W=opt.W, H=opt.H)
W,H = targetwcs.get_width(), targetwcs.get_height()
# Read SDSS sources
cat,T = get_sdss_sources(bands, targetwcs)
if opt.sed_matched:
# Read images
tims = decals.tims_touching_wcs(targetwcs, mp, mock_psf=True, bands=bands)
print('Rendering detection maps...')
detmaps, detivs = detection_maps(tims, targetwcs, bands, mp)
SEDs = sed_matched_filters(bands)
Tnew,newcat,nil = run_sed_matched_filters(SEDs, bands, detmaps, detivs,
(T.itx,T.ity), targetwcs)
T = merge_tables([T,Tnew], columns='fillzero')
cat.extend(newcat)
from desi_common import prepare_fits_catalog
TT = T.copy()
for k in ['itx','ity','index']:
TT.delete_column(k)
for col in TT.get_columns():
if not col in ['tx', 'ty', 'blob']:
TT.rename(col, 'sdss_%s' % col)
TT.brickid = np.zeros(len(TT), np.int32) + brickid
TT.objid = np.arange(len(TT)).astype(np.int32)
invvars = None
hdr = None
fs = None
cat.thawAllRecursive()
T2,hdr = prepare_fits_catalog(cat, invvars, TT, hdr, bands, fs)
# Unpack shape columns
T2.shapeExp_r = T2.shapeExp[:,0]
T2.shapeExp_e1 = T2.shapeExp[:,1]
T2.shapeExp_e2 = T2.shapeExp[:,2]
T2.shapeDev_r = T2.shapeExp[:,0]
T2.shapeDev_e1 = T2.shapeExp[:,1]
T2.shapeDev_e2 = T2.shapeExp[:,2]
T2.shapeExp_r_ivar = T2.shapeExp_ivar[:,0]
T2.shapeExp_e1_ivar = T2.shapeExp_ivar[:,1]
T2.shapeExp_e2_ivar = T2.shapeExp_ivar[:,2]
T2.shapeDev_r_ivar = T2.shapeExp_ivar[:,0]
T2.shapeDev_e1_ivar = T2.shapeExp_ivar[:,1]
T2.shapeDev_e2_ivar = T2.shapeExp_ivar[:,2]
T2.writeto(opt.output)
print('Wrote', opt.output)
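# Example invocation (illustrative; options as defined by the optparse parser above):
#   python write_initial_catalog.py -b 377306 --bands grz --threads 4 -o initial-cat.fits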
| gpl-2.0 |
SuLab/scheduled-bots | scheduled_bots/phenotypes/mitodb_bot.py | 1 | 6364 | import argparse
import json
import os
from datetime import datetime
from itertools import groupby
from time import gmtime, strftime, strptime
import pandas as pd
from tqdm import tqdm
from scheduled_bots import PROPS, ITEMS
from wikidataintegrator import wdi_core, wdi_helpers, wdi_login
from wikidataintegrator.ref_handlers import update_retrieved_if_new_multiple_refs
from wikidataintegrator.wdi_helpers import PublicationHelper
from wikidataintegrator.wdi_helpers import try_write
__metadata__ = {
'name': 'MitoBot',
'maintainer': 'GSS',
'tags': ['disease', 'phenotype'],
'properties': [PROPS['symptoms']]
}
try:
from scheduled_bots.local import WDUSER, WDPASS
except ImportError:
if "WDUSER" in os.environ and "WDPASS" in os.environ:
WDUSER = os.environ['WDUSER']
WDPASS = os.environ['WDPASS']
else:
raise ValueError("WDUSER and WDPASS must be specified in local.py or as environment variables")
class MitoBot:
def __init__(self, records, login, write=True, run_one=False):
"""
# records is a list of dicts that look like:
{'Added on(yyyy-mm-dd)': '2011-10-27',
'Organ system': 'nervous',
'Percent affected': '100 %',
'Pubmed id': 19696032,
'Symptom/sign': 'ataxia',
'disease': 606002,
'hpo': 'HP:0001251'}
"""
self.records = records
self.login = login
self.write = write
self.run_one = run_one
self.core_props = set()
self.append_props = [PROPS['symptoms']]
self.item_engine = self.make_item_engine()
def make_item_engine(self):
append_props = self.append_props
core_props = self.core_props
class SubCls(wdi_core.WDItemEngine):
def __init__(self, *args, **kwargs):
kwargs['fast_run'] = False
kwargs['ref_handler'] = update_retrieved_if_new_multiple_refs
kwargs['core_props'] = core_props
kwargs['append_value'] = append_props
super(SubCls, self).__init__(*args, **kwargs)
return SubCls
@staticmethod
def create_reference(omim, pmid, login=None):
"""
Reference is:
retrieved: date
stated in: links to pmid items
optional reference URL
"""
#
ref = [wdi_core.WDItemID(ITEMS['MitoDB'], PROPS['curator'], is_reference=True)]
t = strftime("+%Y-%m-%dT00:00:00Z", gmtime())
ref.append(wdi_core.WDTime(t, prop_nr=PROPS['retrieved'], is_reference=True))
pmid_qid, _, success = PublicationHelper(ext_id=pmid, id_type='pmid', source="europepmc").get_or_create(login)
if success is True:
ref.append(wdi_core.WDItemID(pmid_qid, PROPS['stated in'], is_reference=True))
ref_url = "http://mitodb.com/symptoms.php?oid={}&symptoms=Show"
ref.append(wdi_core.WDUrl(ref_url.format(omim), PROPS['reference URL'], is_reference=True))
return ref
@staticmethod
def create_qualifier(incidence):
q = []
if incidence:
q.append(wdi_core.WDQuantity(incidence, PROPS['incidence'], is_qualifier=True,
unit="http://www.wikidata.org/entity/" + ITEMS['percentage']))
pass
return q
def run_one_disease(self, disease_qid, records):
ss = []
for record in records:
incidence = float(record['Percent affected'][:-2])
pmid = record['Pubmed id']
phenotype_qid = record['phenotype_qid']
omim_id = record['disease']
refs = [self.create_reference(omim_id, pmid=pmid, login=self.login)]
qual = self.create_qualifier(incidence)
s = wdi_core.WDItemID(phenotype_qid, PROPS['symptoms'], references=refs, qualifiers=qual)
ss.append(s)
item = self.item_engine(wd_item_id=disease_qid, data=ss)
assert not item.create_new_item
try_write(item, record_id=disease_qid, record_prop=PROPS['symptoms'],
edit_summary="Add phenotype from mitodb", login=self.login, write=self.write)
def run(self):
if self.run_one:
d = [x for x in self.records if x['disease_qid'] == self.run_one]
if d:
print(d[0])
self.run_one_disease(d[0]['disease_qid'], d)
else:
raise ValueError("{} not found".format(self.run_one))
return None
self.records = sorted(self.records, key=lambda x: x['disease_qid'])
record_group = groupby(self.records, key=lambda x: x['disease_qid'])
for disease_qid, sub_records in tqdm(record_group):
self.run_one_disease(disease_qid, sub_records)
def main(write=True, run_one=None):
omim_qid = wdi_helpers.id_mapper(PROPS['OMIM ID'], prefer_exact_match=True, return_as_set=True)
omim_qid = {k: list(v)[0] for k, v in omim_qid.items() if len(v) == 1}
hpo_qid = wdi_helpers.id_mapper(PROPS['Human Phenotype Ontology ID'], prefer_exact_match=True, return_as_set=True)
hpo_qid = {k: list(v)[0] for k, v in hpo_qid.items() if len(v) == 1}
df = pd.read_csv("mitodb.csv", dtype=str)
df['disease_qid'] = df.disease.map(omim_qid.get)
df['phenotype_qid'] = df.hpo.map(hpo_qid.get)
df.dropna(subset=['disease_qid', 'phenotype_qid'], inplace=True)
records = df.to_dict("records")
login = wdi_login.WDLogin(user=WDUSER, pwd=WDPASS)
bot = MitoBot(records, login, write=write, run_one=run_one)
bot.run()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='run mitodb phenotype bot')
parser.add_argument('--dummy', help='do not actually do write', action='store_true')
parser.add_argument('--run-one', help='run one disease, by qid')
args = parser.parse_args()
log_dir = "./logs"
run_id = datetime.now().strftime('%Y%m%d_%H:%M')
__metadata__['run_id'] = run_id
log_name = '{}-{}.log'.format(__metadata__['name'], run_id)
if wdi_core.WDItemEngine.logger is not None:
wdi_core.WDItemEngine.logger.handles = []
wdi_core.WDItemEngine.setup_logging(log_dir=log_dir, log_name=log_name, header=json.dumps(__metadata__),
logger_name='mitodb')
main(write=not args.dummy, run_one=args.run_one)
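# Example invocations (illustrative; see the argparse options defined above;
# the QID below is a placeholder, not a real item):
#   python mitodb_bot.py --dummy               # parse mitodb.csv and log, but do not write
#   python mitodb_bot.py --run-one Qxxxxxx     # process a single disease item by QID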
| mit |
pbreach/pysd | tests/unit_test_utils.py | 2 | 7923 | from unittest import TestCase
import xarray as xr
import pandas as pd
from . import test_utils
import doctest
class TestUtils(TestCase):
    def test_get_return_elements_subscripts(self):
from pysd.utils import get_return_elements
self.assertEqual(
get_return_elements(["Inflow A[Entry 1,Column 1]",
"Inflow A[Entry 1,Column 2]"],
{'Inflow A': 'inflow_a'},
{'Dim1': ['Entry 1', 'Entry 2'],
'Dim2': ['Column 1', 'Column 2']}),
(['inflow_a'],
{'Inflow A[Entry 1,Column 1]': ('inflow_a', {'Dim1': ['Entry 1'],
'Dim2': ['Column 1']}),
'Inflow A[Entry 1,Column 2]': ('inflow_a', {'Dim1': ['Entry 1'],
'Dim2': ['Column 2']})}
)
)
def test_get_return_elements_realnames(self):
from pysd.utils import get_return_elements
self.assertEqual(
get_return_elements(["Inflow A",
"Inflow B"],
subscript_dict={'Dim1': ['Entry 1', 'Entry 2'],
'Dim2': ['Column 1', 'Column 2']},
namespace={'Inflow A': 'inflow_a',
'Inflow B': 'inflow_b'}),
(['inflow_a', 'inflow_b'],
{'Inflow A': ('inflow_a', {}),
'Inflow B': ('inflow_b', {})}
)
)
def test_get_return_elements_pysafe_names(self):
from pysd.utils import get_return_elements
self.assertEqual(
get_return_elements(["inflow_a",
"inflow_b"],
subscript_dict={'Dim1': ['Entry 1', 'Entry 2'],
'Dim2': ['Column 1', 'Column 2']},
namespace={'Inflow A': 'inflow_a',
'Inflow B': 'inflow_b'}),
(['inflow_a', 'inflow_b'],
{'inflow_a': ('inflow_a', {}),
'inflow_b': ('inflow_b', {})}
)
)
def test_make_flat_df(self):
from pysd.utils import make_flat_df
frames = [{'elem1': xr.DataArray([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
{'Dim1': ['A', 'B', 'C'],
'Dim2': ['D', 'E', 'F']},
dims=['Dim1', 'Dim2']),
'elem2': xr.DataArray([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
{'Dim1': ['A', 'B', 'C'],
'Dim2': ['D', 'E', 'F']},
dims=['Dim1', 'Dim2'])},
{'elem1': xr.DataArray([[2, 4, 6], [8, 10, 12], [14, 16, 19]],
{'Dim1': ['A', 'B', 'C'],
'Dim2': ['D', 'E', 'F']},
dims=['Dim1', 'Dim2']),
'elem2': xr.DataArray([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
{'Dim1': ['A', 'B', 'C'],
'Dim2': ['D', 'E', 'F']},
dims=['Dim1', 'Dim2'])}]
return_addresses = {'Elem1[B,F]': ('elem1', {'Dim1': ['B'], 'Dim2': ['F']})}
df = pd.DataFrame([{'Elem1[B,F]': 6}, {'Elem1[B,F]': 12}])
resultdf = make_flat_df(frames, return_addresses)
test_utils.assert_frames_close(resultdf, df, rtol=.01)
def test_visit_addresses(self):
from pysd.utils import visit_addresses
frame = {'elem1': xr.DataArray([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
{'Dim1': ['A', 'B', 'C'],
'Dim2': ['D', 'E', 'F']},
dims=['Dim1', 'Dim2']),
'elem2': xr.DataArray([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
{'Dim1': ['A', 'B', 'C'],
'Dim2': ['D', 'E', 'F']},
dims=['Dim1', 'Dim2'])}
return_addresses = {'Elem1[B,F]': ('elem1', {'Dim1': ['B'], 'Dim2': ['F']})}
self.assertEqual(visit_addresses(frame, return_addresses),
{'Elem1[B,F]': 6})
def test_visit_addresses_nosubs(self):
from pysd.utils import visit_addresses
frame = {'elem1': 25, 'elem2': 13}
return_addresses = {'Elem1': ('elem1', {}),
'Elem2': ('elem2', {})}
self.assertEqual(visit_addresses(frame, return_addresses),
{'Elem1': 25, 'Elem2': 13})
def test_visit_addresses_return_array(self):
""" There could be cases where we want to
return a whole section of an array - ie, by passing in only part of
the simulation dictionary. in this case, we can't force to float..."""
from pysd.utils import visit_addresses
frame = {'elem1': xr.DataArray([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
{'Dim1': ['A', 'B', 'C'],
'Dim2': ['D', 'E', 'F']},
dims=['Dim1', 'Dim2']),
'elem2': xr.DataArray([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
{'Dim1': ['A', 'B', 'C'],
'Dim2': ['D', 'E', 'F']},
dims=['Dim1', 'Dim2'])}
return_addresses = {'Elem1[A, Dim2]': ('elem1', {'Dim1': ['A'], 'Dim2': ['D', 'E', 'F']})}
actual = visit_addresses(frame, return_addresses)
expected = {'Elem1[A, Dim2]':
xr.DataArray([[1, 2, 3]],
{'Dim1': ['A'],
'Dim2': ['D', 'E', 'F']},
dims=['Dim1', 'Dim2']),
}
self.assertIsInstance(list(actual.values())[0], xr.DataArray)
self.assertEqual(actual['Elem1[A, Dim2]'].shape,
expected['Elem1[A, Dim2]'].shape)
# Todo: test that the values are equal
def test_make_coord_dict(self):
from pysd.utils import make_coord_dict
self.assertEqual(make_coord_dict(['Dim1', 'D'],
{'Dim1': ['A', 'B', 'C'],
'Dim2': ['D', 'E', 'F']},
terse=True),
{'Dim2': ['D']})
self.assertEqual(make_coord_dict(['Dim1', 'D'],
{'Dim1': ['A', 'B', 'C'],
'Dim2': ['D', 'E', 'F']},
terse=False),
{'Dim1': ['A', 'B', 'C'], 'Dim2': ['D']})
def test_find_subscript_name(self):
from pysd.utils import find_subscript_name
self.assertEqual(find_subscript_name({'Dim1': ['A', 'B'],
'Dim2': ['C', 'D', 'E'],
'Dim3': ['F', 'G', 'H', 'I']},
'D'),
'Dim2')
self.assertEqual(find_subscript_name({'Dim1': ['A', 'B'],
'Dim2': ['C', 'D', 'E'],
'Dim3': ['F', 'G', 'H', 'I']},
'Dim3'),
'Dim3')
def test_doctests(self):
import pysd.utils
doctest.DocTestSuite(pysd.utils)
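# To run only this module's tests (assuming the tests/ layout given in the
# repository path above), something like the following should work:
#   python -m pytest tests/unit_test_utils.py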
| mit |
mayblue9/scikit-learn | sklearn/datasets/mlcomp.py | 289 | 3855 | # Copyright (c) 2010 Olivier Grisel <[email protected]>
# License: BSD 3 clause
"""Glue code to load http://mlcomp.org data as a scikit.learn dataset"""
import os
import numbers
from sklearn.datasets.base import load_files
def _load_document_classification(dataset_path, metadata, set_=None, **kwargs):
if set_ is not None:
dataset_path = os.path.join(dataset_path, set_)
return load_files(dataset_path, metadata.get('description'), **kwargs)
LOADERS = {
'DocumentClassification': _load_document_classification,
# TODO: implement the remaining domain formats
}
def load_mlcomp(name_or_id, set_="raw", mlcomp_root=None, **kwargs):
"""Load a datasets as downloaded from http://mlcomp.org
Parameters
----------
name_or_id : the integer id or the string name metadata of the MLComp
dataset to load
set_ : select the portion to load: 'train', 'test' or 'raw'
mlcomp_root : the filesystem path to the root folder where MLComp datasets
are stored, if mlcomp_root is None, the MLCOMP_DATASETS_HOME
environment variable is looked up instead.
**kwargs : domain specific kwargs to be passed to the dataset loader.
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'filenames', the files holding the raw to learn, 'target', the
classification labels (integer index), 'target_names',
the meaning of the labels, and 'DESCR', the full description of the
dataset.
    Note on the lookup process: depending on the type of name_or_id, the
    loader will choose between integer id lookup or metadata name lookup by
looking at the unzipped archives and metadata file.
TODO: implement zip dataset loading too
"""
if mlcomp_root is None:
try:
mlcomp_root = os.environ['MLCOMP_DATASETS_HOME']
except KeyError:
raise ValueError("MLCOMP_DATASETS_HOME env variable is undefined")
mlcomp_root = os.path.expanduser(mlcomp_root)
mlcomp_root = os.path.abspath(mlcomp_root)
mlcomp_root = os.path.normpath(mlcomp_root)
if not os.path.exists(mlcomp_root):
raise ValueError("Could not find folder: " + mlcomp_root)
# dataset lookup
if isinstance(name_or_id, numbers.Integral):
# id lookup
dataset_path = os.path.join(mlcomp_root, str(name_or_id))
else:
# assume name based lookup
dataset_path = None
expected_name_line = "name: " + name_or_id
for dataset in os.listdir(mlcomp_root):
metadata_file = os.path.join(mlcomp_root, dataset, 'metadata')
if not os.path.exists(metadata_file):
continue
with open(metadata_file) as f:
for line in f:
if line.strip() == expected_name_line:
dataset_path = os.path.join(mlcomp_root, dataset)
break
if dataset_path is None:
raise ValueError("Could not find dataset with metadata line: " +
expected_name_line)
# loading the dataset metadata
metadata = dict()
metadata_file = os.path.join(dataset_path, 'metadata')
if not os.path.exists(metadata_file):
raise ValueError(dataset_path + ' is not a valid MLComp dataset')
with open(metadata_file) as f:
for line in f:
if ":" in line:
key, value = line.split(":", 1)
metadata[key.strip()] = value.strip()
    format = metadata.get('format', 'unknown')
loader = LOADERS.get(format)
if loader is None:
raise ValueError("No loader implemented for format: " + format)
return loader(dataset_path, metadata, set_=set_, **kwargs)
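# A hypothetical usage sketch (assumes MLComp archives are unpacked under
# $MLCOMP_DATASETS_HOME and that a dataset named '20news-18828' exists there):
#
#   from sklearn.datasets.mlcomp import load_mlcomp
#   news_train = load_mlcomp('20news-18828', set_='train')
#   print(news_train.target_names)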
| bsd-3-clause |
dpshelio/sunpy | examples/plotting/simple_differential_rotation.py | 1 | 3061 | """
============================
Simple Differential Rotation
============================
The Sun is known to rotate differentially, meaning that the rotation rate
near the poles (rotation period of approximately 35 days) is not the same as
the rotation rate near the equator (rotation period of approximately 25 days).
This is possible because the Sun is not a solid body. Though it is still poorly
understood, it is fairly well measured and must be taken into account
when comparing observations of features on the Sun over time.
A good review can be found in Beck 1999 Solar Physics 191, 47–70.
This example illustrates solar differential rotation.
"""
# sphinx_gallery_thumbnail_number = 2
import numpy as np
import matplotlib.pyplot as plt
import astropy.units as u
from astropy.coordinates import SkyCoord
from astropy.time import TimeDelta
import sunpy.map
import sunpy.data.sample
from sunpy.physics.differential_rotation import diff_rot, solar_rotate_coordinate
##############################################################################
# Next, let's explore solar differential rotation by replicating Figure 1
# in Beck 1999.
latitudes = u.Quantity(np.arange(0, 90, 1), 'deg')
dt = 1 * u.day
rotation_rate = [diff_rot(dt, this_lat) / dt for this_lat in latitudes]
rotation_period = [360 * u.deg / this_rate for this_rate in rotation_rate]
fig = plt.figure()
plt.plot(np.sin(latitudes), [this_period.value for this_period in rotation_period])
plt.ylim(38, 24)
plt.ylabel('Rotation Period [{0}]'.format(rotation_period[0].unit))
plt.xlabel('Sin(Latitude)')
plt.title('Solar Differential Rotation Rate')
##############################################################################
# Next, let's show how this looks on the Sun.
# Load in an AIA map:
aia_map = sunpy.map.Map(sunpy.data.sample.AIA_171_IMAGE)
##############################################################################
# Let's define our starting coordinates
hpc_y = u.Quantity(np.arange(-700, 800, 100), u.arcsec)
hpc_x = np.zeros_like(hpc_y)
##############################################################################
# Let's define how many days in the future we want to rotate to
dt = TimeDelta(4*u.day)
future_date = aia_map.date + dt
##############################################################################
# Now let's plot the original and rotated positions on the AIA map.
fig = plt.figure()
ax = plt.subplot(projection=aia_map)
aia_map.plot()
ax.set_title('The effect of {0} days of differential rotation'.format(dt.to(u.day).value))
aia_map.draw_grid()
for this_hpc_x, this_hpc_y in zip(hpc_x, hpc_y):
start_coord = SkyCoord(this_hpc_x, this_hpc_y, frame=aia_map.coordinate_frame)
rotated_coord = solar_rotate_coordinate(start_coord, time=future_date)
coord = SkyCoord([start_coord.Tx, rotated_coord.Tx],
[start_coord.Ty, rotated_coord.Ty],
frame=aia_map.coordinate_frame)
ax.plot_coord(coord, 'o-')
plt.ylim(0, aia_map.data.shape[1])
plt.xlim(0, aia_map.data.shape[0])
plt.show()
| bsd-2-clause |
karstenw/nodebox-pyobjc | examples/Extended Application/matplotlib/examples/api/histogram_path.py | 1 | 2441 | """
========================================================
Building histograms using Rectangles and PolyCollections
========================================================
This example shows how to use a path patch to draw a bunch of
rectangles. The technique of using lots of Rectangle instances, or
the faster method of using PolyCollections, was implemented before we
had proper paths with moveto/lineto, closepoly etc. in mpl. Now that
we have them, we can draw collections of regularly shaped objects with
homogeneous properties more efficiently with a PathCollection. This
example makes a histogram -- it's more work to set up the vertex arrays
at the outset, but it should be much faster for large numbers of
objects.
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.path as path
# nodebox section
if __name__ == '__builtin__':
# were in nodebox
import os
import tempfile
W = 800
inset = 20
size(W, 600)
plt.cla()
plt.clf()
plt.close('all')
def tempimage():
fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
fname = fob.name
fob.close()
return fname
imgx = 20
imgy = 0
def pltshow(plt, dpi=150):
global imgx, imgy
temppath = tempimage()
plt.savefig(temppath, dpi=dpi)
dx,dy = imagesize(temppath)
w = min(W,dx)
image(temppath,imgx,imgy,width=w)
imgy = imgy + dy + 20
os.remove(temppath)
size(W, HEIGHT+dy+40)
else:
def pltshow(mplpyplot):
mplpyplot.show()
# nodebox section end
fig, ax = plt.subplots()
# Fixing random state for reproducibility
np.random.seed(19680801)
# histogram our data with numpy
data = np.random.randn(1000)
n, bins = np.histogram(data, 50)
# get the corners of the rectangles for the histogram
left = np.array(bins[:-1])
right = np.array(bins[1:])
bottom = np.zeros(len(left))
top = bottom + n
# we need a (numrects x numsides x 2) numpy array for the path helper
# function to build a compound path
XY = np.array([[left, left, right, right], [bottom, top, top, bottom]]).T
# get the Path object
barpath = path.Path.make_compound_path_from_polys(XY)
# make a patch out of it
patch = patches.PathPatch(barpath)
ax.add_patch(patch)
# update the view limits
ax.set_xlim(left[0], right[-1])
ax.set_ylim(bottom.min(), top.max())
pltshow(plt)
| mit |
eustislab/horton | scripts/horton-entanglement.py | 1 | 7461 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# HORTON: Helpful Open-source Research TOol for N-fermion systems.
# Copyright (C) 2011-2015 The HORTON Development Team
#
# This file is part of HORTON.
#
# HORTON is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# HORTON is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
#--
import numpy as np, argparse
from horton import __version__, log  # log is used in plot()'s ImportError fallback
def plot(i12ind1, i12ind2, i12val, orbinit, orbfinal, thresh, s1index, s1value):
try:
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
from matplotlib.ticker import NullFormatter
except ImportError:
if log.do_warning:
log.warn('Skipping plots because matplotlib was not found.')
return
norb = orbfinal-orbinit
orbitals = np.arange(orbinit, orbfinal)
theta = 2 * np.pi * (orbitals-orbinit)/(norb)
r = 22*np.ones(norb,int)-3.00*((orbitals-orbinit)%3)
plt.figure(figsize=(10,5))
ax = plt.subplot(121, polar=True)
ax.grid(False)
ax.set_theta_zero_location("N")
ax.set_theta_direction(-1)
plt.plot(theta, r, 'o', markersize=12, alpha=0.2)
for i in range(len(orbitals)):
plt.annotate(
i+1+orbinit,
xy = (theta[i], r[i]), xytext = (0, 0),
textcoords = 'offset points', ha = 'center', va = 'bottom',
fontsize=8, fontweight='bold',
)
ax.yaxis.set_data_interval(0,22.5)
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
legend = []
for ind in range(len(i12val)):
if i12val[ind] >= thresh:
if i12val[ind] >= 0.0001 and i12val[ind] < 0.001:
plt.plot([theta[i12ind1[ind]-orbinit], theta[i12ind2[ind]-orbinit]],
[r[i12ind1[ind]-orbinit], r[i12ind2[ind]-orbinit]],
':', lw=2, color='orange')
if i12val[ind] >= 0.001 and i12val[ind] < 0.01:
plt.plot([theta[i12ind1[ind]-orbinit], theta[i12ind2[ind]-orbinit]],
[r[i12ind1[ind]-orbinit], r[i12ind2[ind]-orbinit]],
'-.', lw=2, color='g')
if i12val[ind] >= 0.01 and i12val[ind] < 0.1:
plt.plot([theta[i12ind1[ind]-orbinit], theta[i12ind2[ind]-orbinit]],
[r[i12ind1[ind]-orbinit], r[i12ind2[ind]-orbinit]],
'--', lw=2, color='r')
if i12val[ind] >= 0.1:
plt.plot([theta[i12ind1[ind]-orbinit], theta[i12ind2[ind]-orbinit]],
[r[i12ind1[ind]-orbinit], r[i12ind2[ind]-orbinit]],
'-', lw=3, color='b')
blue_line = mlines.Line2D([], [], color='blue', marker='', lw=3, ls='-', label='0.1')
red_line = mlines.Line2D([], [], color='red', marker='', lw=2, ls='--', label='0.01')
green_line = mlines.Line2D([], [], color='green', marker='', ls='-.', lw=2, label='0.001')
orange_line = mlines.Line2D([], [], color='orange', marker='', ls=':', lw=2, label='0.0001')
if thresh >= 0.0001 and thresh < 0.001:
legend.append(blue_line)
legend.append(red_line)
legend.append(green_line)
legend.append(orange_line)
if thresh >= 0.001 and thresh < 0.01:
legend.append(blue_line)
legend.append(red_line)
legend.append(green_line)
if thresh >= 0.01 and thresh < 0.1:
legend.append(blue_line)
legend.append(red_line)
if thresh >= 0.1:
legend.append(blue_line)
plt.legend(handles=legend, loc='center', fancybox=True, fontsize=10)
plt.title('Mutual information')
ax2 = plt.subplot(122)
ax2.axis([orbinit, orbfinal, 0, 0.71])
ax2.vlines(s1index, [0], s1value, color='r', linewidth=2, linestyle='-')
plt.ylabel('single-orbital entropy')
plt.xlabel('Orbital index')
plt.plot(s1index, s1value, 'ro', markersize=8)
plt.savefig('orbital_entanglement.png', dpi=300)
def read_i12_data(orbinit, orbfinal, thresh):
index1 = np.array([])
index2 = np.array([])
value = np.array([])
with open("i12.dat") as f:
counter = 1
for line in f:
words = line.split()
if len(words) != 3:
raise IOError('Expecting 3 fields on each data line in i12.dat')
if float(words[2]) >= thresh and int(words[0]) >= orbinit and \
int(words[1]) >= orbinit and int(words[0]) <= orbfinal and \
int(words[1]) <= orbfinal:
index1 = np.append(index1, int(words[0])-1)
index2 = np.append(index2, int(words[1])-1)
value = np.append(value, float(words[2]))
return index1, index2, value
def read_s1_data(orbinit, orbfinal):
index = np.array([])
value = np.array([])
with open("s1.dat") as f:
for line in f:
words = line.split()
if len(words) != 2:
raise IOError('Expecting 2 fields on each data line in s1.dat')
index = np.append(index, int(words[0]))
value = np.append(value, float(words[1]))
if orbfinal:
newind = np.where(index>=(orbinit+1))
index = index[newind]
value = value[newind]
newind2 = np.where(index<=orbfinal)
index = index[newind2]
value = value[newind2]
return index, value
def parse_args():
parser = argparse.ArgumentParser(prog='horton-entanglement.py',
description='This script makes an orbital entanglement plot. It '
'assumes that the files s1.dat and i12.dat are present in '
'the current directory. These two files will be used to '
'create the figure orbital_entanglement.png.')
parser.add_argument('-V', '--version', action='version',
version="%%(prog)s (HORTON version %s)" % __version__)
parser.add_argument('threshold', type=float,
help='Orbitals with a mutual information below this threshold will not '
'be connected by a line.')
parser.add_argument('init_index', default=1, type=int, nargs='?',
help='The first orbital to be used for the plot. [default=%(default)s]')
parser.add_argument('final_index', default=None, type=int, nargs='?',
help='The last orbital to be used for the plot (inclusive). '
'[default=last orbital]')
return parser.parse_args()
def main():
args = parse_args()
orbinit = args.init_index - 1
orbfinal = args.final_index
# Read s1.dat and store data
s1index, s1value = read_s1_data(orbinit, orbfinal)
if orbfinal is None:
orbfinal = len(s1index)
# Read i12.dat and store data
i12index1, i12index2, i12value = read_i12_data(orbinit, orbfinal, args.threshold)
# Plot i12 graph
plt1 = plot(i12index1, i12index2, i12value, orbinit, orbfinal, args.threshold, s1index, s1value)
if __name__ == '__main__':
main()
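# Example invocation (illustrative; the positional arguments are threshold,
# init_index and final_index as defined in parse_args above, and s1.dat and
# i12.dat must be present in the current directory):
#   python horton-entanglement.py 0.001 1 24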
| gpl-3.0 |
Eric89GXL/scikit-learn | examples/cluster/plot_kmeans_stability_low_dim_dense.py | 8 | 4308 | """
============================================================
Empirical evaluation of the impact of k-means initialization
============================================================
Evaluate the ability of k-means initializations strategies to make
the algorithm convergence robust as measured by the relative standard
deviation of the inertia of the clustering (i.e. the sum of distances
to the nearest cluster center).
The first plot shows the best inertia reached for each combination
of the model (``KMeans`` or ``MiniBatchKMeans``) and the init method
(``init="random"`` or ``init="kmeans++"``) for increasing values of the
``n_init`` parameter that controls the number of initializations.
The second plot demonstrates one single run of the ``MiniBatchKMeans``
estimator using ``init="random"`` and ``n_init=1``. This run leads to
a bad convergence (local optimum) with estimated centers stuck
between ground truth clusters.
The dataset used for evaluation is a 2D grid of isotropic Gaussian
clusters that are widely spaced.
"""
print(__doc__)
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
import numpy as np
import pylab as pl
import matplotlib.cm as cm
from sklearn.utils import shuffle
from sklearn.utils import check_random_state
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster import KMeans
random_state = np.random.RandomState(0)
# Number of runs (with randomly generated datasets) for each strategy so as
# to be able to compute an estimate of the standard deviation
n_runs = 5
# k-means models can do several random inits so as to be able to trade
# CPU time for convergence robustness
n_init_range = np.array([1, 5, 10, 15, 20])
# Datasets generation parameters
n_samples_per_center = 100
grid_size = 3
scale = 0.1
n_clusters = grid_size ** 2
def make_data(random_state, n_samples_per_center, grid_size, scale):
random_state = check_random_state(random_state)
centers = np.array([[i, j]
for i in range(grid_size)
for j in range(grid_size)])
n_clusters_true, n_features = centers.shape
noise = random_state.normal(
scale=scale, size=(n_samples_per_center, centers.shape[1]))
X = np.concatenate([c + noise for c in centers])
y = np.concatenate([[i] * n_samples_per_center
for i in range(n_clusters_true)])
return shuffle(X, y, random_state=random_state)
# Part 1: Quantitative evaluation of various init methods
fig = pl.figure()
plots = []
legends = []
cases = [
(KMeans, 'k-means++', {}),
(KMeans, 'random', {}),
(MiniBatchKMeans, 'k-means++', {'max_no_improvement': 3}),
(MiniBatchKMeans, 'random', {'max_no_improvement': 3, 'init_size': 500}),
]
for factory, init, params in cases:
print("Evaluation of %s with %s init" % (factory.__name__, init))
inertia = np.empty((len(n_init_range), n_runs))
for run_id in range(n_runs):
X, y = make_data(run_id, n_samples_per_center, grid_size, scale)
for i, n_init in enumerate(n_init_range):
km = factory(n_clusters=n_clusters, init=init, random_state=run_id,
n_init=n_init, **params).fit(X)
inertia[i, run_id] = km.inertia_
p = pl.errorbar(n_init_range, inertia.mean(axis=1), inertia.std(axis=1))
plots.append(p[0])
legends.append("%s with %s init" % (factory.__name__, init))
pl.xlabel('n_init')
pl.ylabel('inertia')
pl.legend(plots, legends)
pl.title("Mean inertia for various k-means init across %d runs" % n_runs)
# Part 2: Qualitative visual inspection of the convergence
X, y = make_data(random_state, n_samples_per_center, grid_size, scale)
km = MiniBatchKMeans(n_clusters=n_clusters, init='random', n_init=1,
random_state=random_state).fit(X)
fig = pl.figure()
for k in range(n_clusters):
my_members = km.labels_ == k
color = cm.spectral(float(k) / n_clusters, 1)
pl.plot(X[my_members, 0], X[my_members, 1], 'o', marker='.', c=color)
cluster_center = km.cluster_centers_[k]
pl.plot(cluster_center[0], cluster_center[1], 'o',
markerfacecolor=color, markeredgecolor='k', markersize=6)
pl.title("Example cluster allocation with a single random init\n"
"with MiniBatchKMeans")
pl.show()
| bsd-3-clause |
florian-f/sklearn | examples/cluster/plot_segmentation_toy.py | 4 | 3309 | """
===========================================
Spectral clustering for image segmentation
===========================================
In this example, an image with connected circles is generated and
:ref:`spectral_clustering` is used to separate the circles.
In these settings, the spectral clustering approach solves the problem
known as 'normalized graph cuts': the image is seen as a graph of
connected voxels, and the spectral clustering algorithm amounts to
choosing graph cuts defining regions while minimizing the ratio of the
gradient along the cut, and the volume of the region.
As the algorithm tries to balance the volume (i.e. balance the region
sizes), if we take circles with different sizes, the segmentation fails.
In addition, as there is no useful information in the intensity of the image,
or its gradient, we choose to perform the spectral clustering on a graph
that is only weakly informed by the gradient. This is close to performing
a Voronoi partition of the graph.
In addition, we use the mask of the objects to restrict the graph to the
outline of the objects. In this example, we are interested in
separating the objects one from the other, and not from the background.
"""
print(__doc__)
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# License: BSD
import numpy as np
import pylab as pl
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
###############################################################################
l = 100
x, y = np.indices((l, l))
center1 = (28, 24)
center2 = (40, 50)
center3 = (67, 58)
center4 = (24, 70)
radius1, radius2, radius3, radius4 = 16, 14, 15, 14
circle1 = (x - center1[0]) ** 2 + (y - center1[1]) ** 2 < radius1 ** 2
circle2 = (x - center2[0]) ** 2 + (y - center2[1]) ** 2 < radius2 ** 2
circle3 = (x - center3[0]) ** 2 + (y - center3[1]) ** 2 < radius3 ** 2
circle4 = (x - center4[0]) ** 2 + (y - center4[1]) ** 2 < radius4 ** 2
###############################################################################
# 4 circles
img = circle1 + circle2 + circle3 + circle4
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(img, mask=mask)
# Take a decreasing function of the gradient: we make it only weakly
# dependent on the gradient, so the segmentation is close to a Voronoi partition
graph.data = np.exp(-graph.data / graph.data.std())
# Force the solver to be arpack, since amg is numerically
# unstable on this example
labels = spectral_clustering(graph, n_clusters=4, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
pl.matshow(img)
pl.matshow(label_im)
###############################################################################
# 2 circles
img = circle1 + circle2
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
graph = image.img_to_graph(img, mask=mask)
graph.data = np.exp(-graph.data / graph.data.std())
labels = spectral_clustering(graph, n_clusters=2, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
pl.matshow(img)
pl.matshow(label_im)
pl.show()
| bsd-3-clause |
person142/scipy | scipy/spatial/_plotutils.py | 8 | 6957 | import numpy as np
from scipy._lib.decorator import decorator as _decorator
__all__ = ['delaunay_plot_2d', 'convex_hull_plot_2d', 'voronoi_plot_2d']
@_decorator
def _held_figure(func, obj, ax=None, **kw):
import matplotlib.pyplot as plt # type: ignore[import]
if ax is None:
fig = plt.figure()
ax = fig.gca()
return func(obj, ax=ax, **kw)
# As of matplotlib 2.0, the "hold" mechanism is deprecated.
# When matplotlib 1.x is no longer supported, this check can be removed.
was_held = getattr(ax, 'ishold', lambda: True)()
if was_held:
return func(obj, ax=ax, **kw)
try:
ax.hold(True)
return func(obj, ax=ax, **kw)
finally:
ax.hold(was_held)
def _adjust_bounds(ax, points):
margin = 0.1 * points.ptp(axis=0)
xy_min = points.min(axis=0) - margin
xy_max = points.max(axis=0) + margin
ax.set_xlim(xy_min[0], xy_max[0])
ax.set_ylim(xy_min[1], xy_max[1])
@_held_figure
def delaunay_plot_2d(tri, ax=None):
"""
Plot the given Delaunay triangulation in 2-D
Parameters
----------
tri : scipy.spatial.Delaunay instance
Triangulation to plot
ax : matplotlib.axes.Axes instance, optional
Axes to plot on
Returns
-------
fig : matplotlib.figure.Figure instance
Figure for the plot
See Also
--------
Delaunay
matplotlib.pyplot.triplot
Notes
-----
Requires Matplotlib.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.spatial import Delaunay, delaunay_plot_2d
The Delaunay triangulation of a set of random points:
>>> points = np.random.rand(30, 2)
>>> tri = Delaunay(points)
Plot it:
>>> _ = delaunay_plot_2d(tri)
>>> plt.show()
"""
if tri.points.shape[1] != 2:
raise ValueError("Delaunay triangulation is not 2-D")
x, y = tri.points.T
ax.plot(x, y, 'o')
ax.triplot(x, y, tri.simplices.copy())
_adjust_bounds(ax, tri.points)
return ax.figure
@_held_figure
def convex_hull_plot_2d(hull, ax=None):
"""
Plot the given convex hull diagram in 2-D
Parameters
----------
hull : scipy.spatial.ConvexHull instance
Convex hull to plot
ax : matplotlib.axes.Axes instance, optional
Axes to plot on
Returns
-------
fig : matplotlib.figure.Figure instance
Figure for the plot
See Also
--------
ConvexHull
Notes
-----
Requires Matplotlib.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.spatial import ConvexHull, convex_hull_plot_2d
The convex hull of a random set of points:
>>> points = np.random.rand(30, 2)
>>> hull = ConvexHull(points)
Plot it:
>>> _ = convex_hull_plot_2d(hull)
>>> plt.show()
"""
from matplotlib.collections import LineCollection # type: ignore[import]
if hull.points.shape[1] != 2:
raise ValueError("Convex hull is not 2-D")
ax.plot(hull.points[:,0], hull.points[:,1], 'o')
line_segments = [hull.points[simplex] for simplex in hull.simplices]
ax.add_collection(LineCollection(line_segments,
colors='k',
linestyle='solid'))
_adjust_bounds(ax, hull.points)
return ax.figure
@_held_figure
def voronoi_plot_2d(vor, ax=None, **kw):
"""
Plot the given Voronoi diagram in 2-D
Parameters
----------
vor : scipy.spatial.Voronoi instance
Diagram to plot
ax : matplotlib.axes.Axes instance, optional
Axes to plot on
    show_points : bool, optional
        Add the Voronoi points to the plot.
show_vertices : bool, optional
Add the Voronoi vertices to the plot.
line_colors : string, optional
Specifies the line color for polygon boundaries
line_width : float, optional
Specifies the line width for polygon boundaries
    line_alpha : float, optional
        Specifies the line alpha for polygon boundaries
    point_size : float, optional
        Specifies the size of points
Returns
-------
fig : matplotlib.figure.Figure instance
Figure for the plot
See Also
--------
Voronoi
Notes
-----
Requires Matplotlib.
Examples
--------
    Set of points:
>>> import matplotlib.pyplot as plt
>>> points = np.random.rand(10,2) #random
Voronoi diagram of the points:
>>> from scipy.spatial import Voronoi, voronoi_plot_2d
>>> vor = Voronoi(points)
using `voronoi_plot_2d` for visualisation:
>>> fig = voronoi_plot_2d(vor)
using `voronoi_plot_2d` for visualisation with enhancements:
>>> fig = voronoi_plot_2d(vor, show_vertices=False, line_colors='orange',
... line_width=2, line_alpha=0.6, point_size=2)
>>> plt.show()
"""
from matplotlib.collections import LineCollection
if vor.points.shape[1] != 2:
raise ValueError("Voronoi diagram is not 2-D")
if kw.get('show_points', True):
point_size = kw.get('point_size', None)
ax.plot(vor.points[:,0], vor.points[:,1], '.', markersize=point_size)
if kw.get('show_vertices', True):
ax.plot(vor.vertices[:,0], vor.vertices[:,1], 'o')
line_colors = kw.get('line_colors', 'k')
line_width = kw.get('line_width', 1.0)
line_alpha = kw.get('line_alpha', 1.0)
center = vor.points.mean(axis=0)
ptp_bound = vor.points.ptp(axis=0)
finite_segments = []
infinite_segments = []
for pointidx, simplex in zip(vor.ridge_points, vor.ridge_vertices):
simplex = np.asarray(simplex)
if np.all(simplex >= 0):
finite_segments.append(vor.vertices[simplex])
else:
i = simplex[simplex >= 0][0] # finite end Voronoi vertex
t = vor.points[pointidx[1]] - vor.points[pointidx[0]] # tangent
t /= np.linalg.norm(t)
n = np.array([-t[1], t[0]]) # normal
midpoint = vor.points[pointidx].mean(axis=0)
direction = np.sign(np.dot(midpoint - center, n)) * n
if (vor.furthest_site):
direction = -direction
far_point = vor.vertices[i] + direction * ptp_bound.max()
infinite_segments.append([vor.vertices[i], far_point])
ax.add_collection(LineCollection(finite_segments,
colors=line_colors,
lw=line_width,
alpha=line_alpha,
linestyle='solid'))
ax.add_collection(LineCollection(infinite_segments,
colors=line_colors,
lw=line_width,
alpha=line_alpha,
linestyle='dashed'))
_adjust_bounds(ax, vor.points)
return ax.figure
| bsd-3-clause |
michigraber/scikit-learn | examples/gaussian_process/gp_diabetes_dataset.py | 223 | 1976 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
========================================================================
Gaussian Processes regression: goodness-of-fit on the 'diabetes' dataset
========================================================================
In this example, we fit a Gaussian Process model onto the diabetes
dataset.
We determine the correlation parameters with maximum likelihood
estimation (MLE). We use an anisotropic squared exponential
correlation model with a constant regression model. We also use a
nugget of 1e-2 to account for the (strong) noise in the targets.
We compute a cross-validation estimate of the coefficient of
determination (R2) without reperforming MLE, using the set of correlation
parameters found on the whole dataset.
"""
print(__doc__)
# Author: Vincent Dubourg <[email protected]>
# Licence: BSD 3 clause
from sklearn import datasets
from sklearn.gaussian_process import GaussianProcess
from sklearn.cross_validation import cross_val_score, KFold
# Load the dataset from scikit's data sets
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
# Instantiate a GP model
gp = GaussianProcess(regr='constant', corr='absolute_exponential',
theta0=[1e-4] * 10, thetaL=[1e-12] * 10,
thetaU=[1e-2] * 10, nugget=1e-2, optimizer='Welch')
# Fit the GP model to the data performing maximum likelihood estimation
gp.fit(X, y)
# Deactivate maximum likelihood estimation for the cross-validation loop
gp.theta0 = gp.theta_ # Given correlation parameter = MLE
gp.thetaL, gp.thetaU = None, None # None bounds deactivate MLE
# Perform a cross-validation estimate of the coefficient of determination using
# the cross_validation module (n_jobs=1 here; set n_jobs=-1 to use all CPUs)
K = 20 # folds
R2 = cross_val_score(gp, X, y=y, cv=KFold(y.size, K), n_jobs=1).mean()
print("The %d-Folds estimate of the coefficient of determination is R2 = %s"
% (K, R2))
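# A short follow-up sketch (illustrative): the GP fitted above can also return
# predictions with an uncertainty estimate, e.g.
#   y_pred, mse_pred = gp.predict(X, eval_MSE=True)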
| bsd-3-clause |
PatrickOReilly/scikit-learn | examples/tree/plot_tree_regression.py | 95 | 1516 | """
===================================================================
Decision Tree Regression
===================================================================
A 1D regression with a decision tree.
A :ref:`decision tree <tree>` is
used to fit a sine curve with additional noisy observations. As a result, it
learns local linear regressions approximating the sine curve.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision tree learns overly fine
details of the training data and learns from the noise, i.e. it overfits.
"""
print(__doc__)
# Import the necessary modules and libraries
import numpy as np
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(5 * rng.rand(80, 1), axis=0)
y = np.sin(X).ravel()
y[::5] += 3 * (0.5 - rng.rand(16))
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_1.fit(X, y)
regr_2.fit(X, y)
# Predict
X_test = np.arange(0.0, 5.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
# Plot the results
plt.figure()
plt.scatter(X, y, c="darkorange", label="data")
plt.plot(X_test, y_1, color="cornflowerblue", label="max_depth=2", linewidth=2)
plt.plot(X_test, y_2, color="yellowgreen", label="max_depth=5", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
wronk/mne-python | examples/time_frequency/plot_source_space_time_frequency.py | 9 | 2323 | """
===================================================
Compute induced power in the source space with dSPM
===================================================
Returns STC files, i.e. source estimates of induced power
for different bands in the source space. The inverse method
is linear, based on the dSPM inverse operator.
"""
# Authors: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
from mne.minimum_norm import read_inverse_operator, source_band_induced_power
print(__doc__)
###############################################################################
# Set parameters
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
tmin, tmax, event_id = -0.2, 0.5, 1
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname)
events = mne.find_events(raw, stim_channel='STI 014')
inverse_operator = read_inverse_operator(fname_inv)
include = []
raw.info['bads'] += ['MEG 2443', 'EEG 053'] # bads + 2 more
# picks MEG gradiometers
picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=True,
stim=False, include=include, exclude='bads')
# Load condition 1
event_id = 1
events = events[:10] # take 10 events to keep the computation time low
# Use linear detrend to reduce any edge artifacts
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=dict(grad=4000e-13, eog=150e-6),
preload=True, detrend=1)
# Compute a source estimate per frequency band
bands = dict(alpha=[9, 11], beta=[18, 22])
stcs = source_band_induced_power(epochs, inverse_operator, bands, n_cycles=2,
use_fft=False, n_jobs=1)
for b, stc in stcs.items():
stc.save('induced_power_%s' % b)
###############################################################################
# plot mean power
plt.plot(stcs['alpha'].times, stcs['alpha'].data.mean(axis=0), label='Alpha')
plt.plot(stcs['beta'].times, stcs['beta'].data.mean(axis=0), label='Beta')
plt.xlabel('Time (ms)')
plt.ylabel('Power')
plt.legend()
plt.title('Mean source induced power')
plt.show()
| bsd-3-clause |
JPFrancoia/scikit-learn | sklearn/gaussian_process/gpr.py | 13 | 18747 | """Gaussian processes regression. """
# Authors: Jan Hendrik Metzen <[email protected]>
#
# License: BSD 3 clause
import warnings
from operator import itemgetter
import numpy as np
from scipy.linalg import cholesky, cho_solve, solve_triangular
from scipy.optimize import fmin_l_bfgs_b
from sklearn.base import BaseEstimator, RegressorMixin, clone
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C
from sklearn.utils import check_random_state
from sklearn.utils.validation import check_X_y, check_array
class GaussianProcessRegressor(BaseEstimator, RegressorMixin):
"""Gaussian process regression (GPR).
The implementation is based on Algorithm 2.1 of Gaussian Processes
for Machine Learning (GPML) by Rasmussen and Williams.
In addition to standard scikit-learn estimator API,
GaussianProcessRegressor:
* allows prediction without prior fitting (based on the GP prior)
* provides an additional method sample_y(X), which evaluates samples
drawn from the GPR (prior or posterior) at given inputs
* exposes a method log_marginal_likelihood(theta), which can be used
externally for other ways of selecting hyperparameters, e.g., via
Markov chain Monte Carlo.
Read more in the :ref:`User Guide <gaussian_process>`.
.. versionadded:: 0.18
Parameters
----------
kernel : kernel object
The kernel specifying the covariance function of the GP. If None is
passed, the kernel "1.0 * RBF(1.0)" is used as default. Note that
the kernel's hyperparameters are optimized during fitting.
alpha : float or array-like, optional (default: 1e-10)
Value added to the diagonal of the kernel matrix during fitting.
Larger values correspond to increased noise level in the observations
and reduce potential numerical issue during fitting. If an array is
passed, it must have the same number of entries as the data used for
fitting and is used as datapoint-dependent noise level. Note that this
is equivalent to adding a WhiteKernel with c=alpha. Allowing to specify
the noise level directly as a parameter is mainly for convenience and
for consistency with Ridge.
optimizer : string or callable, optional (default: "fmin_l_bfgs_b")
Can either be one of the internally supported optimizers for optimizing
the kernel's parameters, specified by a string, or an externally
defined optimizer passed as a callable. If a callable is passed, it
must have the signature::
def optimizer(obj_func, initial_theta, bounds):
# * 'obj_func' is the objective function to be maximized, which
# takes the hyperparameters theta as parameter and an
# optional flag eval_gradient, which determines if the
# gradient is returned additionally to the function value
# * 'initial_theta': the initial value for theta, which can be
# used by local optimizers
# * 'bounds': the bounds on the values of theta
....
# Returned are the best found hyperparameters theta and
# the corresponding value of the target function.
return theta_opt, func_min
Per default, the 'fmin_l_bfgs_b' algorithm from scipy.optimize
is used. If None is passed, the kernel's parameters are kept fixed.
Available internal optimizers are::
'fmin_l_bfgs_b'
n_restarts_optimizer : int, optional (default: 0)
The number of restarts of the optimizer for finding the kernel's
parameters which maximize the log-marginal likelihood. The first run
of the optimizer is performed from the kernel's initial parameters,
the remaining ones (if any) from thetas sampled log-uniform randomly
from the space of allowed theta-values. If greater than 0, all bounds
must be finite. Note that n_restarts_optimizer == 0 implies that one
run is performed.
normalize_y : boolean, optional (default: False)
Whether the target values y are normalized, i.e., the mean of the
observed target values become zero. This parameter should be set to
        True if the target values' mean is expected to differ considerably from
zero. When enabled, the normalization effectively modifies the GP's
prior based on the data, which contradicts the likelihood principle;
normalization is thus disabled per default.
copy_X_train : bool, optional (default: True)
If True, a persistent copy of the training data is stored in the
object. Otherwise, just a reference to the training data is stored,
which might cause predictions to change if the data is modified
externally.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
Attributes
----------
X_train_ : array-like, shape = (n_samples, n_features)
Feature values in training data (also required for prediction)
y_train_ : array-like, shape = (n_samples, [n_output_dims])
Target values in training data (also required for prediction)
kernel_ : kernel object
The kernel used for prediction. The structure of the kernel is the
same as the one passed as parameter but with optimized hyperparameters
L_ : array-like, shape = (n_samples, n_samples)
Lower-triangular Cholesky decomposition of the kernel in ``X_train_``
alpha_ : array-like, shape = (n_samples,)
Dual coefficients of training data points in kernel space
log_marginal_likelihood_value_ : float
The log-marginal-likelihood of ``self.kernel_.theta``
"""
def __init__(self, kernel=None, alpha=1e-10,
optimizer="fmin_l_bfgs_b", n_restarts_optimizer=0,
normalize_y=False, copy_X_train=True, random_state=None):
self.kernel = kernel
self.alpha = alpha
self.optimizer = optimizer
self.n_restarts_optimizer = n_restarts_optimizer
self.normalize_y = normalize_y
self.copy_X_train = copy_X_train
self.random_state = random_state
def fit(self, X, y):
"""Fit Gaussian process regression model
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Training data
y : array-like, shape = (n_samples, [n_output_dims])
Target values
Returns
-------
self : returns an instance of self.
"""
if self.kernel is None: # Use an RBF kernel as default
self.kernel_ = C(1.0, constant_value_bounds="fixed") \
* RBF(1.0, length_scale_bounds="fixed")
else:
self.kernel_ = clone(self.kernel)
self.rng = check_random_state(self.random_state)
X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
# Normalize target value
if self.normalize_y:
self.y_train_mean = np.mean(y, axis=0)
# demean y
y = y - self.y_train_mean
else:
self.y_train_mean = np.zeros(1)
if np.iterable(self.alpha) \
and self.alpha.shape[0] != y.shape[0]:
if self.alpha.shape[0] == 1:
self.alpha = self.alpha[0]
else:
raise ValueError("alpha must be a scalar or an array"
" with same number of entries as y.(%d != %d)"
% (self.alpha.shape[0], y.shape[0]))
self.X_train_ = np.copy(X) if self.copy_X_train else X
self.y_train_ = np.copy(y) if self.copy_X_train else y
if self.optimizer is not None and self.kernel_.n_dims > 0:
# Choose hyperparameters based on maximizing the log-marginal
# likelihood (potentially starting from several initial values)
def obj_func(theta, eval_gradient=True):
if eval_gradient:
lml, grad = self.log_marginal_likelihood(
theta, eval_gradient=True)
return -lml, -grad
else:
return -self.log_marginal_likelihood(theta)
# First optimize starting from theta specified in kernel
optima = [(self._constrained_optimization(obj_func,
self.kernel_.theta,
self.kernel_.bounds))]
# Additional runs are performed from log-uniform chosen initial
# theta
if self.n_restarts_optimizer > 0:
if not np.isfinite(self.kernel_.bounds).all():
raise ValueError(
"Multiple optimizer restarts (n_restarts_optimizer>0) "
"requires that all bounds are finite.")
bounds = self.kernel_.bounds
for iteration in range(self.n_restarts_optimizer):
theta_initial = \
self.rng.uniform(bounds[:, 0], bounds[:, 1])
optima.append(
self._constrained_optimization(obj_func, theta_initial,
bounds))
# Select result from run with minimal (negative) log-marginal
# likelihood
lml_values = list(map(itemgetter(1), optima))
self.kernel_.theta = optima[np.argmin(lml_values)][0]
self.log_marginal_likelihood_value_ = -np.min(lml_values)
else:
self.log_marginal_likelihood_value_ = \
self.log_marginal_likelihood(self.kernel_.theta)
# Precompute quantities required for predictions which are independent
# of actual query points
K = self.kernel_(self.X_train_)
K[np.diag_indices_from(K)] += self.alpha
self.L_ = cholesky(K, lower=True) # Line 2
self.alpha_ = cho_solve((self.L_, True), self.y_train_) # Line 3
return self
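        # Note (added comment): the precomputation above follows Algorithm 2.1
        # of Rasmussen & Williams (GPML), which the "Line 2"/"Line 3" comments
        # refer to. With K = kernel_(X_train_) + alpha * I:
        #   L_     = cholesky(K)        (lower triangular, "Line 2")
        #   alpha_ = K^{-1} y_train_    (via cho_solve,    "Line 3")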
def predict(self, X, return_std=False, return_cov=False):
"""Predict using the Gaussian process regression model
We can also predict based on an unfitted model by using the GP prior.
        In addition to the mean of the predictive distribution, its standard
        deviation (return_std=True) or covariance (return_cov=True) can also
        be returned. Note that at most one of the two can be requested.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Query points where the GP is evaluated
return_std : bool, default: False
If True, the standard-deviation of the predictive distribution at
the query points is returned along with the mean.
return_cov : bool, default: False
If True, the covariance of the joint predictive distribution at
the query points is returned along with the mean
Returns
-------
y_mean : array, shape = (n_samples, [n_output_dims])
            Mean of predictive distribution at query points
y_std : array, shape = (n_samples,), optional
Standard deviation of predictive distribution at query points.
Only returned when return_std is True.
y_cov : array, shape = (n_samples, n_samples), optional
            Covariance of joint predictive distribution at query points.
Only returned when return_cov is True.
"""
if return_std and return_cov:
raise RuntimeError(
"Not returning standard deviation of predictions when "
"returning full covariance.")
X = check_array(X)
if not hasattr(self, "X_train_"): # Unfitted;predict based on GP prior
y_mean = np.zeros(X.shape[0])
if return_cov:
y_cov = self.kernel(X)
return y_mean, y_cov
elif return_std:
y_var = self.kernel.diag(X)
return y_mean, np.sqrt(y_var)
else:
return y_mean
else: # Predict based on GP posterior
K_trans = self.kernel_(X, self.X_train_)
y_mean = K_trans.dot(self.alpha_) # Line 4 (y_mean = f_star)
y_mean = self.y_train_mean + y_mean # undo normal.
if return_cov:
v = cho_solve((self.L_, True), K_trans.T) # Line 5
y_cov = self.kernel_(X) - K_trans.dot(v) # Line 6
return y_mean, y_cov
elif return_std:
# compute inverse K_inv of K based on its Cholesky
# decomposition L and its inverse L_inv
L_inv = solve_triangular(self.L_.T, np.eye(self.L_.shape[0]))
K_inv = L_inv.dot(L_inv.T)
# Compute variance of predictive distribution
y_var = self.kernel_.diag(X)
y_var -= np.einsum("ki,kj,ij->k", K_trans, K_trans, K_inv)
# Check if any of the variances is negative because of
# numerical issues. If yes: set the variance to 0.
y_var_negative = y_var < 0
if np.any(y_var_negative):
warnings.warn("Predicted variances smaller than 0. "
"Setting those variances to 0.")
y_var[y_var_negative] = 0.0
return y_mean, np.sqrt(y_var)
else:
return y_mean
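            # Note (added comment): with K_trans = kernel_(X, X_train_), the
            # posterior computed above is
            #   mean = K_trans . alpha_                             ("Line 4")
            #   cov  = kernel_(X) - K_trans . K^{-1} . K_trans.T    ("Lines 5-6")
            # and the return_std branch extracts only diag(cov) via einsum.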
def sample_y(self, X, n_samples=1, random_state=0):
"""Draw samples from Gaussian process and evaluate at X.
Parameters
----------
X : array-like, shape = (n_samples_X, n_features)
Query points where the GP samples are evaluated
n_samples : int, default: 1
The number of samples drawn from the Gaussian process
random_state: RandomState or an int seed (0 by default)
A random number generator instance
Returns
-------
y_samples : array, shape = (n_samples_X, [n_output_dims], n_samples)
Values of n_samples samples drawn from Gaussian process and
evaluated at query points.
"""
rng = check_random_state(random_state)
y_mean, y_cov = self.predict(X, return_cov=True)
if y_mean.ndim == 1:
y_samples = rng.multivariate_normal(y_mean, y_cov, n_samples).T
else:
y_samples = \
[rng.multivariate_normal(y_mean[:, i], y_cov,
n_samples).T[:, np.newaxis]
for i in range(y_mean.shape[1])]
y_samples = np.hstack(y_samples)
return y_samples
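        # Note (added comment): sample_y draws from the predictive distribution
        # computed by predict(return_cov=True); for 1-D targets, e.g.
        # gp.sample_y(X_test, n_samples=10) returns shape (len(X_test), 10).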
def log_marginal_likelihood(self, theta=None, eval_gradient=False):
"""Returns log-marginal likelihood of theta for training data.
Parameters
----------
theta : array-like, shape = (n_kernel_params,) or None
Kernel hyperparameters for which the log-marginal likelihood is
evaluated. If None, the precomputed log_marginal_likelihood
of ``self.kernel_.theta`` is returned.
eval_gradient : bool, default: False
If True, the gradient of the log-marginal likelihood with respect
to the kernel hyperparameters at position theta is returned
additionally. If True, theta must not be None.
Returns
-------
log_likelihood : float
Log-marginal likelihood of theta for training data.
log_likelihood_gradient : array, shape = (n_kernel_params,), optional
Gradient of the log-marginal likelihood with respect to the kernel
hyperparameters at position theta.
Only returned when eval_gradient is True.
"""
if theta is None:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated for theta!=None")
return self.log_marginal_likelihood_value_
kernel = self.kernel_.clone_with_theta(theta)
if eval_gradient:
K, K_gradient = kernel(self.X_train_, eval_gradient=True)
else:
K = kernel(self.X_train_)
K[np.diag_indices_from(K)] += self.alpha
try:
L = cholesky(K, lower=True) # Line 2
except np.linalg.LinAlgError:
return (-np.inf, np.zeros_like(theta)) \
if eval_gradient else -np.inf
# Support multi-dimensional output of self.y_train_
y_train = self.y_train_
if y_train.ndim == 1:
y_train = y_train[:, np.newaxis]
alpha = cho_solve((L, True), y_train) # Line 3
# Compute log-likelihood (compare line 7)
log_likelihood_dims = -0.5 * np.einsum("ik,ik->k", y_train, alpha)
log_likelihood_dims -= np.log(np.diag(L)).sum()
log_likelihood_dims -= K.shape[0] / 2 * np.log(2 * np.pi)
log_likelihood = log_likelihood_dims.sum(-1) # sum over dimensions
if eval_gradient: # compare Equation 5.9 from GPML
tmp = np.einsum("ik,jk->ijk", alpha, alpha) # k: output-dimension
tmp -= cho_solve((L, True), np.eye(K.shape[0]))[:, :, np.newaxis]
# Compute "0.5 * trace(tmp.dot(K_gradient))" without
# constructing the full matrix tmp.dot(K_gradient) since only
# its diagonal is required
log_likelihood_gradient_dims = \
0.5 * np.einsum("ijl,ijk->kl", tmp, K_gradient)
log_likelihood_gradient = log_likelihood_gradient_dims.sum(-1)
if eval_gradient:
return log_likelihood, log_likelihood_gradient
else:
return log_likelihood
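        # Note (added comment): per output dimension the value returned above is
        #   log p(y | X, theta) = -1/2 y^T K^{-1} y - sum(log(diag(L)))
        #                         - n/2 log(2*pi)
        # with K = kernel(X_train_) + alpha * I and L its Cholesky factor; the
        # gradient branch follows Eq. 5.9 of GPML as noted in the comments.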
def _constrained_optimization(self, obj_func, initial_theta, bounds):
if self.optimizer == "fmin_l_bfgs_b":
theta_opt, func_min, convergence_dict = \
fmin_l_bfgs_b(obj_func, initial_theta, bounds=bounds)
if convergence_dict["warnflag"] != 0:
warnings.warn("fmin_l_bfgs_b terminated abnormally with the "
" state: %s" % convergence_dict)
elif callable(self.optimizer):
theta_opt, func_min = \
self.optimizer(obj_func, initial_theta, bounds=bounds)
else:
raise ValueError("Unknown optimizer %s." % self.optimizer)
return theta_opt, func_min
| bsd-3-clause |
adrn/streams | streams/io/tests/test_sgr.py | 1 | 5403 | # coding: utf-8
""" Test different reading data from different mass runs """
from __future__ import absolute_import, unicode_literals, division, print_function
__author__ = "adrn <[email protected]>"
# Standard library
import os, sys
# Third-party
import astropy.units as u
from astropy.constants import G
from astropy.io.misc import fnpickle
import matplotlib.pyplot as plt
import numpy as np
import pytest
from ... import usys
from ..sgr import SgrSimulation
from ...coordinates.frame import heliocentric
plot_path = "plots/tests/io/sgr"
if not os.path.exists(plot_path):
os.makedirs(plot_path)
p_kwargs = dict(marker='.', linestyle='none', color='k', alpha=0.1)
s_kwargs = dict(marker='o', linestyle='none', color='r', alpha=0.75,
markersize=10)
l_kwargs = dict(marker='^', linestyle='none', color='g', alpha=0.75,
markersize=10)
class Test10E8(object):
def setup_class(self):
self.sgr = SgrSimulation("sgr_plummer/2.5e8", "SNAP")
particles = self.sgr.particles(expr="tub==0")
self.particles = particles.decompose(usys)
satellite = self.sgr.satellite()
self.satellite = satellite.decompose(usys)
# Here are the true parameters from the SCFCEN file
r0 = np.array([36.82173, 2.926886, -4.172226])*self.sgr.units['length']
v0 = np.array([4.654394, -0.9905948, 5.080418])*self.sgr.units['speed']
self.true_r = np.squeeze(r0.decompose(usys).value)
self.true_v = np.squeeze(v0.decompose(usys).value)
def test_position(self):
fig,axes = plt.subplots(2, 2, figsize=(10,10))
axes[0,1].set_visible(False)
axes[0,0].plot(self.particles["x"].value,
self.particles["y"].value,
label="all particles", **p_kwargs)
axes[1,0].plot(self.particles["x"].value,
self.particles["z"].value,
**p_kwargs)
axes[1,1].plot(self.particles["y"].value,
self.particles["z"].value,
**p_kwargs)
axes[0,0].plot(self.satellite["x"].value,
self.satellite["y"].value,
label="Satellite", **s_kwargs)
axes[1,0].plot(self.satellite["x"].value,
self.satellite["z"].value,
**s_kwargs)
axes[1,1].plot(self.satellite["y"].value,
self.satellite["z"].value,
**s_kwargs)
axes[0,0].plot(self.true_r[0], self.true_r[1], label="Law",
**l_kwargs)
axes[1,0].plot(self.true_r[0], self.true_r[2], **l_kwargs)
axes[1,1].plot(self.true_r[1], self.true_r[2], **l_kwargs)
sz = 2
axes[0,0].set_xlim(self.true_r[0]-sz, self.true_r[0]+sz)
axes[0,0].set_ylim(self.true_r[1]-sz, self.true_r[1]+sz)
axes[1,0].set_xlim(self.true_r[0]-sz, self.true_r[0]+sz)
axes[1,0].set_ylim(self.true_r[2]-sz, self.true_r[2]+sz)
axes[1,1].set_xlim(self.true_r[1]-sz, self.true_r[1]+sz)
axes[1,1].set_ylim(self.true_r[2]-sz, self.true_r[2]+sz)
axes[0,0].legend(fontsize=10)
fig.subplots_adjust(hspace=0.02,wspace=0.02)
fig.savefig(os.path.join(plot_path, "sat_ptcl_positions_2.5e8.png"))
def test_velocity(self):
fig,axes = plt.subplots(2, 2, figsize=(10,10))
axes[0,1].set_visible(False)
axes[0,0].plot(self.particles["vx"].value,
self.particles["vy"].value,
label="all particles", **p_kwargs)
axes[1,0].plot(self.particles["vx"].value,
self.particles["vz"].value,
**p_kwargs)
axes[1,1].plot(self.particles["vy"].value,
self.particles["vz"].value,
**p_kwargs)
axes[0,0].plot(self.satellite["vx"].value,
self.satellite["vy"].value,
label="Satellite", **s_kwargs)
axes[1,0].plot(self.satellite["vx"].value,
self.satellite["vz"].value,
**s_kwargs)
axes[1,1].plot(self.satellite["vy"].value,
self.satellite["vz"].value,
**s_kwargs)
axes[0,0].plot(self.true_v[0], self.true_v[1], label="Law", **l_kwargs)
axes[1,0].plot(self.true_v[0], self.true_v[2], **l_kwargs)
axes[1,1].plot(self.true_v[1], self.true_v[2], **l_kwargs)
sz = (50*u.km/u.s).decompose(usys).value
axes[0,0].set_xlim(self.true_v[0]-sz, self.true_v[0]+sz)
axes[0,0].set_ylim(self.true_v[1]-sz, self.true_v[1]+sz)
axes[1,0].set_xlim(self.true_v[0]-sz, self.true_v[0]+sz)
axes[1,0].set_ylim(self.true_v[2]-sz, self.true_v[2]+sz)
axes[1,1].set_xlim(self.true_v[1]-sz, self.true_v[1]+sz)
axes[1,1].set_ylim(self.true_v[2]-sz, self.true_v[2]+sz)
axes[0,0].legend(fontsize=10)
fig.subplots_adjust(hspace=0.02,wspace=0.02)
fig.savefig(os.path.join(plot_path, "sat_ptcl_velocities_2.5e8.png"))
def test_pickle(self):
particles = self.sgr.particles(n=25, expr="tub==0")
fnpickle(particles, os.path.join(plot_path, "test.pickle"))
p = particles.to_frame(heliocentric)
fnpickle(p, os.path.join(plot_path, "test2.pickle")) | mit |
airanmehr/bio | Scripts/TimeSeriesPaper/Simulation/msmsData2.py | 1 | 3534 | '''
Copyleft Jul 24, 2016 Arya Iranmehr, PhD Student, Bafna Lab, UC San Diego, Email: [email protected]
'''
import numpy as np;
np.set_printoptions(linewidth=200, precision=5, suppress=True)
import pandas as pd;
pd.options.display.max_rows = 20;
pd.options.display.expand_frame_repr = False
import pylab as plt;
import os;
import seaborn as sns
home = os.path.expanduser('~') + '/'
import Utils.Util as utl
import Utils.Estimate as est
import Utils.Simulation as Simulation
import multiprocessing
path = utl.simoutpath + 'MSMSSelectionPlot/'
def createData():
Ne = 1e4;
if not os.path.exists(path): os.makedirs(path)
s = 0.1
T = int(2 * np.log(2 * Ne * s) / s)
window_size = 5e4;
mu = 2 * 1e-8;
r = 2 * 1e-8;
n = 200;
posUnderSelection = 0.5;
step = 10
theta = 4 * Ne * mu * window_size;
rho = 4 * Ne * r * window_size;
numReplicates = 1;
T = (np.round(T / 100, 2) + 3) * 100
gens = np.arange(1, 1 + T + 1, step);
msms = 'java -jar -Xmx2g ' + utl.home + 'bin/msms/lib/msms.jar'
def one(i):
global gens
origin_count = 1.
fname = path + 'sim{}.OC{:.0f}.s{:.0E}.g'.format(i, origin_count, s)
Simulation.MSMSSelection(msms, Ne, n, numReplicates, theta, rho, window_size, s, origin_count,
posUnderSelection, gens, fname)
print i, s, origin_count, Ne, theta # ,gens,Simulation.MSMS.load(fname +'{}.msms'.format(start))[0][25000].mean(),Simulation.MSMS.load(fname +'{}.msms'.format(start+50))[0][25000].mean()
fname = path + 'sim{}.OC{:.0f}.s{:.0E}.g'.format(i, origin_count, 0)
Simulation.MSMSSelection(msms, Ne, n, numReplicates, theta, rho, window_size, 0, origin_count,
posUnderSelection, gens, fname)
multiprocessing.Pool(4).map(one, range(100))
files = utl.files(path)
df = pd.DataFrame(map(lambda x: x.split('.'), files))
res = []
for name, a in df.groupby([0, 2]):
# for f in a.apply(lambda x: '.'.join(x), axis=1):print f
data = pd.concat([(Simulation.MSMS.load(path + f)[0].mean()) for f in a.apply(lambda x: '.'.join(x), axis=1)],
axis=1)
data.columns = a.iloc[:, 3].apply(lambda x: int(x[1:]))
data.columns.name = 'gen'
data = data.T.sort_index()
res += [((float(name[1][1:]), int(name[0][3:])), data)]
df = pd.Series(list(zip(*res)[1]), index=pd.MultiIndex.from_tuples(zip(*res)[0], names=['s', 'i']))
f = lambda x: est.Estimate.getAllEstimatesX(x=x.dropna(), n=200).set_index('method')
def ff(i, s):
dff = pd.concat([f(x) for g, x in df.loc[s][i].iterrows()], axis=1)
dff.columns = df.loc[s][i].index
dff = dff.T
dff['i'] = i
dff.set_index('i', append=True, inplace=True)
return dff
sel = pd.concat(map(lambda x: ff(x, 0.1), range(100))).reset_index()
neut = pd.concat(map(lambda x: ff(x, 0.1), range(100))).reset_index()
nu = df.loc[0.1].apply(lambda x: x[25000]);
nu.iloc[:, 20:] = nu.iloc[:, 20:].replace({0: 1})
plt.subplot(4, 1, 1)
sns.tsplot(data=nu.stack().reset_index(), time='gen', value=0, unit='i')
plt.subplot(4, 1, 2)
sns.tsplot(data=sel, time='gen', value='TajimaD', unit='i');
sns.tsplot(data=neut, time='gen', value='TajimaD', unit='i');
plt.subplot(4, 1, 3)
sns.tsplot(data=sel, time='gen', value='FayWu', unit='i');
sns.tsplot(data=neut, time='gen', value='FayWu', unit='i');
plt.subplot(4, 1, 4)
sns.tsplot(data=sel, time='gen', value='SFSelect', unit='i');
sns.tsplot(data=neut, time='gen', value='SFSelect', unit='i');
| mit |
suranap/qiime | qiime/compare_distance_matrices.py | 15 | 11259 | #!/usr/bin/env python
from __future__ import division
__author__ = "Jai Ram Rideout"
__copyright__ = "Copyright 2012, The QIIME project"
__credits__ = ["Jai Ram Rideout", "Michael Dwan", "Logan Knecht",
"Damien Coy", "Levi McCracken", "Greg Caporaso"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Jai Ram Rideout"
__email__ = "[email protected]"
from os import path
from skbio.stats import p_value_to_str
from skbio.stats.distance import DistanceMatrix, mantel
from qiime.util import make_compatible_distance_matrices
from qiime.stats import MantelCorrelogram, PartialMantel
def run_mantel_test(method, fps, distmats, num_perms, tail_type, comment,
control_dm_fp=None, control_dm=None,
sample_id_map=None):
"""Runs a Mantel test on all pairs of distance matrices.
Returns a string suitable for writing out to a file containing the results
of the test.
WARNING: Only symmetric, hollow distance matrices may be used as input.
Asymmetric distance matrices, such as those obtained by the UniFrac Gain
metric (i.e. beta_diversity.py -m unifrac_g), should not be used as input.
Arguments:
method - which Mantel test to run (either 'mantel' or 'partial_mantel')
fps - list of filepaths of the distance matrices
distmats - list of tuples containing dm labels and dm data (i.e. the
output of parse_distmat)
num_perms - the number of permutations to use to calculate the
p-value(s)
tail_type - the type of tail test to use when calculating the
p-value(s). Can be 'two-sided', 'greater', or 'less'. Only applies
when method is mantel
comment - comment string to add to the beginning of the results string
control_dm_fp - filepath of the control distance matrix. Only applies
when method is partial_mantel (it is required then)
control_dm - tuple containing control distance matrix labels and matrix
data. Only applies when method is partial_mantel (it is required
then)
sample_id_map - dict mapping sample IDs (i.e. what is expected by
make_compatible_distance_matrices)
"""
if len(fps) != len(distmats):
raise ValueError("Must provide the same number of filepaths as there "
"are distance matrices.")
if comment is None:
comment = ''
result = comment
if method == 'mantel':
result += 'DM1\tDM2\tNumber of entries\tMantel r statistic\t' + \
'p-value\tNumber of permutations\tTail type\n'
elif method == 'partial_mantel':
if not control_dm_fp or not control_dm:
raise ValueError("You must provide a control matrix filepath and "
"control matrix when running the partial Mantel "
"test.")
result += 'DM1\tDM2\tCDM\tNumber of entries\t' + \
'Mantel r statistic\tp-value\tNumber of permutations\t' +\
'Tail type\n'
else:
raise ValueError("Invalid method '%s'. Must be either 'mantel' or "
"'partial_mantel'." % method)
# Loop over all pairs of dms.
for i, (fp1, (dm1_labels, dm1_data)) in enumerate(zip(fps, distmats)):
for fp2, (dm2_labels, dm2_data) in zip(fps, distmats)[i + 1:]:
# Make the current pair of distance matrices compatible by only
# keeping samples that match between them, and ordering them by
# the same sample IDs.
(dm1_labels, dm1_data), (dm2_labels, dm2_data) = \
make_compatible_distance_matrices((dm1_labels, dm1_data),
(dm2_labels, dm2_data), lookup=sample_id_map)
if method == 'partial_mantel':
# We need to intersect three sets (three matrices).
(dm1_labels, dm1_data), (cdm_labels, cdm_data) = \
make_compatible_distance_matrices(
(dm1_labels, dm1_data), control_dm,
lookup=sample_id_map)
(dm1_labels, dm1_data), (dm2_labels, dm2_data) = \
make_compatible_distance_matrices(
(dm1_labels, dm1_data), (dm2_labels, dm2_data),
lookup=sample_id_map)
if len(dm1_labels) < 3:
result += '%s\t%s\t%s\t%d\tToo few samples\n' % (fp1,
fp2, control_dm_fp, len(dm1_labels))
continue
elif len(dm1_labels) < 3:
result += '%s\t%s\t%d\tToo few samples\n' % (fp1, fp2,
len(dm1_labels))
continue
dm1 = DistanceMatrix(dm1_data, dm1_labels)
dm2 = DistanceMatrix(dm2_data, dm2_labels)
if method == 'mantel':
corr_coeff, p_value, n = mantel(dm1, dm2, method='pearson',
permutations=num_perms, alternative=tail_type,
strict=True)
p_str = p_value_to_str(p_value, num_perms)
result += "%s\t%s\t%d\t%.5f\t%s\t%d\t%s\n" % (
fp1, fp2, n, corr_coeff, p_str, num_perms, tail_type)
elif method == 'partial_mantel':
cdm = DistanceMatrix(cdm_data, cdm_labels)
results = PartialMantel(dm1, dm2, cdm)(num_perms)
p_str = p_value_to_str(results['mantel_p'], num_perms)
result += "%s\t%s\t%s\t%d\t%.5f\t%s\t%d\t%s\n" % (
fp1, fp2, control_dm_fp, len(dm1_labels),
results['mantel_r'], p_str, num_perms, 'greater')
return result
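# Illustrative call (added comment, not part of the original source); the file
# paths and parsed distance matrices below are placeholders:
#
#   results_str = run_mantel_test('mantel', [dm1_fp, dm2_fp],
#                                 [(dm1_labels, dm1_data),
#                                  (dm2_labels, dm2_data)],
#                                 num_perms=999, tail_type='two-sided',
#                                 comment=None)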
def run_mantel_correlogram(fps, distmats, num_perms, comment, alpha,
sample_id_map=None,
variable_size_distance_classes=False):
"""Runs a Mantel correlogram analysis on all pairs of distance matrices.
Returns a string suitable for writing out to a file containing the results
of the test, a list of correlogram filepath names, and a list of matplotlib
Figure objects representing each correlogram.
The correlogram filepaths can have an extension string appended to the end
of them and then be used to save each of the correlogram Figures to a file.
Each correlogram filepath will be a combination of the two distance matrix
filepaths that were used to create it.
WARNING: Only symmetric, hollow distance matrices may be used as input.
Asymmetric distance matrices, such as those obtained by the UniFrac Gain
metric (i.e. beta_diversity.py -m unifrac_g), should not be used as input.
Arguments:
fps - list of filepaths of the distance matrices
distmats - list of tuples containing dm labels and dm data (i.e. the
output of parse_distmat)
num_perms - the number of permutations to use to calculate the
p-value(s)
comment - comment string to add to the beginning of the results string
alpha - the alpha value to use to determine significance in the
correlogram plots
sample_id_map - dict mapping sample IDs (i.e. what is expected by
make_compatible_distance_matrices)
variable_size_distance_classes - create distance classes that vary in
size (i.e. width) but have the same number of distances in each
class
"""
if len(fps) != len(distmats):
raise ValueError("Must provide the same number of filepaths as there "
"are distance matrices.")
if comment is None:
comment = ''
result = comment + 'DM1\tDM2\tNumber of entries\t' + \
'Number of permutations\tClass index\t' + \
'Number of distances\tMantel r statistic\t' + \
'p-value\tp-value (Bonferroni corrected)\tTail type\n'
correlogram_fps = []
correlograms = []
# Loop over all pairs of dms.
for i, (fp1, (dm1_labels, dm1_data)) in enumerate(zip(fps, distmats)):
for fp2, (dm2_labels, dm2_data) in zip(fps, distmats)[i + 1:]:
# Make the current pair of distance matrices compatible by only
# keeping samples that match between them, and ordering them by
# the same sample IDs.
(dm1_labels, dm1_data), (dm2_labels, dm2_data) = \
make_compatible_distance_matrices((dm1_labels, dm1_data),
(dm2_labels, dm2_data), lookup=sample_id_map)
if len(dm1_labels) < 3:
result += '%s\t%s\t%d\tToo few samples\n' % (fp1, fp2,
len(dm1_labels))
continue
dm1 = DistanceMatrix(dm1_data, dm1_labels)
dm2 = DistanceMatrix(dm2_data, dm2_labels)
# Create an instance of our Mantel correlogram test and run it with
# the specified number of permutations.
mc = MantelCorrelogram(dm1, dm2, alpha=alpha,
variable_size_distance_classes=variable_size_distance_classes)
results = mc(num_perms)
# Generate a name for the current correlogram and save it and the
# correlogram itself.
dm1_name = path.basename(fp1)
dm2_name = path.basename(fp2)
correlogram_fps.append('_'.join((dm1_name, 'AND', dm2_name,
'mantel_correlogram')) + '.')
correlograms.append(results['correlogram_plot'])
# Iterate over the results and write them to the text file.
first_time = True
for class_idx, num_dist, r, p, p_corr in zip(
results['class_index'], results['num_dist'],
results['mantel_r'], results['mantel_p'],
results['mantel_p_corr']):
# Format p-values and figure out which tail type we have based
# on the sign of r.
p_str = None
if p is not None:
p_str = p_value_to_str(p, num_perms)
p_corr_str = None
if p_corr is not None:
p_corr_str = p_value_to_str(p_corr, num_perms)
if r is None:
tail_type = None
elif r < 0:
tail_type = 'less'
else:
tail_type = 'greater'
if first_time:
result += '%s\t%s\t%d\t%d\t%s\t%d\t%s\t%s\t%s\t%s\n' % (
fp1, fp2, len(dm1_labels), num_perms, class_idx,
num_dist, r, p_str, p_corr_str, tail_type)
first_time = False
else:
result += '\t\t\t\t%s\t%d\t%s\t%s\t%s\t%s\n' % (class_idx,
num_dist, r, p_str, p_corr_str, tail_type)
return result, correlogram_fps, correlograms
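# Illustrative call (added comment, not part of the original source); the
# returned filepath stubs already end in '.' so an extension can be appended:
#
#   result, fps, figs = run_mantel_correlogram([dm1_fp, dm2_fp],
#                                              [(dm1_labels, dm1_data),
#                                               (dm2_labels, dm2_data)],
#                                              num_perms=999, comment=None,
#                                              alpha=0.05)
#   for fp, fig in zip(fps, figs):
#       fig.savefig(fp + 'png')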
| gpl-2.0 |
kjung/scikit-learn | sklearn/datasets/mlcomp.py | 289 | 3855 | # Copyright (c) 2010 Olivier Grisel <[email protected]>
# License: BSD 3 clause
"""Glue code to load http://mlcomp.org data as a scikit.learn dataset"""
import os
import numbers
from sklearn.datasets.base import load_files
def _load_document_classification(dataset_path, metadata, set_=None, **kwargs):
if set_ is not None:
dataset_path = os.path.join(dataset_path, set_)
return load_files(dataset_path, metadata.get('description'), **kwargs)
LOADERS = {
'DocumentClassification': _load_document_classification,
# TODO: implement the remaining domain formats
}
def load_mlcomp(name_or_id, set_="raw", mlcomp_root=None, **kwargs):
"""Load a datasets as downloaded from http://mlcomp.org
Parameters
----------
name_or_id : the integer id or the string name metadata of the MLComp
dataset to load
set_ : select the portion to load: 'train', 'test' or 'raw'
mlcomp_root : the filesystem path to the root folder where MLComp datasets
are stored, if mlcomp_root is None, the MLCOMP_DATASETS_HOME
environment variable is looked up instead.
**kwargs : domain specific kwargs to be passed to the dataset loader.
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'filenames', the files holding the raw to learn, 'target', the
classification labels (integer index), 'target_names',
the meaning of the labels, and 'DESCR', the full description of the
dataset.
    Note on the lookup process: depending on the type of name_or_id, the
    loader will choose between integer id lookup or metadata name lookup by
    looking at the unzipped archives and metadata file.
TODO: implement zip dataset loading too
"""
if mlcomp_root is None:
try:
mlcomp_root = os.environ['MLCOMP_DATASETS_HOME']
except KeyError:
raise ValueError("MLCOMP_DATASETS_HOME env variable is undefined")
mlcomp_root = os.path.expanduser(mlcomp_root)
mlcomp_root = os.path.abspath(mlcomp_root)
mlcomp_root = os.path.normpath(mlcomp_root)
if not os.path.exists(mlcomp_root):
raise ValueError("Could not find folder: " + mlcomp_root)
# dataset lookup
if isinstance(name_or_id, numbers.Integral):
# id lookup
dataset_path = os.path.join(mlcomp_root, str(name_or_id))
else:
# assume name based lookup
dataset_path = None
expected_name_line = "name: " + name_or_id
for dataset in os.listdir(mlcomp_root):
metadata_file = os.path.join(mlcomp_root, dataset, 'metadata')
if not os.path.exists(metadata_file):
continue
with open(metadata_file) as f:
for line in f:
if line.strip() == expected_name_line:
dataset_path = os.path.join(mlcomp_root, dataset)
break
if dataset_path is None:
raise ValueError("Could not find dataset with metadata line: " +
expected_name_line)
# loading the dataset metadata
metadata = dict()
metadata_file = os.path.join(dataset_path, 'metadata')
if not os.path.exists(metadata_file):
raise ValueError(dataset_path + ' is not a valid MLComp dataset')
with open(metadata_file) as f:
for line in f:
if ":" in line:
key, value = line.split(":", 1)
metadata[key.strip()] = value.strip()
    format = metadata.get('format', 'unknown')
loader = LOADERS.get(format)
if loader is None:
raise ValueError("No loader implemented for format: " + format)
return loader(dataset_path, metadata, set_=set_, **kwargs)
| bsd-3-clause |
gengho/Car2know | hw7only/cheng_hw7.py | 1 | 3031 |
# coding: utf-8
# before using this script, you should have the following packages
# you can install GeoPandas with: git clone https://github.com/kjordahl/geopandas
# and then install it with
# <pre><code>
# python setup.py install
# conda install fiona
# conda install pysal
# </code></pre>
#
# [email protected]
# Yuxuan Cheng
# https://sites.google.com/view/ycheng
# In[1]:
import shapely
import geopandas as gpd
# In[2]:
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
#from matplotlib import interactive
#interactive(True)
#%matplotlib inline
# In[3]:
import pysal
from scipy import ndimage
#p1 = shapely.geometry.Polygon([(0, 0), (1, 0), (1, 1)])
#p2 = shapely.geometry.Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
#p3 = shapely.geometry.Polygon([(2, 0), (3, 0), (3, 1), (2, 1)])
#g = gpd.GeoSeries([p1,p2,p3])
#g.area
#g.plot()
#matplotlib.pyplot.show()
# In[4]:
#road the data
boros = gpd.GeoDataFrame.from_file('project/Output/SelectedBlocks.shp')
# In[ ]:
#show the first data frame
boros.ix[0]
#boros.blocks
# In[ ]:
#plot the dataframe
boros.plot()
matplotlib.pyplot.show()
# In[ ]:
#get the data (in the future this will read another dataset)
boros['InCount'] = boros['OBJECTID'].mod(255)
# In[ ]:
#show the range of OBJECTID
boros['OBJECTID'].max()
# In[ ]:
#show the name of the “geometry” column of gpd
boros.geometry.name
# In[ ]:
#create the centroid of each block (point data)
boros['centroid_column'] = boros.centroid
# In[ ]:
#show the data
boros.head()
# In[ ]:
#change the name of column
boros.rename(columns={'centroid_column': 'centroid_XY'})
# In[ ]:
#plot the blocks and "In Count"
#%matplotlib qt
fig1 = boros.plot(column='InCount', cmap='viridis')
#fig1.colorbar()
plt.show()
plt.colorbar
plt.savefig('map2.pdf')
# In[ ]:
#the helper function below plots a heat map
def heatmap(d, bins=(100,100), smoothing=1.3, cmap='viridis'):
"""
plot the heat map
This function takes a GeoDataFrame with point geometries and shows a matplotlib plot of heatmap density
"""
def getx(pt):
return pt.coords[0][0]
def gety(pt):
return pt.coords[0][1]
x = list(d.geometry.apply(getx))
y = list(d.geometry.apply(gety))
heatmap, xedges, yedges = np.histogram2d(y, x, bins=bins)
extent = [yedges[0], yedges[-1], xedges[-1], xedges[0]]
#using numpy's 2D histogram binning with smoothing from scipy
logheatmap = np.log(heatmap)
logheatmap[np.isneginf(logheatmap)] = 0
logheatmap = ndimage.filters.gaussian_filter(logheatmap, smoothing, mode='nearest')
plt.imshow(logheatmap, cmap=cmap, extent=extent)
plt.colorbar()
plt.gca().invert_yaxis()
plt.show()
# In[ ]:
#create a new data whose “geometry” column is the center of blocks (point geometries)
boros_point = boros.set_geometry('centroid_column')
# In[ ]:
#plot the piont data
boros_point.plot()
plt.show()
# In[ ]:
#plot the heat map
heatmap(boros_point, bins=50, smoothing=1.5)
# In[ ]:
| mit |
vishnumani2009/OpenSource-Open-Ended-Statistical-toolkit | FRONTEND/heirarfrontend.py | 1 | 6985 | # -*- coding: utf-8 -*-
from PyQt4 import QtCore, QtGui
from scipy.cluster.hierarchy import linkage, dendrogram
from DM import *
import scipy.cluster.hierarchy as hac
import pandas as pd
import matplotlib.pyplot as plt
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName(_fromUtf8("Form"))
Form.resize(251, 371)
        self.dm=euclidean_distance
self.groupBox = QtGui.QGroupBox(Form)
self.groupBox.setGeometry(QtCore.QRect(20, 10, 221, 61))
self.groupBox.setObjectName(_fromUtf8("groupBox"))
self.lin="single"
self.lineEdit = QtGui.QLineEdit(self.groupBox)
self.lineEdit.setGeometry(QtCore.QRect(40, 20, 141, 20))
self.lineEdit.setObjectName(_fromUtf8("lineEdit"))
self.groupBox_4 = QtGui.QGroupBox(Form)
self.groupBox_4.setGeometry(QtCore.QRect(20, 80, 221, 121))
self.groupBox_4.setObjectName(_fromUtf8("groupBox_4"))
self.comboBox_2 = QtGui.QComboBox(self.groupBox_4)
self.comboBox_2.setGeometry(QtCore.QRect(30, 20, 161, 22))
self.comboBox_2.setObjectName(_fromUtf8("comboBox_2"))
self.comboBox_2.addItem(_fromUtf8(""))
self.comboBox_2.addItem(_fromUtf8(""))
self.comboBox_2.addItem(_fromUtf8(""))
self.comboBox_2.activated[str].connect(self.getdis)
self.checkBox_3 = QtGui.QCheckBox(self.groupBox_4)
self.checkBox_3.setGeometry(QtCore.QRect(30, 60, 151, 17))
self.checkBox_3.setObjectName(_fromUtf8("checkBox_3"))
self.checkBox_4 = QtGui.QCheckBox(self.groupBox_4)
self.checkBox_4.setGeometry(QtCore.QRect(30, 90, 151, 17))
self.checkBox_4.setObjectName(_fromUtf8("checkBox_4"))
self.pushButton_3 = QtGui.QPushButton(Form)
self.pushButton_3.setGeometry(QtCore.QRect(40, 330, 161, 23))
self.pushButton_3.setObjectName(_fromUtf8("pushButton_3"))
self.pushButton_3.clicked.connect(self.starthcc)
self.pushButton = QtGui.QPushButton(Form)
self.pushButton.setGeometry(QtCore.QRect(40, 300, 161, 23))
self.pushButton.setObjectName(_fromUtf8("pushButton"))
self.pushButton.clicked.connect(self.takeinput)
self.groupBox_2 = QtGui.QGroupBox(Form)
self.groupBox_2.setGeometry(QtCore.QRect(20, 200, 221, 80))
self.groupBox_2.setObjectName(_fromUtf8("groupBox_2"))
## self.lineEdit_2 = QtGui.QLineEdit(self.groupBox_2)
## self.lineEdit_2.setGeometry(QtCore.QRect(30, 20, 161, 20))
## self.lineEdit_2.setObjectName(_fromUtf8("lineEdit_2"))
##
## self.lineEdit_3 = QtGui.QLineEdit(self.groupBox_2)
## self.lineEdit_3.setGeometry(QtCore.QRect(30, 50, 161, 20))
## self.lineEdit_3.setObjectName(_fromUtf8("lineEdit_3"))
self.comboBox_3 = QtGui.QComboBox(self.groupBox_2)
self.comboBox_3.setGeometry(QtCore.QRect(30, 25, 161, 20))
self.comboBox_3.setObjectName(_fromUtf8("comboBox_2"))
self.comboBox_3.addItem(_fromUtf8(""))
self.comboBox_3.addItem(_fromUtf8(""))
self.comboBox_3.addItem(_fromUtf8(""))
self.comboBox_3.activated[str].connect(self.getlinkage)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def getlinkage(self,txt):
self.lin= txt
def takeinput(self):
fname = QtGui.QFileDialog.getOpenFileName(None, 'Open file', 'C:')
print type(fname)
df = pd.read_csv(str(fname), sep=",")
x=list(df[list(df)[0]])
y=list(df[list(df)[1]])
self.tr=(zip(x,y))
#print self.tr
def starthcc(self):
print self.dm,self.lin
dataFrame = pd.DataFrame(self.tr, columns=['x', 'y'])
from scipy.spatial.distance import pdist, squareform
# not printed as pretty, but the values are correct
distxy = squareform(pdist(dataFrame, metric=(self.dm)))
#print distxy
if self.lin=="single":
plt.figure()
R = dendrogram(linkage(distxy, method=str(self.lin)))
plt.xlabel('X units')
plt.ylabel('Y units')
plt.suptitle('Cluster Dendrogram', fontweight='bold', fontsize=14);
plt.show()
elif self.lin=="complete":
plt.figure()
R = dendrogram(linkage(distxy, method=str(self.lin)))
plt.xlabel('X units')
plt.ylabel('Y units')
plt.suptitle('Cluster Dendrogram', fontweight='bold', fontsize=14);
plt.show()
else:
plt.figure()
R = dendrogram(linkage(distxy, method=str(self.lin)))
plt.xlabel('X units')
plt.ylabel('Y units')
plt.suptitle('Cluster Dendrogram', fontweight='bold', fontsize=14);
plt.show()
def getdis(self,txt):
if txt=="chebychev":
self.dm=chebyshev_distance
elif txt=="cityblock":
self.dm=cityblock_distance
else:
self.dm=euclidean_distance
print self.dm
def retranslateUi(self, Form):
Form.setWindowTitle(_translate("Form", "Form", None))
self.groupBox.setTitle(_translate("Form", "Clusterer", None))
self.lineEdit.setText(_translate("Form", "Hierarchical", None))
self.groupBox_4.setTitle(_translate("Form", "Distance", None))
self.comboBox_2.setItemText(0, _translate("Form", "euclidean", None))
self.comboBox_2.setItemText(1, _translate("Form", "chebychev", None))
self.comboBox_2.setItemText(2, _translate("Form", "cityblock", None))
self.comboBox_3.setItemText(0, _translate("Form", "complete", None))
self.comboBox_3.setItemText(1, _translate("Form", "single", None))
self.comboBox_3.setItemText(2, _translate("Form", "average", None))
self.checkBox_3.setText(_translate("Form", "Random Data", None))
self.checkBox_4.setText(_translate("Form", "plot dendrogram", None))
self.pushButton_3.setText(_translate("Form", "Start", None))
self.pushButton.setText(_translate("Form", "Input File", None))
self.groupBox_2.setTitle(_translate("Form", "info", None))
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
Dialog = QtGui.QDialog()
ui = Ui_Form()
ui.setupUi(Dialog)
Dialog.show()
sys.exit(app.exec_())
| gpl-3.0 |
yyjiang/scikit-learn | examples/gaussian_process/plot_gp_probabilistic_classification_after_regression.py | 252 | 3490 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
==============================================================================
Gaussian Processes classification example: exploiting the probabilistic output
==============================================================================
A two-dimensional regression exercise with a post-processing allowing for
probabilistic classification thanks to the Gaussian property of the prediction.
The figure illustrates the probability that the prediction is negative with
respect to the remaining uncertainty in the prediction. The red and blue lines
corresponds to the 95% confidence interval on the prediction of the zero level
set.
"""
print(__doc__)
# Author: Vincent Dubourg <[email protected]>
# Licence: BSD 3 clause
import numpy as np
from scipy import stats
from sklearn.gaussian_process import GaussianProcess
from matplotlib import pyplot as pl
from matplotlib import cm
# Standard normal distribution functions
phi = stats.distributions.norm().pdf
PHI = stats.distributions.norm().cdf
PHIinv = stats.distributions.norm().ppf
# A few constants
lim = 8
def g(x):
"""The function to predict (classification will then consist in predicting
whether g(x) <= 0 or not)"""
return 5. - x[:, 1] - .5 * x[:, 0] ** 2.
# Design of experiments
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
# Observations
y = g(X)
# Instantiate and fit Gaussian Process Model
gp = GaussianProcess(theta0=5e-1)
# Don't perform MLE or you'll get a perfect prediction for this simple example!
gp.fit(X, y)
# Evaluate real function, the prediction and its MSE on a grid
res = 50
x1, x2 = np.meshgrid(np.linspace(- lim, lim, res),
np.linspace(- lim, lim, res))
xx = np.vstack([x1.reshape(x1.size), x2.reshape(x2.size)]).T
y_true = g(xx)
y_pred, MSE = gp.predict(xx, eval_MSE=True)
sigma = np.sqrt(MSE)
y_true = y_true.reshape((res, res))
y_pred = y_pred.reshape((res, res))
sigma = sigma.reshape((res, res))
k = PHIinv(.975)
# Plot the probabilistic classification iso-values using the Gaussian property
# of the prediction
fig = pl.figure(1)
ax = fig.add_subplot(111)
ax.axes.set_aspect('equal')
pl.xticks([])
pl.yticks([])
ax.set_xticklabels([])
ax.set_yticklabels([])
pl.xlabel('$x_1$')
pl.ylabel('$x_2$')
cax = pl.imshow(np.flipud(PHI(- y_pred / sigma)), cmap=cm.gray_r, alpha=0.8,
extent=(- lim, lim, - lim, lim))
norm = pl.matplotlib.colors.Normalize(vmin=0., vmax=0.9)
cb = pl.colorbar(cax, ticks=[0., 0.2, 0.4, 0.6, 0.8, 1.], norm=norm)
cb.set_label('${\\rm \mathbb{P}}\left[\widehat{G}(\mathbf{x}) \leq 0\\right]$')
pl.plot(X[y <= 0, 0], X[y <= 0, 1], 'r.', markersize=12)
pl.plot(X[y > 0, 0], X[y > 0, 1], 'b.', markersize=12)
cs = pl.contour(x1, x2, y_true, [0.], colors='k', linestyles='dashdot')
cs = pl.contour(x1, x2, PHI(- y_pred / sigma), [0.025], colors='b',
linestyles='solid')
pl.clabel(cs, fontsize=11)
cs = pl.contour(x1, x2, PHI(- y_pred / sigma), [0.5], colors='k',
linestyles='dashed')
pl.clabel(cs, fontsize=11)
cs = pl.contour(x1, x2, PHI(- y_pred / sigma), [0.975], colors='r',
linestyles='solid')
pl.clabel(cs, fontsize=11)
pl.show()
| bsd-3-clause |
Nyker510/scikit-learn | sklearn/decomposition/truncated_svd.py | 199 | 7744 | """Truncated SVD for sparse matrices, aka latent semantic analysis (LSA).
"""
# Author: Lars Buitinck <[email protected]>
# Olivier Grisel <[email protected]>
# Michael Becker <[email protected]>
# License: 3-clause BSD.
import numpy as np
import scipy.sparse as sp
try:
from scipy.sparse.linalg import svds
except ImportError:
from ..utils.arpack import svds
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array, as_float_array, check_random_state
from ..utils.extmath import randomized_svd, safe_sparse_dot, svd_flip
from ..utils.sparsefuncs import mean_variance_axis
__all__ = ["TruncatedSVD"]
class TruncatedSVD(BaseEstimator, TransformerMixin):
"""Dimensionality reduction using truncated SVD (aka LSA).
This transformer performs linear dimensionality reduction by means of
truncated singular value decomposition (SVD). It is very similar to PCA,
but operates on sample vectors directly, instead of on a covariance matrix.
This means it can work with scipy.sparse matrices efficiently.
In particular, truncated SVD works on term count/tf-idf matrices as
returned by the vectorizers in sklearn.feature_extraction.text. In that
context, it is known as latent semantic analysis (LSA).
    This estimator supports two algorithms: a fast randomized SVD solver, and
a "naive" algorithm that uses ARPACK as an eigensolver on (X * X.T) or
(X.T * X), whichever is more efficient.
Read more in the :ref:`User Guide <LSA>`.
Parameters
----------
n_components : int, default = 2
Desired dimensionality of output data.
Must be strictly less than the number of features.
The default value is useful for visualisation. For LSA, a value of
100 is recommended.
algorithm : string, default = "randomized"
SVD solver to use. Either "arpack" for the ARPACK wrapper in SciPy
(scipy.sparse.linalg.svds), or "randomized" for the randomized
algorithm due to Halko (2009).
n_iter : int, optional
Number of iterations for randomized SVD solver. Not used by ARPACK.
random_state : int or RandomState, optional
(Seed for) pseudo-random number generator. If not given, the
numpy.random singleton is used.
tol : float, optional
Tolerance for ARPACK. 0 means machine precision. Ignored by randomized
SVD solver.
Attributes
----------
components_ : array, shape (n_components, n_features)
explained_variance_ratio_ : array, [n_components]
Percentage of variance explained by each of the selected components.
explained_variance_ : array, [n_components]
The variance of the training samples transformed by a projection to
each component.
Examples
--------
>>> from sklearn.decomposition import TruncatedSVD
>>> from sklearn.random_projection import sparse_random_matrix
>>> X = sparse_random_matrix(100, 100, density=0.01, random_state=42)
>>> svd = TruncatedSVD(n_components=5, random_state=42)
>>> svd.fit(X) # doctest: +NORMALIZE_WHITESPACE
TruncatedSVD(algorithm='randomized', n_components=5, n_iter=5,
random_state=42, tol=0.0)
>>> print(svd.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.07825... 0.05528... 0.05445... 0.04997... 0.04134...]
>>> print(svd.explained_variance_ratio_.sum()) # doctest: +ELLIPSIS
0.27930...
See also
--------
PCA
RandomizedPCA
References
----------
Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
    Halko, et al., 2009 (arXiv:0909.4061) http://arxiv.org/pdf/0909.4061
Notes
-----
    SVD suffers from a problem called "sign indeterminacy", which means the
sign of the ``components_`` and the output from transform depend on the
algorithm and random state. To work around this, fit instances of this
class to data once, then keep the instance around to do transformations.
"""
def __init__(self, n_components=2, algorithm="randomized", n_iter=5,
random_state=None, tol=0.):
self.algorithm = algorithm
self.n_components = n_components
self.n_iter = n_iter
self.random_state = random_state
self.tol = tol
def fit(self, X, y=None):
"""Fit LSI model on training data X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Returns
-------
self : object
Returns the transformer object.
"""
self.fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Fit LSI model to X and perform dimensionality reduction on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.
"""
X = as_float_array(X, copy=False)
random_state = check_random_state(self.random_state)
# If sparse and not csr or csc, convert to csr
if sp.issparse(X) and X.getformat() not in ["csr", "csc"]:
X = X.tocsr()
if self.algorithm == "arpack":
U, Sigma, VT = svds(X, k=self.n_components, tol=self.tol)
# svds doesn't abide by scipy.linalg.svd/randomized_svd
# conventions, so reverse its outputs.
Sigma = Sigma[::-1]
U, VT = svd_flip(U[:, ::-1], VT[::-1])
elif self.algorithm == "randomized":
k = self.n_components
n_features = X.shape[1]
if k >= n_features:
raise ValueError("n_components must be < n_features;"
" got %d >= %d" % (k, n_features))
U, Sigma, VT = randomized_svd(X, self.n_components,
n_iter=self.n_iter,
random_state=random_state)
else:
raise ValueError("unknown algorithm %r" % self.algorithm)
self.components_ = VT
# Calculate explained variance & explained variance ratio
X_transformed = np.dot(U, np.diag(Sigma))
self.explained_variance_ = exp_var = np.var(X_transformed, axis=0)
if sp.issparse(X):
_, full_var = mean_variance_axis(X, axis=0)
full_var = full_var.sum()
else:
full_var = np.var(X, axis=0).sum()
self.explained_variance_ratio_ = exp_var / full_var
return X_transformed
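        # Note (added comment): with the truncated SVD X ~= U * diag(Sigma) * VT,
        # the reduced data returned above is U * diag(Sigma), which equals
        # X.dot(VT.T); transform() below uses that second form for new data.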
def transform(self, X):
"""Perform dimensionality reduction on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
New data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.
"""
X = check_array(X, accept_sparse='csr')
return safe_sparse_dot(X, self.components_.T)
def inverse_transform(self, X):
"""Transform X back to its original space.
Returns an array X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data.
Returns
-------
X_original : array, shape (n_samples, n_features)
Note that this is always a dense array.
"""
X = check_array(X)
return np.dot(X, self.components_)
| bsd-3-clause |
lukeiwanski/tensorflow-opencl | tensorflow/examples/learn/iris_run_config.py | 86 | 2087 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset, with run config."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import cross_validation
from sklearn import datasets
from sklearn import metrics
import tensorflow as tf
def main(unused_argv):
# Load dataset.
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = cross_validation.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
  # You can define your configurations by providing a RunConfig object to
  # the estimator to control session configurations, e.g. num_cores
# and gpu_memory_fraction
run_config = tf.contrib.learn.estimators.RunConfig(
num_cores=3, gpu_memory_fraction=0.6)
# Build 3 layer DNN with 10, 20, 10 units respectively.
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input(
x_train)
classifier = tf.contrib.learn.DNNClassifier(feature_columns=feature_columns,
hidden_units=[10, 20, 10],
n_classes=3,
config=run_config)
# Fit and predict.
classifier.fit(x_train, y_train, steps=200)
predictions = list(classifier.predict(x_test, as_iterable=True))
score = metrics.accuracy_score(y_test, predictions)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
AnasGhrab/scikit-learn | sklearn/linear_model/tests/test_omp.py | 272 | 7752 | # Author: Vlad Niculae
# Licence: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model import (orthogonal_mp, orthogonal_mp_gram,
OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV,
LinearRegression)
from sklearn.utils import check_random_state
from sklearn.datasets import make_sparse_coded_signal
n_samples, n_features, n_nonzero_coefs, n_targets = 20, 30, 5, 3
y, X, gamma = make_sparse_coded_signal(n_targets, n_features, n_samples,
n_nonzero_coefs, random_state=0)
G, Xy = np.dot(X.T, X), np.dot(X.T, y)
# this makes X (n_samples, n_features)
# and y (n_samples, 3)
def test_correct_shapes():
assert_equal(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5).shape,
(n_features,))
assert_equal(orthogonal_mp(X, y, n_nonzero_coefs=5).shape,
(n_features, 3))
def test_correct_shapes_gram():
assert_equal(orthogonal_mp_gram(G, Xy[:, 0], n_nonzero_coefs=5).shape,
(n_features,))
assert_equal(orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5).shape,
(n_features, 3))
def test_n_nonzero_coefs():
assert_true(np.count_nonzero(orthogonal_mp(X, y[:, 0],
n_nonzero_coefs=5)) <= 5)
assert_true(np.count_nonzero(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5,
precompute=True)) <= 5)
def test_tol():
tol = 0.5
gamma = orthogonal_mp(X, y[:, 0], tol=tol)
gamma_gram = orthogonal_mp(X, y[:, 0], tol=tol, precompute=True)
assert_true(np.sum((y[:, 0] - np.dot(X, gamma)) ** 2) <= tol)
assert_true(np.sum((y[:, 0] - np.dot(X, gamma_gram)) ** 2) <= tol)
def test_with_without_gram():
assert_array_almost_equal(
orthogonal_mp(X, y, n_nonzero_coefs=5),
orthogonal_mp(X, y, n_nonzero_coefs=5, precompute=True))
def test_with_without_gram_tol():
assert_array_almost_equal(
orthogonal_mp(X, y, tol=1.),
orthogonal_mp(X, y, tol=1., precompute=True))
def test_unreachable_accuracy():
assert_array_almost_equal(
orthogonal_mp(X, y, tol=0),
orthogonal_mp(X, y, n_nonzero_coefs=n_features))
assert_array_almost_equal(
assert_warns(RuntimeWarning, orthogonal_mp, X, y, tol=0,
precompute=True),
orthogonal_mp(X, y, precompute=True,
n_nonzero_coefs=n_features))
def test_bad_input():
assert_raises(ValueError, orthogonal_mp, X, y, tol=-1)
assert_raises(ValueError, orthogonal_mp, X, y, n_nonzero_coefs=-1)
assert_raises(ValueError, orthogonal_mp, X, y,
n_nonzero_coefs=n_features + 1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy, tol=-1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy, n_nonzero_coefs=-1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy,
n_nonzero_coefs=n_features + 1)
def test_perfect_signal_recovery():
idx, = gamma[:, 0].nonzero()
gamma_rec = orthogonal_mp(X, y[:, 0], 5)
gamma_gram = orthogonal_mp_gram(G, Xy[:, 0], 5)
assert_array_equal(idx, np.flatnonzero(gamma_rec))
assert_array_equal(idx, np.flatnonzero(gamma_gram))
assert_array_almost_equal(gamma[:, 0], gamma_rec, decimal=2)
assert_array_almost_equal(gamma[:, 0], gamma_gram, decimal=2)
def test_estimator():
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y[:, 0])
assert_equal(omp.coef_.shape, (n_features,))
assert_equal(omp.intercept_.shape, ())
assert_true(np.count_nonzero(omp.coef_) <= n_nonzero_coefs)
omp.fit(X, y)
assert_equal(omp.coef_.shape, (n_targets, n_features))
assert_equal(omp.intercept_.shape, (n_targets,))
assert_true(np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
omp.set_params(fit_intercept=False, normalize=False)
omp.fit(X, y[:, 0])
assert_equal(omp.coef_.shape, (n_features,))
assert_equal(omp.intercept_, 0)
assert_true(np.count_nonzero(omp.coef_) <= n_nonzero_coefs)
omp.fit(X, y)
assert_equal(omp.coef_.shape, (n_targets, n_features))
assert_equal(omp.intercept_, 0)
assert_true(np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
def test_identical_regressors():
newX = X.copy()
newX[:, 1] = newX[:, 0]
gamma = np.zeros(n_features)
gamma[0] = gamma[1] = 1.
newy = np.dot(newX, gamma)
assert_warns(RuntimeWarning, orthogonal_mp, newX, newy, 2)
def test_swapped_regressors():
gamma = np.zeros(n_features)
# X[:, 21] should be selected first, then X[:, 0] selected second,
# which will take X[:, 21]'s place in case the algorithm does
# column swapping for optimization (which is the case at the moment)
gamma[21] = 1.0
gamma[0] = 0.5
new_y = np.dot(X, gamma)
new_Xy = np.dot(X.T, new_y)
gamma_hat = orthogonal_mp(X, new_y, 2)
gamma_hat_gram = orthogonal_mp_gram(G, new_Xy, 2)
assert_array_equal(np.flatnonzero(gamma_hat), [0, 21])
assert_array_equal(np.flatnonzero(gamma_hat_gram), [0, 21])
def test_no_atoms():
y_empty = np.zeros_like(y)
Xy_empty = np.dot(X.T, y_empty)
gamma_empty = ignore_warnings(orthogonal_mp)(X, y_empty, 1)
gamma_empty_gram = ignore_warnings(orthogonal_mp)(G, Xy_empty, 1)
assert_equal(np.all(gamma_empty == 0), True)
assert_equal(np.all(gamma_empty_gram == 0), True)
def test_omp_path():
path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True)
last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
path = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=True)
last = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=False)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
def test_omp_return_path_prop_with_gram():
path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True,
precompute=True)
last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False,
precompute=True)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
def test_omp_cv():
y_ = y[:, 0]
gamma_ = gamma[:, 0]
ompcv = OrthogonalMatchingPursuitCV(normalize=True, fit_intercept=False,
max_iter=10, cv=5)
ompcv.fit(X, y_)
assert_equal(ompcv.n_nonzero_coefs_, n_nonzero_coefs)
assert_array_almost_equal(ompcv.coef_, gamma_)
omp = OrthogonalMatchingPursuit(normalize=True, fit_intercept=False,
n_nonzero_coefs=ompcv.n_nonzero_coefs_)
omp.fit(X, y_)
assert_array_almost_equal(ompcv.coef_, omp.coef_)
def test_omp_reaches_least_squares():
# Use small simple data; it's a sanity check but OMP can stop early
rng = check_random_state(0)
n_samples, n_features = (10, 8)
n_targets = 3
X = rng.randn(n_samples, n_features)
Y = rng.randn(n_samples, n_targets)
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_features)
lstsq = LinearRegression()
omp.fit(X, Y)
lstsq.fit(X, Y)
assert_array_almost_equal(omp.coef_, lstsq.coef_)
| bsd-3-clause |
nomadcube/scikit-learn | examples/linear_model/plot_robust_fit.py | 238 | 2414 | """
Robust linear estimator fitting
===============================
Here a sine function is fit with a polynomial of order 3, for values
close to zero.
Robust fitting is demoed in different situations:
- No measurement errors, only modelling errors (fitting a sine with a
polynomial)
- Measurement errors in X
- Measurement errors in y
The median absolute deviation to non-corrupt new data is used to judge
the quality of the prediction.
What we can see is that:
- RANSAC is good for strong outliers in the y direction
- TheilSen is good for small outliers, both in direction X and y, but has
  a break point above which it performs worse than OLS.
"""
from matplotlib import pyplot as plt
import numpy as np
from sklearn import linear_model, metrics
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
np.random.seed(42)
X = np.random.normal(size=400)
y = np.sin(X)
# Make sure that X is 2D
X = X[:, np.newaxis]
X_test = np.random.normal(size=200)
y_test = np.sin(X_test)
X_test = X_test[:, np.newaxis]
y_errors = y.copy()
y_errors[::3] = 3
X_errors = X.copy()
X_errors[::3] = 3
y_errors_large = y.copy()
y_errors_large[::3] = 10
X_errors_large = X.copy()
X_errors_large[::3] = 10
estimators = [('OLS', linear_model.LinearRegression()),
('Theil-Sen', linear_model.TheilSenRegressor(random_state=42)),
('RANSAC', linear_model.RANSACRegressor(random_state=42)), ]
x_plot = np.linspace(X.min(), X.max())
for title, this_X, this_y in [
('Modeling errors only', X, y),
('Corrupt X, small deviants', X_errors, y),
('Corrupt y, small deviants', X, y_errors),
('Corrupt X, large deviants', X_errors_large, y),
('Corrupt y, large deviants', X, y_errors_large)]:
plt.figure(figsize=(5, 4))
plt.plot(this_X[:, 0], this_y, 'k+')
for name, estimator in estimators:
model = make_pipeline(PolynomialFeatures(3), estimator)
model.fit(this_X, this_y)
mse = metrics.mean_squared_error(model.predict(X_test), y_test)
y_plot = model.predict(x_plot[:, np.newaxis])
plt.plot(x_plot, y_plot,
label='%s: error = %.3f' % (name, mse))
plt.legend(loc='best', frameon=False,
           title='Error: mean squared error\n to non-corrupt data')
plt.xlim(-4, 10.2)
plt.ylim(-2, 10.2)
plt.title(title)
plt.show()
| bsd-3-clause |
ngoix/OCRF | examples/cluster/plot_face_ward_segmentation.py | 71 | 2460 | """
=========================================================================
A demo of structured Ward hierarchical clustering on a raccoon face image
=========================================================================
Compute the segmentation of a 2D image with Ward hierarchical
clustering. The clustering is spatially constrained in order
for each segmented region to be in one piece.
"""
# Author : Vincent Michel, 2010
# Alexandre Gramfort, 2011
# License: BSD 3 clause
print(__doc__)
import time as time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.cluster import AgglomerativeClustering
from sklearn.utils.testing import SkipTest
from sklearn.utils.fixes import sp_version
if sp_version < (0, 12):
    raise SkipTest("Skipping because the SciPy version is earlier than 0.12.0 "
                   "and thus does not include the scipy.misc.face() image.")
###############################################################################
# Generate data
try:
face = sp.face(gray=True)
except AttributeError:
# Newer versions of scipy have face in misc
from scipy import misc
face = misc.face(gray=True)
# Resize it to 10% of the original size to speed up the processing
face = sp.misc.imresize(face, 0.10) / 255.
X = np.reshape(face, (-1, 1))
###############################################################################
# Define the structure A of the data. Pixels connected to their neighbors.
connectivity = grid_to_graph(*face.shape)
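# Editor's note: a minimal sketch of what the connectivity constraint encodes
# (purely illustrative; nothing below depends on it).  On a tiny 2x3 pixel
# grid, grid_to_graph links each pixel only to its immediate neighbours, so
# Ward merges can only join spatially touching regions.
toy_connectivity = grid_to_graph(2, 3)
print("Toy 2x3 grid connectivity matrix shape: ", toy_connectivity.shape)  # (6, 6)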
###############################################################################
# Compute clustering
print("Compute structured hierarchical clustering...")
st = time.time()
n_clusters = 15 # number of regions
ward = AgglomerativeClustering(n_clusters=n_clusters, linkage='ward',
connectivity=connectivity)
ward.fit(X)
label = np.reshape(ward.labels_, face.shape)
print("Elapsed time: ", time.time() - st)
print("Number of pixels: ", label.size)
print("Number of clusters: ", np.unique(label).size)
###############################################################################
# Plot the results on an image
plt.figure(figsize=(5, 5))
plt.imshow(face, cmap=plt.cm.gray)
for l in range(n_clusters):
plt.contour(label == l, contours=1,
colors=[plt.cm.spectral(l / float(n_clusters)), ])
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
ChanChiChoi/scikit-learn | sklearn/metrics/tests/test_pairwise.py | 105 | 22788 | import numpy as np
from numpy import linalg
from scipy.sparse import dok_matrix, csr_matrix, issparse
from scipy.spatial.distance import cosine, cityblock, minkowski, wminkowski
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.externals.six import iteritems
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.metrics.pairwise import manhattan_distances
from sklearn.metrics.pairwise import linear_kernel
from sklearn.metrics.pairwise import chi2_kernel, additive_chi2_kernel
from sklearn.metrics.pairwise import polynomial_kernel
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics.pairwise import sigmoid_kernel
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics.pairwise import cosine_distances
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_distances_argmin_min
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.metrics.pairwise import PAIRWISE_KERNEL_FUNCTIONS
from sklearn.metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from sklearn.metrics.pairwise import PAIRED_DISTANCES
from sklearn.metrics.pairwise import check_pairwise_arrays
from sklearn.metrics.pairwise import check_paired_arrays
from sklearn.metrics.pairwise import _parallel_pairwise
from sklearn.metrics.pairwise import paired_distances
from sklearn.metrics.pairwise import paired_euclidean_distances
from sklearn.metrics.pairwise import paired_manhattan_distances
from sklearn.preprocessing import normalize
def test_pairwise_distances():
    # Test the pairwise_distances helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
S = pairwise_distances(X, metric="euclidean")
S2 = euclidean_distances(X)
assert_array_almost_equal(S, S2)
# Euclidean distance, with Y != X.
Y = rng.random_sample((2, 4))
S = pairwise_distances(X, Y, metric="euclidean")
S2 = euclidean_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
S2 = pairwise_distances(X_tuples, Y_tuples, metric="euclidean")
assert_array_almost_equal(S, S2)
# "cityblock" uses sklearn metric, cityblock (function) is scipy.spatial.
S = pairwise_distances(X, metric="cityblock")
S2 = pairwise_distances(X, metric=cityblock)
assert_equal(S.shape[0], S.shape[1])
assert_equal(S.shape[0], X.shape[0])
assert_array_almost_equal(S, S2)
# The manhattan metric should be equivalent to cityblock.
S = pairwise_distances(X, Y, metric="manhattan")
S2 = pairwise_distances(X, Y, metric=cityblock)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Low-level function for manhattan can divide in blocks to avoid
# using too much memory during the broadcasting
S3 = manhattan_distances(X, Y, size_threshold=10)
assert_array_almost_equal(S, S3)
# Test cosine as a string metric versus cosine callable
# "cosine" uses sklearn metric, cosine (function) is scipy.spatial
S = pairwise_distances(X, Y, metric="cosine")
S2 = pairwise_distances(X, Y, metric=cosine)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Tests that precomputed metric returns pointer to, and not copy of, X.
S = np.dot(X, X.T)
S2 = pairwise_distances(S, metric="precomputed")
assert_true(S is S2)
# Test with sparse X and Y,
# currently only supported for Euclidean, L1 and cosine.
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
S = pairwise_distances(X_sparse, Y_sparse, metric="euclidean")
S2 = euclidean_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse, metric="cosine")
S2 = cosine_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse.tocsc(), metric="manhattan")
S2 = manhattan_distances(X_sparse.tobsr(), Y_sparse.tocoo())
assert_array_almost_equal(S, S2)
S2 = manhattan_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with scipy.spatial.distance metric, with a kwd
kwds = {"p": 2.0}
S = pairwise_distances(X, Y, metric="minkowski", **kwds)
S2 = pairwise_distances(X, Y, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# same with Y = None
kwds = {"p": 2.0}
S = pairwise_distances(X, metric="minkowski", **kwds)
S2 = pairwise_distances(X, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# Test that scipy distance metrics throw an error if sparse matrix given
assert_raises(TypeError, pairwise_distances, X_sparse, metric="minkowski")
assert_raises(TypeError, pairwise_distances, X, Y_sparse,
metric="minkowski")
    # Test that a ValueError is raised if the metric is unknown
assert_raises(ValueError, pairwise_distances, X, Y, metric="blah")
def check_pairwise_parallel(func, metric, kwds):
rng = np.random.RandomState(0)
for make_data in (np.array, csr_matrix):
X = make_data(rng.random_sample((5, 4)))
Y = make_data(rng.random_sample((3, 4)))
try:
S = func(X, metric=metric, n_jobs=1, **kwds)
except (TypeError, ValueError) as exc:
# Not all metrics support sparse input
# ValueError may be triggered by bad callable
if make_data is csr_matrix:
assert_raises(type(exc), func, X, metric=metric,
n_jobs=2, **kwds)
continue
else:
raise
S2 = func(X, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
S = func(X, Y, metric=metric, n_jobs=1, **kwds)
S2 = func(X, Y, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
def test_pairwise_parallel():
wminkowski_kwds = {'w': np.arange(1, 5).astype('double'), 'p': 1}
metrics = [(pairwise_distances, 'euclidean', {}),
(pairwise_distances, wminkowski, wminkowski_kwds),
(pairwise_distances, 'wminkowski', wminkowski_kwds),
(pairwise_kernels, 'polynomial', {'degree': 1}),
(pairwise_kernels, callable_rbf_kernel, {'gamma': .1}),
]
for func, metric, kwds in metrics:
yield check_pairwise_parallel, func, metric, kwds
def test_pairwise_callable_nonstrict_metric():
    # pairwise_distances should allow a callable metric where metric(x, x) != 0
# Knowing that the callable is a strict metric would allow the diagonal to
# be left uncalculated and set to 0.
assert_equal(pairwise_distances([[1]], metric=lambda x, y: 5)[0, 0], 5)
def callable_rbf_kernel(x, y, **kwds):
# Callable version of pairwise.rbf_kernel.
K = rbf_kernel(np.atleast_2d(x), np.atleast_2d(y), **kwds)
return K
def test_pairwise_kernels():
# Test the pairwise_kernels helper function.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
# Test with all metrics that should be in PAIRWISE_KERNEL_FUNCTIONS.
test_metrics = ["rbf", "sigmoid", "polynomial", "linear", "chi2",
"additive_chi2"]
for metric in test_metrics:
function = PAIRWISE_KERNEL_FUNCTIONS[metric]
# Test with Y=None
K1 = pairwise_kernels(X, metric=metric)
K2 = function(X)
assert_array_almost_equal(K1, K2)
# Test with Y=Y
K1 = pairwise_kernels(X, Y=Y, metric=metric)
K2 = function(X, Y=Y)
assert_array_almost_equal(K1, K2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
K2 = pairwise_kernels(X_tuples, Y_tuples, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with sparse X and Y
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
if metric in ["chi2", "additive_chi2"]:
# these don't support sparse matrices yet
assert_raises(ValueError, pairwise_kernels,
X_sparse, Y=Y_sparse, metric=metric)
continue
K1 = pairwise_kernels(X_sparse, Y=Y_sparse, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with a callable function, with given keywords.
metric = callable_rbf_kernel
kwds = {}
kwds['gamma'] = 0.1
K1 = pairwise_kernels(X, Y=Y, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=Y, **kwds)
assert_array_almost_equal(K1, K2)
# callable function, X=Y
K1 = pairwise_kernels(X, Y=X, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=X, **kwds)
assert_array_almost_equal(K1, K2)
def test_pairwise_kernels_filter_param():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
K = rbf_kernel(X, Y, gamma=0.1)
params = {"gamma": 0.1, "blabla": ":)"}
K2 = pairwise_kernels(X, Y, metric="rbf", filter_params=True, **params)
assert_array_almost_equal(K, K2)
assert_raises(TypeError, pairwise_kernels, X, Y, "rbf", **params)
def test_paired_distances():
    # Test the paired_distances helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
# Euclidean distance, with Y != X.
Y = rng.random_sample((5, 4))
for metric, func in iteritems(PAIRED_DISTANCES):
S = paired_distances(X, Y, metric=metric)
S2 = func(X, Y)
assert_array_almost_equal(S, S2)
S3 = func(csr_matrix(X), csr_matrix(Y))
assert_array_almost_equal(S, S3)
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
            # Check that the pairwise_distances implementation
# gives the same value
distances = PAIRWISE_DISTANCE_FUNCTIONS[metric](X, Y)
distances = np.diag(distances)
assert_array_almost_equal(distances, S)
# Check the callable implementation
S = paired_distances(X, Y, metric='manhattan')
S2 = paired_distances(X, Y, metric=lambda x, y: np.abs(x - y).sum(axis=0))
assert_array_almost_equal(S, S2)
    # Test that a ValueError is raised when the lengths of X and Y differ
Y = rng.random_sample((3, 4))
assert_raises(ValueError, paired_distances, X, Y)
def test_pairwise_distances_argmin_min():
# Check pairwise minimum distances computation for any metric
X = [[0], [1]]
Y = [[-1], [2]]
Xsp = dok_matrix(X)
Ysp = csr_matrix(Y, dtype=np.float32)
# euclidean metric
D, E = pairwise_distances_argmin_min(X, Y, metric="euclidean")
D2 = pairwise_distances_argmin(X, Y, metric="euclidean")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# sparse matrix case
Dsp, Esp = pairwise_distances_argmin_min(Xsp, Ysp, metric="euclidean")
assert_array_equal(Dsp, D)
assert_array_equal(Esp, E)
# We don't want np.matrix here
assert_equal(type(Dsp), np.ndarray)
assert_equal(type(Esp), np.ndarray)
# Non-euclidean sklearn metric
D, E = pairwise_distances_argmin_min(X, Y, metric="manhattan")
D2 = pairwise_distances_argmin(X, Y, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(E, [1., 1.])
D, E = pairwise_distances_argmin_min(Xsp, Ysp, metric="manhattan")
D2 = pairwise_distances_argmin(Xsp, Ysp, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (callable)
D, E = pairwise_distances_argmin_min(X, Y, metric=minkowski,
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (string)
D, E = pairwise_distances_argmin_min(X, Y, metric="minkowski",
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Compare with naive implementation
rng = np.random.RandomState(0)
X = rng.randn(97, 149)
Y = rng.randn(111, 149)
dist = pairwise_distances(X, Y, metric="manhattan")
dist_orig_ind = dist.argmin(axis=0)
dist_orig_val = dist[dist_orig_ind, range(len(dist_orig_ind))]
dist_chunked_ind, dist_chunked_val = pairwise_distances_argmin_min(
X, Y, axis=0, metric="manhattan", batch_size=50)
np.testing.assert_almost_equal(dist_orig_ind, dist_chunked_ind, decimal=7)
np.testing.assert_almost_equal(dist_orig_val, dist_chunked_val, decimal=7)
def test_euclidean_distances():
# Check the pairwise Euclidean distances computation
X = [[0]]
Y = [[1], [2]]
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
X = csr_matrix(X)
Y = csr_matrix(Y)
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
# Paired distances
def test_paired_euclidean_distances():
# Check the paired Euclidean distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_euclidean_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_paired_manhattan_distances():
# Check the paired manhattan distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_manhattan_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_chi_square_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((10, 4))
K_add = additive_chi2_kernel(X, Y)
gamma = 0.1
K = chi2_kernel(X, Y, gamma=gamma)
assert_equal(K.dtype, np.float)
for i, x in enumerate(X):
for j, y in enumerate(Y):
chi2 = -np.sum((x - y) ** 2 / (x + y))
chi2_exp = np.exp(gamma * chi2)
assert_almost_equal(K_add[i, j], chi2)
assert_almost_equal(K[i, j], chi2_exp)
# check diagonal is ones for data with itself
K = chi2_kernel(Y)
assert_array_equal(np.diag(K), 1)
# check off-diagonal is < 1 but > 0:
assert_true(np.all(K > 0))
assert_true(np.all(K - np.diag(np.diag(K)) < 1))
# check that float32 is preserved
X = rng.random_sample((5, 4)).astype(np.float32)
Y = rng.random_sample((10, 4)).astype(np.float32)
K = chi2_kernel(X, Y)
assert_equal(K.dtype, np.float32)
# check integer type gets converted,
# check that zeros are handled
X = rng.random_sample((10, 4)).astype(np.int32)
K = chi2_kernel(X, X)
assert_true(np.isfinite(K).all())
assert_equal(K.dtype, np.float)
# check that kernel of similar things is greater than dissimilar ones
X = [[.3, .7], [1., 0]]
Y = [[0, 1], [.9, .1]]
K = chi2_kernel(X, Y)
assert_greater(K[0, 0], K[0, 1])
assert_greater(K[1, 1], K[1, 0])
# test negative input
assert_raises(ValueError, chi2_kernel, [[0, -1]])
assert_raises(ValueError, chi2_kernel, [[0, -1]], [[-1, -1]])
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[-1, -1]])
# different n_features in X and Y
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[.2, .2, .6]])
# sparse matrices
assert_raises(ValueError, chi2_kernel, csr_matrix(X), csr_matrix(Y))
assert_raises(ValueError, additive_chi2_kernel,
csr_matrix(X), csr_matrix(Y))
def test_kernel_symmetry():
# Valid kernels should be symmetric
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
assert_array_almost_equal(K, K.T, 15)
def test_kernel_sparse():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
X_sparse = csr_matrix(X)
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
K2 = kernel(X_sparse, X_sparse)
assert_array_almost_equal(K, K2)
def test_linear_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = linear_kernel(X, X)
    # the diagonal elements of a linear kernel are the squared norms of the samples
assert_array_almost_equal(K.flat[::6], [linalg.norm(x) ** 2 for x in X])
def test_rbf_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = rbf_kernel(X, X)
    # the diagonal elements of an RBF kernel are 1
assert_array_almost_equal(K.flat[::6], np.ones(5))
def test_cosine_similarity_sparse_output():
# Test if cosine_similarity correctly produces sparse output.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
K1 = cosine_similarity(Xcsr, Ycsr, dense_output=False)
assert_true(issparse(K1))
K2 = pairwise_kernels(Xcsr, Y=Ycsr, metric="cosine")
assert_array_almost_equal(K1.todense(), K2)
def test_cosine_similarity():
# Test the cosine_similarity.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
for X_, Y_ in ((X, None), (X, Y),
(Xcsr, None), (Xcsr, Ycsr)):
        # Test that the cosine kernel is equal to a linear kernel when the data
        # has been previously normalized by L2-norm.
K1 = pairwise_kernels(X_, Y=Y_, metric="cosine")
X_ = normalize(X_)
if Y_ is not None:
Y_ = normalize(Y_)
K2 = pairwise_kernels(X_, Y=Y_, metric="linear")
assert_array_almost_equal(K1, K2)
def test_check_dense_matrices():
# Ensure that pairwise array check works for dense matrices.
# Check that if XB is None, XB is returned as reference to XA
XA = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_true(XA_checked is XB_checked)
assert_array_equal(XA, XA_checked)
def test_check_XB_returned():
# Ensure that if XA and XB are given correctly, they return as equal.
# Check that if XB is not None, it is returned equal.
# Note that the second dimension of XB is the same as XA.
XA = np.resize(np.arange(40), (5, 8))
XB = np.resize(np.arange(32), (4, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
XB = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_paired_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
def test_check_different_dimensions():
# Ensure an error is raised if the dimensions are different.
XA = np.resize(np.arange(45), (5, 9))
XB = np.resize(np.arange(32), (4, 8))
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XB = np.resize(np.arange(4 * 9), (4, 9))
assert_raises(ValueError, check_paired_arrays, XA, XB)
def test_check_invalid_dimensions():
# Ensure an error is raised on 1D input arrays.
XA = np.arange(45)
XB = np.resize(np.arange(32), (4, 8))
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XA = np.resize(np.arange(45), (5, 9))
XB = np.arange(32)
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
def test_check_sparse_arrays():
# Ensures that checks return valid sparse matrices.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_sparse = csr_matrix(XA)
XB = rng.random_sample((5, 4))
XB_sparse = csr_matrix(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_sparse, XB_sparse)
# compare their difference because testing csr matrices for
# equality with '==' does not work as expected.
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XB_checked))
assert_equal(abs(XB_sparse - XB_checked).sum(), 0)
XA_checked, XA_2_checked = check_pairwise_arrays(XA_sparse, XA_sparse)
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XA_2_checked))
assert_equal(abs(XA_2_checked - XA_checked).sum(), 0)
def tuplify(X):
# Turns a numpy matrix (any n-dimensional array) into tuples.
s = X.shape
if len(s) > 1:
# Tuplify each sub-array in the input.
return tuple(tuplify(row) for row in X)
else:
# Single dimension input, just return tuple of contents.
return tuple(r for r in X)
def test_check_tuple_input():
# Ensures that checks return valid tuples.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_tuples = tuplify(XA)
XB = rng.random_sample((5, 4))
XB_tuples = tuplify(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_tuples, XB_tuples)
assert_array_equal(XA_tuples, XA_checked)
assert_array_equal(XB_tuples, XB_checked)
def test_check_preserve_type():
# Ensures that type float32 is preserved.
XA = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XB = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_equal(XA_checked.dtype, np.float32)
# both float32
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_equal(XA_checked.dtype, np.float32)
assert_equal(XB_checked.dtype, np.float32)
# mismatched A
XA_checked, XB_checked = check_pairwise_arrays(XA.astype(np.float),
XB)
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
# mismatched B
XA_checked, XB_checked = check_pairwise_arrays(XA,
XB.astype(np.float))
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
| bsd-3-clause |
gabhijit/tickdownload | read_sql_data.py | 2 | 3015 | #pylint: disable-msg=broad-except, global-statement
import pandas as pd
from sqlalchemy import desc
from tickerplot.sql.sqlalchemy_wrapper import execute_one
from tickerplot.sql.sqlalchemy_wrapper import create_or_get_all_scrips_table
from tickerplot.sql.sqlalchemy_wrapper import create_or_get_nse_equities_hist_data
from tickerplot.sql.sqlalchemy_wrapper import select_expr
from tickerplot.sql.sqlalchemy_wrapper import get_metadata
_DB_METADATA = None
def get_all_scrips_names_in_db(metadata=None):
all_scrips_table = create_or_get_all_scrips_table(metadata=metadata)
scrips_select_st = select_expr([all_scrips_table.c.nse_symbol]).\
where(all_scrips_table.c.nse_traded == True)
result = execute_one(scrips_select_st, engine=metadata.bind)
symbols = [row[0] for row in result.fetchall()]
return symbols
# FIXME metadata=None doesn't look correct, we need to pass db_meta perhaps?
def get_hist_data_as_dataframes_dict(metadata=None, limit=0, max_scrips=16000):
lscrips = get_all_scrips_names_in_db(metadata=metadata)
e = metadata.bind
hist_data = create_or_get_nse_equities_hist_data(metadata=metadata)
scripdata_dict = {}
scrips = 0
for scrip in lscrips:
sql_st = select_expr([hist_data.c.date,
hist_data.c.open, hist_data.c.high,
hist_data.c.low, hist_data.c.close,
hist_data.c.volume, hist_data.c.delivery]).\
where(hist_data.c.symbol == scrip).\
order_by(desc(hist_data.c.date))
if limit and isinstance(limit, int) and limit > 0:
sql_st = sql_st.limit(limit)
scripdata = pd.io.sql.read_sql(sql_st, e)
scripdata.columns = ['date', 'open', 'high', 'low', 'close', 'volume',
'delivery']
scripdata.reset_index(inplace=True)
scripdata.set_index(pd.DatetimeIndex(scripdata['date']), inplace=True)
scripdata.drop('date', axis=1, inplace=True)
scripdata_dict[scrip] = scripdata
scrips += 1
if scrips == max_scrips:
break
return scripdata_dict
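# Editor's note: a minimal usage sketch (not part of the original script).
# The database URL below is a hypothetical placeholder; the helpers above only
# need SQLAlchemy metadata, which get_metadata() builds from such a URL.
def example_usage(db_url='sqlite:///ticks.db'):
    meta = get_metadata(db_url)
    frames = get_hist_data_as_dataframes_dict(metadata=meta, limit=100,
                                              max_scrips=5)
    return {symbol: df.shape for symbol, df in frames.items()}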
def main(args):
import argparse
parser = argparse.ArgumentParser()
# --dbpath option
parser.add_argument("--dbpath",
help="Database URL to be used.",
dest="dbpath")
args = parser.parse_args()
# Make sure we can access the DB path if specified or else exit right here.
if args.dbpath:
try:
global _DB_METADATA
_DB_METADATA = get_metadata(args.dbpath)
except Exception as e:
print ("Not a valid DB URL: {} (Exception: {})".format(
args.dbpath, e))
return -1
    get_hist_data_as_dataframes_dict(metadata=_DB_METADATA)
return 0
if __name__ == '__main__':
import sys
sys.exit(main(sys.argv))
| mit |
haijieg/SFrame | oss_src/unity/python/sframe/test/test_graph.py | 1 | 16298 | '''
Copyright (C) 2015 Dato, Inc.
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
'''
# from nose import with_setup
from ..data_structures.sgraph import SGraph, Vertex, Edge, load_graph
from ..data_structures.sframe import SFrame
from . import util
import pandas as pd
from pandas.util.testing import assert_frame_equal
import numpy as np
import unittest
import tempfile
import json
import os
import sys
if sys.version_info.major > 2:
unittest.TestCase.assertItemsEqual = unittest.TestCase.assertCountEqual
class GraphTests(unittest.TestCase):
def setUp(self):
self.vertices = pd.DataFrame({
'vid': ['1', '2', '3'],
'color': ['g', None, 'b'],
'vec': [[.1, .1, .1], [.1, .1, .1], [.1, .1, .1]]})
self.edges = pd.DataFrame({
'src_id': ['1', '2', '3'],
'dst_id': ['2', '3', '4'],
'weight': [0., None, 1.]})
def test_empty_graph(self):
g = SGraph()
self.assertEqual(g.summary(), {'num_vertices': 0, 'num_edges': 0})
self.assertEqual(len(g.get_fields()), 3)
self.assertTrue(g.get_vertices(format='sframe').shape, (0, 1))
self.assertTrue(g.get_edges(format='sframe').shape, (0, 2))
self.assertTrue(g.vertices.shape, (0, 1))
self.assertTrue(g.edges.shape, (0, 2))
self.assertTrue(len(g.get_vertices(format='list')) == 0)
self.assertTrue(len(g.get_edges(format='list')) == 0)
def test_graph_constructor(self):
g = SGraph().add_vertices(self.vertices, 'vid').add_edges(self.edges, 'src_id', 'dst_id')
g2 = SGraph(g.vertices, g.edges)
g3 = SGraph(g.vertices, g.edges, src_field="__dst_id", dst_field="__src_id") #flip around src and dst
assert_frame_equal(g.vertices.to_dataframe().sort('__id').reset_index(drop=True),
g2.vertices.to_dataframe().sort('__id').reset_index(drop=True))
assert_frame_equal(g.edges.to_dataframe().sort(['__src_id', '__dst_id']).reset_index(drop=True),
g2.edges.to_dataframe().sort(['__src_id', '__dst_id']).reset_index(drop=True))
self.assertRaises(ValueError, lambda: SGraph(SFrame(self.vertices), SFrame(self.edges)))
self.assertRaises(ValueError, lambda: SGraph(SFrame(self.vertices), SFrame(self.edges), 'vid', '__src_id', '__dst_id'))
self.assertRaises(ValueError, lambda: SGraph(SFrame(self.vertices), SFrame(self.edges), vid_field=None, src_field='src_id', dst_field='dst_id'))
def test_simple_graph(self):
for input_type in [pd.DataFrame, SFrame, list]:
g = SGraph()
if input_type is list:
vertices = [Vertex(x[1]['vid'], {'color': x[1]['color'], 'vec': x[1]['vec']}) for x in self.vertices.iterrows()]
edges = [Edge(x[1]['src_id'], x[1]['dst_id'], {'weight': x[1]['weight']}) for x in self.edges.iterrows()]
g = g.add_vertices(vertices)
g = g.add_edges(edges)
else:
g = g.add_vertices(input_type(self.vertices), vid_field='vid')
g = g.add_edges(input_type(self.edges), src_field='src_id', dst_field='dst_id')
self.assertEqual(g.summary(), {'num_vertices': 4, 'num_edges': 3})
self.assertItemsEqual(g.get_fields(), ['__id', '__src_id', '__dst_id', 'color', 'vec', 'weight'])
self.assertItemsEqual(g.get_vertices(format='dataframe').columns.values, ['color', 'vec'])
self.assertItemsEqual(g.get_edges(format='dataframe').columns.values, ['__src_id', '__dst_id', 'weight'])
self.assertTrue(g.get_edges(format='dataframe').shape, (3, 3))
self.assertTrue(g.get_vertices(format='dataframe').shape, (4, 3))
self.assertTrue(g.get_vertices(format='dataframe', fields={'color': 'g'}).shape, (1, 2))
self.assertTrue(g.get_edges(format='dataframe', fields={'weight': 0.}).shape, (1, 3))
self.assertItemsEqual(g.get_vertices(format='sframe').column_names(), ['__id', 'color', 'vec'])
self.assertItemsEqual(g.get_edges(format='sframe').column_names(), ['__src_id', '__dst_id', 'weight'])
self.assertTrue(g.get_edges(format='sframe').shape, (3, 3))
self.assertTrue(g.get_vertices(format='sframe').shape, (4, 3))
self.assertTrue(g.get_vertices(format='sframe', fields={'color': 'g'}).shape, (1, 2))
self.assertTrue(g.get_edges(format='sframe', fields={'weight': 0.}).shape, (1, 3))
vertices = g.get_vertices(format='list')
edges = g.get_edges(format='list')
self.assertEqual(len(vertices), 4)
self.assertEqual(len(edges), 3)
# get edges is lazy
edges = g.get_edges()
self.assertFalse(edges.__is_materialized__())
def test_vertex_query(self):
df = pd.DataFrame({'src': ['a', 'c', 'b', 'd', 'c', 'e', 'g', 'f'],
'dst': ['b', 'b', 'd', 'c', 'e', 'g', 'f', 'e']})
g = SGraph().add_edges(df, src_field='src', dst_field='dst')
# basic check
g2 = g.get_neighborhood(ids=['b'], radius=1, full_subgraph=False)
out = g2.get_edges(format='dataframe')
out.sort(columns=['__src_id', '__dst_id'], axis=0, inplace=True)
out.index = range(len(out))
correct = pd.DataFrame.from_records([('b', 'd'),
('a', 'b'),
('c', 'b')],
columns=['__src_id', '__dst_id'])
correct.sort(columns=['__src_id', '__dst_id'], axis=0, inplace=True)
correct.index = range(len(correct))
assert_frame_equal(out, correct, check_dtype=False)
# check larger radius, full subgraph, and multiple vertices
g2 = g.get_neighborhood(ids=['a', 'g'], radius=2, full_subgraph=True)
out = g2.get_edges(format='dataframe')
out.sort(columns=['__src_id', '__dst_id'], axis=0, inplace=True)
out.index = range(len(out))
correct = pd.DataFrame.from_records([('a', 'b'),
('b', 'd'),
('c', 'b'),
('c', 'e'),
('d', 'c'),
('e', 'g'),
('f', 'e'),
('g', 'f')],
columns=['__src_id', '__dst_id'])
correct.sort(columns=['__src_id', '__dst_id'], axis=0, inplace=True)
correct.index = range(len(correct))
assert_frame_equal(out, correct, check_dtype=False)
def test_select_query(self):
g = SGraph()
g = g.add_vertices(self.vertices, 'vid').add_edges(self.edges, 'src_id', 'dst_id')
g2 = g.select_fields(["color", "weight"])
self.assertSequenceEqual((g2.get_fields()), ['__id', 'color', '__src_id', '__dst_id', 'weight'])
g2 = g.select_fields(["color"])
self.assertSequenceEqual((g2.get_fields()), ['__id', 'color', '__src_id', '__dst_id'])
del g.edges['weight']
del g.vertices['vec']
g.vertices['color2'] = g.vertices['color']
self.assertSequenceEqual((g.get_fields()), ['__id', 'color', 'color2', '__src_id', '__dst_id'])
g2 = g.select_fields([])
self.assertSequenceEqual((g2.get_fields()), ['__id', '__src_id', '__dst_id'])
def test_select_query_with_same_vertex_edge_field(self):
vertices = SFrame({'__id': range(10)})
edges = SFrame({'__src_id': range(10), '__dst_id': range(1, 11)})
g = SGraph(vertices, edges)
g.vertices['weight'] = 0
g.vertices['v'] = 0
g.edges['weight'] = 0
g.edges['e'] = 0
self.assertItemsEqual(g.get_fields(), ['v', 'e', 'weight', 'weight', '__id', '__src_id', '__dst_id'])
g2 = g.select_fields('weight')
self.assertItemsEqual(g2.get_fields(), ['weight', 'weight', '__id', '__src_id', '__dst_id'])
def test_save_load(self):
g = SGraph().add_vertices(self.vertices, 'vid').add_edges(self.edges, 'src_id', 'dst_id')
with util.TempDirectory() as f:
g.save(f)
g2 = load_graph(f, 'binary')
self.assertEqual(g2.summary(), {'num_vertices': 4, 'num_edges': 3})
self.assertItemsEqual(g2.get_fields(), {'__id', '__src_id', '__dst_id', 'color', 'vec', 'weight'})
with util.TempDirectory() as f:
g.save(f, format='csv')
vertices = SFrame.read_csv(f + "/vertices.csv")
edges = SFrame.read_csv(f + "/edges.csv")
g2 = SGraph().add_edges(edges, '__src_id', '__dst_id').add_vertices(vertices, '__id')
self.assertEqual(g2.summary(), {'num_vertices': 4, 'num_edges': 3})
self.assertItemsEqual(g2.get_fields(), {'__id', '__src_id', '__dst_id', 'color', 'vec', 'weight'})
temp_fn = None
# The delete=False is for Windows sake
with tempfile.NamedTemporaryFile(suffix='.json', delete=False) as f:
temp_fn = f.name
g.save(f.name)
with open(f.name, 'r') as f2:
data = f2.read()
g2 = json.loads(data)
self.assertTrue("vertices" in g2)
self.assertTrue("edges" in g2)
if os.path.exists(temp_fn):
os.remove(temp_fn)
def test_load_graph_from_text(self):
toy_graph_snap = """#some comment string
#some more comment string
1\t2
1\t3
2\t3
2\t1
3\t1
3\t2"""
toy_graph_tsv = """1\t2
1\t3
2\t3
2\t1
3\t1
3\t2"""
toy_graph_csv = """1,2
1,3
2,3
2,1
3,1
3,2"""
temp_fnames = []
with tempfile.NamedTemporaryFile(mode="w", delete=False) as fsnap, tempfile.NamedTemporaryFile(mode="w", delete=False) as ftsv, tempfile.NamedTemporaryFile(mode="w", delete=False) as fcsv:
fsnap.write(toy_graph_snap)
fsnap.file.flush()
ftsv.write(toy_graph_tsv)
ftsv.file.flush()
fcsv.write(toy_graph_csv)
fcsv.file.flush()
for (fname, fmt) in zip([fsnap.name, ftsv.name, fcsv.name], ['snap', 'tsv', 'csv']):
g = load_graph(fname, fmt)
self.assertEqual(g.summary(), {'num_vertices': 3, 'num_edges': 6})
temp_fnames.append(fname)
for name in temp_fnames:
if os.path.exists(name):
os.remove(name)
def test_robust_parse(self):
df = pd.DataFrame({'int': [1, 2, 3],
'float': [1., 2., 3.],
'str': ['one', 'two', 'three'],
'nan': [np.nan, np.nan, np.nan],
'sparse_int': [1, 2, np.nan],
'sparse_float': [np.nan, 2., 3.],
'sparse_str': [None, 'two', None]
})
g = SGraph().add_vertices(df)
self.assertItemsEqual(g.get_fields(), df.columns.tolist() + ['__id', '__src_id', '__dst_id'])
df2 = g.get_vertices(format='dataframe')
sf = g.get_vertices(format='sframe')
for col in df.columns:
# potential bug: df2 is missing the 'nan' column.
if (col != 'nan'):
self.assertItemsEqual(sorted(list(df2[col].dropna())), sorted(list(df[col].dropna())))
self.assertItemsEqual(sorted(list(sf[col].dropna())), sorted(list(df[col].dropna())))
def test_missing_value_vids(self):
vertices = SFrame()
vertices['vid'] = [1, 2, 3, None]
edges = SFrame()
edges['src'] = [1, 2, 3, None]
edges['dst'] = [4, 4, 4, 4]
self.assertRaises(RuntimeError, lambda : SGraph().add_vertices(vertices, 'vid').summary())
self.assertRaises(RuntimeError, lambda : SGraph().add_edges(edges, 'src', 'dst').summary())
self.assertRaises(RuntimeError, lambda : SGraph().add_edges(edges, 'dst', 'src').summary())
def test_gframe(self):
g = SGraph()
v = g.vertices
self.assertSequenceEqual(v.column_names(), ['__id'])
e = g.edges
self.assertSequenceEqual(e.column_names(), ['__src_id', '__dst_id'])
# Test vertices and edge attributes cannot be modified
def set_vertices_empty(g):
g.vertices = SFrame()
def set_edges_empty(g):
g.edges = SFrame()
def remove_vertices(g):
del g.vertices
def remove_edges(g):
del g.edges
def remove_edge_column(gf, name):
del gf[name]
self.assertRaises(AttributeError, lambda: remove_vertices(g))
self.assertRaises(AttributeError, lambda: remove_edges(g))
self.assertRaises(AttributeError, lambda: set_vertices_empty(g))
self.assertRaises(AttributeError, lambda: set_edges_empty(g))
# Test gframe operations has the same effect as its sframe+graph equivalent
g = SGraph().add_vertices(self.vertices, 'vid').add_edges(self.edges, 'src_id', 'dst_id')
v = g.vertices
v['id_col'] = v['__id']
e = g.edges
e['src_id_col'] = e['__src_id']
e['dst_id_col'] = e['__dst_id']
g2 = SGraph().add_vertices(self.vertices, 'vid').add_edges(self.edges, 'src_id', 'dst_id')
new_vdata = g2.get_vertices()
new_vdata['id_col'] = new_vdata['__id']
new_edata = g2.get_edges()
new_edata['src_id_col'] = new_edata['__src_id']
new_edata['dst_id_col'] = new_edata['__dst_id']
g2 = SGraph().add_vertices(new_vdata, '__id').add_edges(new_edata, '__src_id', '__dst_id')
assert_frame_equal(g.get_vertices().to_dataframe().sort('__id').reset_index(drop=True),
g2.get_vertices().to_dataframe().sort('__id').reset_index(drop=True))
assert_frame_equal(g.get_edges().to_dataframe().sort(['__src_id', '__dst_id']).reset_index(drop=True),
g2.get_edges().to_dataframe().sort(['__src_id', '__dst_id']).reset_index(drop=True))
# check delete a column with exception, and edges is still in a valid state
self.assertRaises(KeyError, lambda: remove_edge_column(g.edges, 'badcolumn'))
g.edges.head()
# test slicing
assert_frame_equal(g.edges[:3].to_dataframe(), g.get_edges()[:3].to_dataframe())
assert_frame_equal(g.vertices[:3].to_dataframe(), g.get_vertices()[:3].to_dataframe())
# test add row number
e_expected = g.get_edges().to_dataframe()
v_expected = g.get_vertices().to_dataframe()
e_expected['id'] = range(len(e_expected))
v_expected['id'] = range(len(v_expected))
def test_sframe_le_append_skip_row_bug_is_fixed(self):
"""
        This test actually exercises SFrame lazy evaluation.
        It lives here because the repro can only be triggered through SGraph.
        The bug appears when the SFrame has a lazy append: when passing through
        the logical filter, skip_rows is not applied correctly, so the edge
        SFrame is in a bad state until it is materialized.
        This unit test stays here to ensure the bug remains fixed until we can
        find a cleaner repro.
"""
n = 12 # smallest n to repro the le_append bug
# A graph with edge i -> i + 1
g = SGraph().add_edges(SFrame({'src': range(n), 'dst': range(1, n + 1)}), 'src', 'dst')
lazy_sf = g.get_edges()
materialized_sf = g.get_edges()
materialized_sf.__materialize__()
assert_frame_equal(lazy_sf[lazy_sf['__dst_id'] == n].to_dataframe(), materialized_sf[materialized_sf['__dst_id'] == n].to_dataframe())
| bsd-3-clause |
mwalton/artificial-olfaction | experiments/multiBGc/plotResults.py | 2 | 3928 | import matplotlib.pyplot as plt
#from Image import NEAREST
#from matplotlib.cm import cmap_d
import argparse
import os.path
import numpy as np
from sklearn.metrics import accuracy_score
from os import listdir
#import pylab as pl
def evaluateNS(path):
d = np.genfromtxt(path, delimiter=",", dtype="float32", skip_header=1)
ctx_pred = d[:,1]
f_pred = d[:,0]
target = d[:,2]
return (accuracy_score(target, f_pred), accuracy_score(target, ctx_pred))
def loadData(path):
d = np.genfromtxt(path, delimiter=",", dtype=None)
#header is the first row
h = d[0,:]
#select all rows in d where classifier == RBM
i = d[:,1] == 'rbm'
d = d[i]
#first col = BGc = x values of scatter plot
dX = (d[:, 0]).astype(np.float)
#remaining columns are accuracy values
dY = (d[:, range(2,np.shape(d)[1])]).astype(np.float)
#return tuple containing feature and target vectors
return (h, dX, dY)
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-s", "--same", required = True,
help = "path to same train / test BG results")
ap.add_argument("-d", "--diff", required = True,
help = "path to diff train / test BG results")
ap.add_argument("-t", "--trainResult", required = True,
help = "training results output")
ap.add_argument("-f", "--fileOut", default="",
help = "if defined, plot will be written to file instead of displayed")
ap.add_argument("-S", "--sameNS", required=True,
help = "path to the NS data you want to plot for same condition")
ap.add_argument("-D", "--diffNS", required=True,
help = "path to the NS data you want to plot for the diff condition")
args = vars(ap.parse_args())
(sH, sX, sY) = loadData(args["same"])
(dH, dX, dY) = loadData(args["diff"])
trainResult = np.genfromtxt(args["trainResult"], delimiter=",", dtype=float, skip_header=1)
mean_BGc = trainResult[0]
######################## EVALUATE NS
sFolders = listdir(args["sameNS"])
dFolders = listdir(args["diffNS"])
ns_sY = []
ns_dY = []
ctx_sY = []
ctx_dY = []
for sF in sFolders:
if not sF.startswith('.'):
(tempFib, tempCtx) = evaluateNS(os.path.join(args["sameNS"], sF, "tstepAccuracy.csv"))
ns_sY.append(tempFib)
ctx_sY.append(tempCtx)
for dF in dFolders:
if not dF.startswith('.'):
(tempFib, tempCtx) = evaluateNS(os.path.join(args["diffNS"], dF, "tstepAccuracy.csv"))
ns_dY.append(tempFib)
ctx_dY.append(tempCtx)
"""
ns_sY = np.array(ns_sY)
ns_dY = np.array(ns_dY)
np.reshape(ns_sY, np.shape(sX))
np.reshape(ns_dY, np.shape(dX))
"""
######################## PLOT
fig = plt.figure()
ax1 = fig.add_subplot(111)
#ax1.scatter(sX, sY[:,0],c='b', marker='s', label='same BG')
samePlt = ax1.plot(sX, sY[:,0], '-s', c='b', label='RBM same BG')
diffPlt = ax1.plot(dX, dY[:,0], '-o', c='r', label='RBM diff BG')
sameNS_plt = ax1.plot(sX, ns_sY, '-s', c='g', label='Fibers same BG')
diffNS_plt = ax1.plot(dX, ns_dY, '-o', c='y', label='Fibers diff BG')
sameCtx_plt = ax1.plot(sX, ctx_sY, '-s', c='orange', label='Ctx same BG')
diffCtx_plt = ax1.plot(dX, ctx_dY, '-o', c='purple', label='Ctx diff BG')
ax1.set_title(sH[2])
box = ax1.get_position()
ax1.set_position([box.x0, box.y0 + box.height * 0.15,
box.width, box.height * 0.9])
# ax1.plot returns a list of Line2D objects, so index [0] to pass the line
# artists themselves as legend handles.
ax1.legend(loc='upper center', ncol=3, bbox_to_anchor=(0.5, -0.1),
           handles=[samePlt[0], diffPlt[0], sameNS_plt[0], diffNS_plt[0], sameCtx_plt[0], diffCtx_plt[0]],
           labels=['RBM same BG', 'RBM diff BG', 'NS same BG', 'NS diff BG', 'Ctx same BG', 'Ctx diff BG'])
ax1.axvline(mean_BGc, color='y')
ax1.set_xlabel('Mean Background Concentration')
ax1.set_ylabel('Accuracy')
#ax1.set_ylim([0.85, 1.0])
#ax1.scatter(dX, dY[:,0],c='r', marker='o', label='diff BG')
if (not args["fileOut"] == ""):
plt.savefig(args["fileOut"])
else:
plt.show()
#print(same)
| mit |
eickenberg/scikit-learn | benchmarks/bench_covertype.py | 6 | 9212 | """
===========================
Covertype dataset benchmark
===========================
Benchmark stochastic gradient descent (SGD), Liblinear, and Naive Bayes, CART
(decision tree), RandomForest and Extra-Trees on the forest covertype dataset
of Blackard, Jock, and Dean [1]. The dataset comprises 581,012 samples. It is
low dimensional with 54 features and a sparsity of approx. 23%. Here, we
consider the task of predicting class 1 (spruce/fir). The classification
performance of SGD is competitive with Liblinear while being two orders of
magnitude faster to train::
[..]
Classification performance:
===========================
Classifier     train-time   test-time   error-rate
--------------------------------------------------
liblinear       15.9744s     0.0705s      0.2305
GaussianNB       3.0666s     0.3884s      0.4841
SGD              1.0558s     0.1152s      0.2300
CART            79.4296s     0.0523s      0.0469
RandomForest  1190.1620s     0.5881s      0.0243
ExtraTrees     640.3194s     0.6495s      0.0198
The same task has been used in a number of papers including:
* `"SVM Optimization: Inverse Dependence on Training Set Size"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.139.2112>`_
S. Shalev-Shwartz, N. Srebro - In Proceedings of ICML '08.
* `"Pegasos: Primal estimated sub-gradient solver for svm"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.74.8513>`_
S. Shalev-Shwartz, Y. Singer, N. Srebro - In Proceedings of ICML '07.
* `"Training Linear SVMs in Linear Time"
<www.cs.cornell.edu/People/tj/publications/joachims_06a.pdf>`_
T. Joachims - In SIGKDD '06
[1] http://archive.ics.uci.edu/ml/datasets/Covertype
"""
from __future__ import division, print_function
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
# License: BSD 3 clause
import logging
import os
import sys
from time import time
from optparse import OptionParser
import numpy as np
from sklearn.datasets import fetch_covtype, get_data_home
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn import metrics
from sklearn.externals.joblib import Memory
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
logger = logging.getLogger(__name__)
op = OptionParser()
op.add_option("--classifiers",
dest="classifiers", default='liblinear,GaussianNB,SGD,CART',
help="comma-separated list of classifiers to benchmark. "
"default: %default. available: "
"liblinear, GaussianNB, SGD, CART, ExtraTrees,\n"
"RandomForest, GBRT")
op.add_option("--n-jobs",
dest="n_jobs", default=1, type=int,
help="Number of concurrently running workers for models that"
" support parallelism.")
# Each number generator uses the same seed to avoid coupling issues between
# estimators.
op.add_option("--random-seed",
dest="random_seed", default=13, type=int,
help="Common seed used by random number generator.")
op.print_help()
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
# Memoize the data extraction and memory map the resulting
# train / test splits in readonly mode
joblib_cache_folder = os.path.join(get_data_home(), 'covertype_benchmark_data')
m = Memory(joblib_cache_folder, mmap_mode='r')
# Load the data, then cache and memmap the train/test split
@m.cache
def load_data(dtype=np.float32, order='C'):
######################################################################
## Load dataset
print("Loading dataset...")
data = fetch_covtype(download_if_missing=True, shuffle=True,
random_state=opts.random_seed)
X, y = data['data'], data['target']
X = np.asarray(X, dtype=dtype)
if order.lower() == 'f':
X = np.asfortranarray(X)
# class 1 vs. all others.
y[np.where(y != 1)] = -1
######################################################################
## Create train-test split (as [Joachims, 2006])
logger.info("Creating train-test split...")
n_train = 522911
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
######################################################################
## Standardize first 10 features (the numerical ones)
mean = X_train.mean(axis=0)
std = X_train.std(axis=0)
mean[10:] = 0.0
std[10:] = 1.0
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
return X_train, X_test, y_train, y_test
X_train, X_test, y_train, y_test = load_data()
######################################################################
## Print dataset statistics
print("")
print("Dataset statistics:")
print("===================")
print("%s %d" % ("number of features:".ljust(25),
X_train.shape[1]))
print("%s %d" % ("number of classes:".ljust(25),
np.unique(y_train).shape[0]))
print("%s %s" % ("data type:".ljust(25), X_train.dtype))
print("%s %d (pos=%d, neg=%d, size=%dMB)"
% ("number of train samples:".ljust(25),
X_train.shape[0], np.sum(y_train == 1),
np.sum(y_train == -1), int(X_train.nbytes / 1e6)))
print("%s %d (pos=%d, neg=%d, size=%dMB)"
% ("number of test samples:".ljust(25),
X_test.shape[0], np.sum(y_test == 1),
np.sum(y_test == -1), int(X_test.nbytes / 1e6)))
classifiers = dict()
######################################################################
## Benchmark classifiers
def benchmark(clf):
t0 = time()
clf.fit(X_train, y_train)
train_time = time() - t0
t0 = time()
pred = clf.predict(X_test)
test_time = time() - t0
err = metrics.zero_one_loss(y_test, pred, normalize=True)
return err, train_time, test_time
######################################################################
## Train Liblinear model
liblinear_parameters = {
'loss': 'l2',
'penalty': 'l2',
'C': 1000,
'dual': False,
'tol': 1e-3,
"random_state": opts.random_seed,
}
classifiers['liblinear'] = LinearSVC(**liblinear_parameters)
######################################################################
## Train GaussianNB model
classifiers['GaussianNB'] = GaussianNB()
######################################################################
## Train SGD model
sgd_parameters = {
'alpha': 0.001,
'n_iter': 2,
'n_jobs': opts.n_jobs,
"random_state": opts.random_seed,
}
classifiers['SGD'] = SGDClassifier(**sgd_parameters)
######################################################################
## Train CART model
classifiers['CART'] = DecisionTreeClassifier(min_samples_split=5,
max_depth=None,
random_state=opts.random_seed)
######################################################################
## Train RandomForest model
rf_parameters = {
"n_estimators": 20,
"n_jobs": opts.n_jobs,
"random_state": opts.random_seed,
}
classifiers['RandomForest'] = RandomForestClassifier(**rf_parameters)
######################################################################
## Train Extra-Trees model
classifiers['ExtraTrees'] = ExtraTreesClassifier(n_estimators=20,
n_jobs=opts.n_jobs,
random_state=opts.random_seed)
######################################################################
## Train GBRT model
classifiers['GBRT'] = GradientBoostingClassifier(n_estimators=250,
random_state=opts.random_seed)
selected_classifiers = opts.classifiers.split(',')
for name in selected_classifiers:
if name not in classifiers:
op.error('classifier %r unknown' % name)
sys.exit(1)
print()
print("Training Classifiers")
print("====================")
print()
err, train_time, test_time = {}, {}, {}
for name in sorted(selected_classifiers):
print("Training %s ..." % name)
err[name], train_time[name], test_time[name] = benchmark(classifiers[name])
######################################################################
## Print classification performance
print()
print("Classification performance:")
print("===========================")
print()
def print_row(clf_type, train_time, test_time, err):
print("%s %s %s %s" % (clf_type.ljust(12),
("%.4fs" % train_time).center(10),
("%.4fs" % test_time).center(10),
("%.4f" % err).center(10)))
print("%s %s %s %s" % ("Classifier ", "train-time", "test-time",
"error-rate"))
print("-" * 44)
for name in sorted(selected_classifiers, key=lambda name: err[name]):
print_row(name, train_time[name], test_time[name], err[name])
print()
print()
| bsd-3-clause |
gbrammer/pygrism | utils_c/interp.py | 2 | 7337 | """
Test suite for Cython utilities.
"""
import numpy as np
def interp_conserve(x, xp, fp, left=0., right=0.):
"""
    Interpolate the (`xp`, `fp`) arrays onto the output `x` grid, conserving flux.
    `xp` can be irregularly spaced.
"""
midpoint = (x[1:]-x[:-1])/2.+x[:-1]
midpoint = np.append(midpoint, np.array([x[0],x[-1]]))
midpoint = midpoint[np.argsort(midpoint)]
int_midpoint = np.interp(midpoint, xp, fp, left=left, right=right)
int_midpoint[midpoint > xp.max()] = 0.
int_midpoint[midpoint < xp.min()] = 0.
fullx = np.append(xp, midpoint)
fully = np.append(fp, int_midpoint)
so = np.argsort(fullx)
fullx, fully = fullx[so], fully[so]
outy = x*0.
dx = midpoint[1:]-midpoint[:-1]
for i in range(len(x)):
bin = (fullx >= midpoint[i]) & (fullx <= midpoint[i+1])
outy[i] = np.trapz(fully[bin], fullx[bin])/dx[i]
return outy
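# Editor's note: a minimal usage sketch for interp_conserve (not part of the
# original module).  "Conserving flux" means the integral of the interpolated
# curve over the output grid stays close to the integral of the input.
def example_interp_conserve():
    xp = np.linspace(0., 10., 200)
    fp = np.sin(xp) + 1.1
    x = np.linspace(0., 10., 37)  # coarser output grid spanning the same range
    y = interp_conserve(x, xp, fp)
    return np.trapz(y, x), np.trapz(fp, xp)  # the two totals should nearly agree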
def test_nmf():
import interp_c
import numpy as np
import time
import matplotlib.pyplot as plt
x = np.arange(0,16,0.5)*np.pi*2
NFILT = len(x)
NTEMP = 7
coeffs = np.random.random(NTEMP)
y = x*0.
templates = np.zeros((NTEMP, NFILT))
norms = np.zeros(NTEMP)
for i in range(0,NTEMP):
templates[i,:] = np.sin(x/((i+1)*1.6))+1.1
norms[i] = np.trapz(templates[i,:], x)
y = np.dot(coeffs.reshape((1,NTEMP)), templates).flatten()
err = y*0.+np.median(y)/10.
yerr = y+np.random.normal(size=NFILT)*err
t0 = time.time()
amatrix = interp_c.prepare_nmf_amatrix(err**2, templates)
t1= time.time()
coeffs_fit = interp_c.run_nmf(yerr, err**2, templates, amatrix, toler=1.e-5)
t2 = time.time()
print 'Prepare: %.4f' %(t1-t0)
print 'Fit : %.4f' %(t2-t1)
yfit = np.dot(coeffs_fit.reshape((1,-1)), templates).flatten()
fig = plt.figure()
ax = fig.add_subplot(311)
ax.plot(x, y/np.median(y), color='blue')
ax.errorbar(x, yerr/np.median(y), err/np.median(y), color='blue', marker='o', linestyle='None')
ax.plot(x, yfit/np.median(y), color='red')
ax.set_ylabel('"Flux"')
ax = fig.add_subplot(312)
ax.plot(x, y-yfit, color='blue')
ax.errorbar(x, yerr-yfit, err, color='blue', marker='o', linestyle='None')
ax.plot(x, yfit-yfit, color='red')
ax.set_ylabel(r'$\Delta$(obs-fit)')
chi2 = np.sum((yerr-yfit)**2/err**2)/(len(x)-1)
ax.text(0.1,0.8,r'$\chi^2_\nu$=%.3f' %(chi2), transform=ax.transAxes)
ax = fig.add_subplot(313)
ax.plot(np.log10(coeffs/coeffs_fit), color='orange')
ax.set_ylabel(r'$\log$($\Delta$coeff)')
#### Is sum of normalizations conserved?
norm_input = np.sum(norms*coeffs)
norm_fit = np.sum(norms*coeffs_fit)
int_fit = np.trapz(yfit, x)
print 'Norm_in: %.2f, Norm_fit: %.2f, trapz_fit: %.2f' %(norm_input, norm_fit, int_fit)
fig.savefig('/tmp/nmf.png')
def test():
import interp_c
import time
import scipy
import threedhst
import numpy as np
N = int(1.e6)
xfull = np.arange(0,N+1,1)*1.
#yfull = np.sin(xfull/(N/1239.)*2*np.pi)+1
yfull = np.sin(xfull/np.pi/2/20)+0.2
# coeffs = np.random.random(size=12)*5
# yfull = scipy.polyval(coeffs, xfull)
xint = np.arange(0,N+1,N/100)*1.
tstart = time.time()
denom = np.trapz(yfull,xfull)
tstart = time.time()
yint_0 = np.interp(xint, xfull, yfull)
t0 = time.time()
print 'Linear : %.3f (%.4e)' %(t0-tstart, np.trapz(yint_0, xint)/denom-1)
yint_x = interp_c.interp_c(xint, xfull, yfull)
tx = time.time()
print 'Linear(c) : %.3f (%.4e)' %(tx-t0, np.trapz(yint_x, xint)/denom-1)
xreverse = xint[::-1]
yint_y = interp_c.interp_c(xreverse, xfull, yfull, assume_sorted=0)
ty = time.time()
print 'Linear(c) rev : %.3f (%.4e)' %(ty-tx, np.trapz(yint_y, xint)/denom-1)
yint_1 = threedhst.utils.interp_conserve(xint, xfull, yfull)
t1 = time.time()
print 'Conserve : %.3f (%.4e)' %(t1-ty, np.trapz(yint_1, xint)/denom-1)
yint_2 = interp_c.interp_conserve(xint, xfull, yfull)
t2 = time.time()
print 'Conserve (Cython): %.3f (%.4e)' %(t2-t1, np.trapz(yint_2, xint)/denom-1)
yint_3 = interp_c.interp_conserve_c(xint, xfull, yfull)
t3 = time.time()
print 'Conserve (more c): %.3f (%.4e)' %(t3-t2, np.trapz(yint_3, xint)/denom-1)
yint_4 = threedhst.utils.interp_conserve_c(xint, xfull, yfull)
t4 = time.time()
print 'Inline c : %.3f (%.4e)' %(t4-t3, np.trapz(yint_4, xint)/denom-1)
#### Test interpolation
threedhst.showMessage('Interpolation')
#### Faster while n(int)/n(full) < 1./50
xint = xfull[1000:-1000:40]
tstart = time.time()
yint = np.interp(xint, xfull, yfull, left=0., right=0.)
t0 = time.time()
print 'Python : %.4f' %(t0-tstart)
yint1 = interp_c.interp_c(xint, xfull, yfull, extrapolate=0.)
t1 = time.time()
print 'Cython rewrite : %.4f (%.2e)' %(t1-t0, np.sum((yint1-yint)**2))
#### Test midpoint definition --- slices work better than by hand
threedhst.showMessage('Midpoint')
xmid = xfull
tstart = time.time()
midpoint = (xmid[1:]+xmid[:-1])/2.
midpoint = np.append(midpoint, np.array([xmid[0],xmid[-1]]))
midpoint = midpoint[np.argsort(midpoint)]
t0 = time.time()
print 'Python : %.3f %.2e' %(t0-tstart, np.sum((midpoint-midpoint)**2))
midpoint_c1 = interp_c.midpoint(xmid)
t1 = time.time()
print 'Cython : %.3f %.2e' %(t1-t0, np.sum((midpoint_c1-midpoint)**2))
midpoint_c2 = interp_c.midpoint_c(xmid, N+1)
t2 = time.time()
print 'Cython (opt): %.3f %.2e' %(t2-t1, np.sum((midpoint_c2-midpoint)**2))
# Compare cython to numba
#@autojit
def interpolate_tempfilt(tempfilt, zgrid, zi, output):
"""
interpolate_tempfilt(tempfilt, zgrid, zi, output)
    Linearly interpolate an Eazy "tempfilt" grid at z=zi.
`tempfilt` is [NFILT, NTEMP, NZ] integrated flux matrix
`zgrid` is [NZ] redshift grid
Result is stored in the input variable `output`, which needs shape [NFILT, NTEMP]
"""
#cdef unsigned long NT, NF, NZ, itemp, ifilt, iz
#cdef double dz, fint, fint2
#cdef extern from "math.h":
# double fabs(double)
sh = tempfilt.shape
NF, NT, NZ = sh[0], sh[1], sh[2]
#### Output array
#cdef np.ndarray[DTYPE_t, ndim=2] tempfilt_interp = np.zeros((NF, NT), dtype=DTYPE)
for iz in range(NZ-1):
dz = zgrid[iz+1]-zgrid[iz]
fint = 1 - (zi-zgrid[iz])/dz
if (fint > 0) & (fint <= 1):
fint2 = 1 - (zgrid[iz+1]-zi)/dz
# print iz, zgrid[iz], fint, fint2
for ifilt in range(NF):
for itemp in range(NT):
#print ifilt, itemp
output[ifilt, itemp] = tempfilt[ifilt, itemp, iz]*fint + tempfilt[ifilt, itemp, iz+1]*fint2
#
break
return output
#
from numba import double, jit
fast_interpolate_tempfilt = jit(double[:,:](double[:,:,:], double[:], double, double[:,:]))(interpolate_tempfilt)
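#### Usage sketch (added for illustration, not part of the original script).
#### Shapes follow the docstring above: tempfilt is [NFILT, NTEMP, NZ],
#### zgrid is [NZ], and the result is written into a preallocated
#### [NFILT, NTEMP] array; the sizes and the redshift are made-up values.
# NFILT, NTEMP, NZ = 3, 2, 5
# tempfilt_grid = np.random.random((NFILT, NTEMP, NZ))
# zgrid = np.linspace(0., 4., NZ)
# output = np.zeros((NFILT, NTEMP))
# interpolate_tempfilt(tempfilt_grid, zgrid, 1.3, output)
# fast_interpolate_tempfilt(tempfilt_grid, zgrid, 1.3, output)  # numba version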
if __name__ == "__main__":
test()
| mit |
phev8/dataset_tools | playground/object_recognition_playground.py | 1 | 7568 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from experiment_handler.object_recognition.object_detection_reader import read_filtered_object_detection_results, read_object_detections
from experiment_handler.label_data_reader import read_experiment_phases, read_location_labels, read_activity_labels_from_eyetracker_labelling
def object_rec_by_label_per_location(exp_root, model):
phases = read_experiment_phases(exp_root)
start = phases['assembly'][0]
end = phases['disassembly'][1]
object_recognitions = read_filtered_object_detection_results(exp_root, model, start, end, "video")
loc_labels = read_location_labels(exp_root)
print(loc_labels)
infos = []
for p in loc_labels.keys():
detections_for_person = object_recognitions.loc[object_recognitions["person_id"] == p]
for label in loc_labels[p]:
during_label = detections_for_person.loc[detections_for_person["timestamp"].between(label["start"], label["end"])]
current = {
"person_id": p,
"location": label["location"],
"duration": label["end"] - label["start"],
"screwdriver": during_label[during_label["label"] == "screwdriver"].size,
"power_drill": during_label[during_label["label"] == "power_drill"].size
}
infos.append(current)
infos = pd.DataFrame(infos)
#print(infos)
locations = infos["location"].unique()
for loc in locations:
at_location = infos.loc[infos["location"] == loc]
print(loc, at_location["screwdriver"].sum()/at_location["duration"].sum(), at_location["power_drill"].sum()/at_location["duration"].sum(), at_location["duration"].sum())
def object_rec_by_label_per_activity(experiment, model, replacements, remove_labels):
phases = read_experiment_phases(experiment)
start = phases['assembly'][0]
end = phases['disassembly'][1]
"""
replacements = [
("TV lifting: taking out of the box", "no"),
("TV lifting: putting on the wall", "no"),
("TV lifting: taking off the wall", "no"),
("TV lifting: putting in the box", "no"),
("walking on the floor", "no"),
("carry tv spacer", "no"),
("carry tools", "no"),
("carry screen", "no"),
("screw: by screw driver", "Precise"),
("screw: by electric drill", "Precise"),
("screw: by hand", "Precise"),
("placing items", "Precise"),
("unpack tools", "Precise"),
]
"""
activity_labels = read_activity_labels_from_eyetracker_labelling(experiment, "video", replacements, remove_labels)
activity_labels.drop_duplicates(inplace=True)
object_recognitions = read_filtered_object_detection_results(experiment, model, start, end, "video")
object_recognitions.drop(object_recognitions[object_recognitions["top_index"] > 5].index, inplace=True)
activity_labels["duration"] = activity_labels["end"] - activity_labels["start"]
activities = activity_labels["label"].unique()
persons = activity_labels["subject"].unique()
infos = []
for p in persons:
detections_for_person = object_recognitions.loc[object_recognitions["person_id"] == p]
for index, label in activity_labels.loc[activity_labels["subject"] == p].iterrows():
during_label = detections_for_person.loc[detections_for_person["timestamp"].between(label["start"], label["end"])]
current = {
"person_id": p,
"activity": label["label"],
"duration": label["end"] - label["start"],
"screwdriver": during_label[during_label["label"] == "screwdriver"].size,
"power_drill": during_label[during_label["label"] == "power_drill"].size
}
infos.append(current)
infos = pd.DataFrame(infos)
#print(infos)
for act in activities:
at_location = infos.loc[infos["activity"] == act]
print(act, at_location["screwdriver"].sum()/at_location["duration"].sum(), at_location["power_drill"].sum()/at_location["duration"].sum(), at_location["duration"].sum())
f, axarr = plt.subplots(4, sharex=True, figsize=(16, 10))
for idx, test_for in enumerate(["P1", "P2", "P3", "P4"]):
person_dets = object_recognitions.loc[object_recognitions["person_id"] == test_for]
screwdriver_det_times = person_dets.loc[person_dets["label"] == "screwdriver", "timestamp"].as_matrix()
screwdriver_y = np.ones(len(screwdriver_det_times))*6.4
drill_det_times = person_dets.loc[person_dets["label"] == "power_drill", "timestamp"].as_matrix()
drill_y = np.ones(len(drill_det_times)) * 0.6
axarr[idx].plot(screwdriver_det_times, screwdriver_y, '|', ms=10, color="red", label="Screwdriver")
axarr[idx].plot(drill_det_times, drill_y, '|', ms=10, color="olive", label="Power drill")
height = 0.05
for index, label in activity_labels.loc[activity_labels["subject"] == test_for].iterrows():
y_pos = (list(activities).index(label["label"])) / (len(activities)) + 0.08
axarr[idx].axvspan(label["start"], label["end"], y_pos - height / 2, y_pos + height / 2, color="#1f77b4")
axarr[idx].grid()
axarr[idx].legend()
axarr[idx].set_title(test_for)
axarr[idx].set_ylabel("Activity")
axarr[idx].set_yticks(range(1, len(activities) + 1))
axarr[idx].set_ylim([0.5, 6.5])
axarr[idx].set_yticklabels(activities)
plt.xlabel("Time [s]")
plt.show()
def train_activity_with_object_rec(experiment, model, replacements, to_remove):
phases = read_experiment_phases(experiment)
start = phases['assembly'][0]
end = phases['disassembly'][1]
activity_labels = read_activity_labels_from_eyetracker_labelling(experiment, "video", replacements, to_remove)
activity_labels.drop_duplicates(inplace=True)
object_recognitions = read_object_detections(experiment, model, start, end, "video")
persons = ["P1", "P2", "P3", "P4"]
features = {}
for p in persons:
feature_matrix = object_recognitions[p]
#print(feature_matrix[:, 0])
#print(feature_matrix[:, 1])
for row in feature_matrix:
print(row)
# TODO: create feature matrix and samples
# TODO: train with three person test on one
if __name__ == '__main__':
exp_root = "/Volumes/DataDrive/igroups_recordings/igroups_experiment_8"
model_name = "ResNet50"
activity_label_replacements = [
("walking on the floor", "Walk"),
("carry tv spacer", "Walk"),
("carry tools", "Walk"),
("TV lifting: taking out of the box", "Screen placement"),
("TV lifting: putting on the wall", "Screen placement"),
("TV lifting: taking off the wall", "Screen placement"),
("TV lifting: putting in the box", "Screen placement"),
("carry screen", "Carry"),
("screw: by screw driver", "Screwdriver"),
("screw: by electric drill", "Drill"),
("screw: by hand", "Adjust"),
("placing items", "Adjust"),
("unpack tools", "Adjust"),
]
activity_labels_to_remove = [
"synchronisation",
]
object_rec_by_label_per_location(exp_root, model_name)
object_rec_by_label_per_activity(exp_root, model_name, activity_label_replacements, activity_labels_to_remove)
#train_activity_with_object_rec(exp_root, model_name, activity_label_replacements, activity_labels_to_remove) | mit |
berkeley-stat222/mousestyles | mousestyles/path_diversity/path_index.py | 3 | 2301 | from __future__ import (absolute_import, division,
print_function, unicode_literals)
import numpy as np
def path_index(movement, stop_threshold, min_path_length):
r"""
Return a list object containing start and end indices
for a specific movement. Each element in the list is
a list containing two indices: the first element is
the start index and the second element is the end index.
Parameters
----------
movement : pandas.DataFrame
CT, CX, CY coordinates and homebase status
for the unique combination of strain, mouse and day
stop_threshold : float
        positive number indicating the path cutoff criterion:
        if the time difference between two observations is
        less than this threshold, they belong to the same path
min_path_length : int
        positive integer; a path is kept only if it
        spans more than this many time steps
Returns
-------
    paths index : a list of [start_index, end_index] pairs, one per path
Examples
--------
>>> movement = data.load_movement(1, 2, 1)
>>> paths = path_index(movement, 1, 1)[:5]
>>> paths
[[0, 2], [6, 8], [107, 113], [129, 131], [144, 152]]
"""
# check if all inputs are positive integers
conditions_value = [stop_threshold <= 0, min_path_length <= 0]
conditions_type = type(min_path_length) != int
if any(conditions_value):
raise ValueError("Input values need to be positive")
if conditions_type:
raise TypeError("min_path_length needs to be integer")
# Pull out time variable
T = movement['t'].ravel()
# Calculate time differences
TD = np.diff(T)
path = []
# index
i = 0
while i < len(TD):
start_index = i
# If time difference is less than stop_threshold
# start to track the index in this path
while TD[i] < stop_threshold:
i += 1
if i == len(TD):
break
end_index = i
        # Check whether start index is equal to end index
        # If they are equal, there is no path to record, so move on
        if start_index == end_index:
            pass
else:
path.append([start_index, end_index])
i += 1
path = [p for p in path if (p[1] - p[0]) > min_path_length]
return path
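# A minimal, self-contained usage sketch added for illustration; it is not
# part of the original module. The helper name and the timestamps are made
# up, and only the 't' column of `movement` is used by path_index.
def _example_path_index():
    import pandas as pd
    movement = pd.DataFrame({'t': [0.0, 0.2, 0.4, 5.0, 5.1, 5.3, 5.4]})
    # The 4.6 s gap between t=0.4 and t=5.0 splits the record into two
    # candidate paths; only the second spans more than min_path_length
    # time steps, so the result is [[3, 6]].
    return path_index(movement, stop_threshold=1, min_path_length=2)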
| bsd-2-clause |
xyguo/scikit-learn | examples/plot_isotonic_regression.py | 303 | 1767 | """
===================
Isotonic Regression
===================
An illustration of the isotonic regression on generated data. The
isotonic regression finds a non-decreasing approximation of a function
while minimizing the mean squared error on the training data. The benefit
of such a model is that it does not assume any form for the target
function such as linearity. For comparison a linear regression is also
presented.
"""
print(__doc__)
# Author: Nelle Varoquaux <[email protected]>
# Alexandre Gramfort <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from sklearn.linear_model import LinearRegression
from sklearn.isotonic import IsotonicRegression
from sklearn.utils import check_random_state
n = 100
x = np.arange(n)
rs = check_random_state(0)
y = rs.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n))
###############################################################################
# Fit IsotonicRegression and LinearRegression models
ir = IsotonicRegression()
y_ = ir.fit_transform(x, y)
lr = LinearRegression()
lr.fit(x[:, np.newaxis], y) # x needs to be 2d for LinearRegression
###############################################################################
# plot result
segments = [[[i, y[i]], [i, y_[i]]] for i in range(n)]
lc = LineCollection(segments, zorder=0)
lc.set_array(np.ones(len(y)))
lc.set_linewidths(0.5 * np.ones(n))
fig = plt.figure()
plt.plot(x, y, 'r.', markersize=12)
plt.plot(x, y_, 'g.-', markersize=12)
plt.plot(x, lr.predict(x[:, np.newaxis]), 'b-')
plt.gca().add_collection(lc)
plt.legend(('Data', 'Isotonic Fit', 'Linear Fit'), loc='lower right')
plt.title('Isotonic regression')
plt.show()
| bsd-3-clause |
nkhuyu/airflow | airflow/www/app.py | 3 | 68914 | from __future__ import print_function
from __future__ import division
from builtins import str
from past.utils import old_div
import copy
from datetime import datetime, timedelta
import dateutil.parser
from functools import wraps
import inspect
import json
import logging
import os
import socket
import sys
import time
from flask._compat import PY2
from flask import (
Flask, url_for, Markup, Blueprint, redirect,
flash, Response, render_template)
from flask.ext.admin import Admin, BaseView, expose, AdminIndexView
from flask.ext.admin.form import DateTimePickerWidget
from flask.ext.admin import base
from flask.ext.admin.contrib.sqla import ModelView
from flask.ext.cache import Cache
from flask import request
import sqlalchemy as sqla
from wtforms import (
widgets,
Form, DateTimeField, SelectField, TextAreaField, PasswordField, StringField)
from pygments import highlight, lexers
from pygments.formatters import HtmlFormatter
import chartkick
import jinja2
import markdown
from sqlalchemy import or_
import airflow
from airflow import jobs, login, models, settings, utils
from airflow.configuration import conf
from airflow.models import State
from airflow.settings import Session
from airflow.utils import AirflowException
from airflow.www import utils as wwwutils
login_required = login.login_required
current_user = login.current_user
logout_user = login.logout_user
from airflow import default_login as login
if conf.getboolean('webserver', 'AUTHENTICATE'):
try:
# Environment specific login
import airflow_login as login
except ImportError:
logging.error(
"authenticate is set to True in airflow.cfg, "
"but airflow_login failed to import")
login_required = login.login_required
current_user = login.current_user
logout_user = login.logout_user
AUTHENTICATE = conf.getboolean('webserver', 'AUTHENTICATE')
if AUTHENTICATE is False:
login_required = lambda x: x
FILTER_BY_OWNER = False
if conf.getboolean('webserver', 'FILTER_BY_OWNER'):
# filter_by_owner if authentication is enabled and filter_by_owner is true
FILTER_BY_OWNER = AUTHENTICATE
class VisiblePasswordInput(widgets.PasswordInput):
def __init__(self, hide_value=False):
self.hide_value = hide_value
class VisiblePasswordField(PasswordField):
widget = VisiblePasswordInput()
def superuser_required(f):
'''
Decorator for views requiring superuser access
'''
@wraps(f)
def decorated_function(*args, **kwargs):
if (
not AUTHENTICATE or
(not current_user.is_anonymous() and current_user.is_superuser())
):
return f(*args, **kwargs)
else:
flash("This page requires superuser privileges", "error")
return redirect(url_for('admin.index'))
return decorated_function
def data_profiling_required(f):
'''
Decorator for views requiring data profiling access
'''
@wraps(f)
def decorated_function(*args, **kwargs):
if (
not AUTHENTICATE or
(not current_user.is_anonymous() and current_user.data_profiling())
):
return f(*args, **kwargs)
else:
flash("This page requires data profiling privileges", "error")
return redirect(url_for('admin.index'))
return decorated_function
QUERY_LIMIT = 100000
CHART_LIMIT = 200000
def pygment_html_render(s, lexer=lexers.TextLexer):
return highlight(
s,
lexer(),
HtmlFormatter(linenos=True),
)
def wrapped_markdown(s):
return '<div class="rich_doc">' + markdown.markdown(s) + "</div>"
attr_renderer = {
'bash_command': lambda x: pygment_html_render(x, lexers.BashLexer),
'hql': lambda x: pygment_html_render(x, lexers.SqlLexer),
'sql': lambda x: pygment_html_render(x, lexers.SqlLexer),
'doc': lambda x: pygment_html_render(x, lexers.TextLexer),
'doc_json': lambda x: pygment_html_render(x, lexers.JsonLexer),
'doc_rst': lambda x: pygment_html_render(x, lexers.RstLexer),
'doc_yaml': lambda x: pygment_html_render(x, lexers.YamlLexer),
'doc_md': wrapped_markdown,
'python_callable': lambda x: pygment_html_render(
inspect.getsource(x), lexers.PythonLexer),
}
dagbag = models.DagBag(os.path.expanduser(conf.get('core', 'DAGS_FOLDER')))
utils.pessimistic_connection_handling()
app = Flask(__name__)
app.config['SQLALCHEMY_POOL_RECYCLE'] = 3600
app.secret_key = conf.get('webserver', 'SECRET_KEY')
login.login_manager.init_app(app)
cache = Cache(
app=app, config={'CACHE_TYPE': 'filesystem', 'CACHE_DIR': '/tmp'})
# Init for chartkick, the python wrapper for highcharts
ck = Blueprint(
'ck_page', __name__,
static_folder=chartkick.js(), static_url_path='/static')
app.register_blueprint(ck, url_prefix='/ck')
app.jinja_env.add_extension("chartkick.ext.charts")
@app.context_processor
def jinja_globals():
return {
'hostname': socket.gethostname(),
}
class DateTimeForm(Form):
# Date filter form needed for gantt and graph view
execution_date = DateTimeField(
"Execution date", widget=DateTimePickerWidget())
class GraphForm(Form):
execution_date = DateTimeField(
"Execution date", widget=DateTimePickerWidget())
arrange = SelectField("Layout", choices=(
('LR', "Left->Right"),
('RL', "Right->Left"),
('TB', "Top->Bottom"),
('BT', "Bottom->Top"),
))
class TreeForm(Form):
base_date = DateTimeField(
"Anchor date", widget=DateTimePickerWidget(), default=datetime.now())
num_runs = SelectField("Number of runs", default=25, choices=(
(5, "5"),
(25, "25"),
(50, "50"),
(100, "100"),
(365, "365"),
))
@app.route('/')
def index():
return redirect(url_for('admin.index'))
@app.route('/health')
def health():
""" We can add an array of tests here to check the server's health """
content = Markup(markdown.markdown("The server is healthy!"))
return content
@app.teardown_appcontext
def shutdown_session(exception=None):
settings.Session.remove()
def dag_link(v, c, m, p):
url = url_for(
'airflow.graph',
dag_id=m.dag_id)
return Markup(
'<a href="{url}">{m.dag_id}</a>'.format(**locals()))
class DagModelView(wwwutils.SuperUserMixin, ModelView):
column_list = ('dag_id', 'owners')
column_editable_list = ('is_paused',)
form_excluded_columns = ('is_subdag', 'is_active')
column_searchable_list = ('dag_id',)
column_filters = (
'dag_id', 'owners', 'is_paused', 'is_active', 'is_subdag',
'last_scheduler_run', 'last_expired')
form_widget_args = {
'last_scheduler_run': {'disabled': True},
'fileloc': {'disabled': True},
'is_paused': {'disabled': True},
'last_pickled': {'disabled': True},
'pickle_id': {'disabled': True},
'last_loaded': {'disabled': True},
'last_expired': {'disabled': True},
'pickle_size': {'disabled': True},
'scheduler_lock': {'disabled': True},
'owners': {'disabled': True},
}
column_formatters = dict(
dag_id=dag_link,
)
can_delete = False
can_create = False
page_size = 50
list_template = 'airflow/list_dags.html'
named_filter_urls = True
def get_query(self):
"""
Default filters for model
"""
return (
super(DagModelView, self)
.get_query()
.filter(or_(models.DagModel.is_active, models.DagModel.is_paused))
.filter(~models.DagModel.is_subdag)
)
def get_count_query(self):
"""
Default filters for model
"""
return (
super(DagModelView, self)
.get_count_query()
.filter(models.DagModel.is_active)
.filter(~models.DagModel.is_subdag)
)
class HomeView(AdminIndexView):
@expose("/")
@login_required
def index(self):
session = Session()
DM = models.DagModel
qry = None
# filter the dags if filter_by_owner and current user is not superuser
do_filter = FILTER_BY_OWNER and (not current_user.is_superuser())
if do_filter:
qry = session.query(DM).filter(~DM.is_subdag, DM.is_active, DM.owners == current_user.username).all()
else:
qry = session.query(DM).filter(~DM.is_subdag, DM.is_active).all()
orm_dags = {dag.dag_id: dag for dag in qry}
import_errors = session.query(models.ImportError).all()
for ie in import_errors:
flash(
"Broken DAG: [{ie.filename}] {ie.stacktrace}".format(ie=ie),
"error")
session.expunge_all()
session.commit()
session.close()
dags = dagbag.dags.values()
if do_filter:
dags = {dag.dag_id: dag for dag in dags if (dag.owner == current_user.username and (not dag.parent_dag))}
else:
dags = {dag.dag_id: dag for dag in dags if not dag.parent_dag}
all_dag_ids = sorted(set(orm_dags.keys()) | set(dags.keys()))
return self.render(
'airflow/dags.html',
dags=dags,
orm_dags=orm_dags,
all_dag_ids=all_dag_ids)
admin = Admin(
app,
name="Airflow",
index_view=HomeView(name="DAGs"),
template_mode='bootstrap3')
class Airflow(BaseView):
def is_visible(self):
return False
@expose('/')
@login_required
def index(self):
return self.render('airflow/dags.html')
@expose('/chart_data')
@data_profiling_required
@wwwutils.gzipped
# @cache.cached(timeout=3600, key_prefix=wwwutils.make_cache_key)
def chart_data(self):
session = settings.Session()
chart_id = request.args.get('chart_id')
csv = request.args.get('csv') == "true"
chart = session.query(models.Chart).filter_by(id=chart_id).all()[0]
db = session.query(
models.Connection).filter_by(conn_id=chart.conn_id).all()[0]
session.expunge_all()
session.commit()
session.close()
payload = {}
payload['state'] = 'ERROR'
payload['error'] = ''
# Processing templated fields
try:
args = eval(chart.default_params)
if type(args) is not type(dict()):
raise AirflowException('Not a dict')
except:
args = {}
payload['error'] += (
"Default params is not valid, string has to evaluate as "
"a Python dictionary. ")
request_dict = {k: request.args.get(k) for k in request.args}
from airflow import macros
args.update(request_dict)
args['macros'] = macros
sql = jinja2.Template(chart.sql).render(**args)
label = jinja2.Template(chart.label).render(**args)
payload['sql_html'] = Markup(highlight(
sql,
lexers.SqlLexer(), # Lexer call
HtmlFormatter(noclasses=True))
)
payload['label'] = label
import pandas as pd
pd.set_option('display.max_colwidth', 100)
hook = db.get_hook()
try:
df = hook.get_pandas_df(wwwutils.limit_sql(sql, CHART_LIMIT, conn_type=db.conn_type))
df = df.fillna(0)
except Exception as e:
payload['error'] += "SQL execution failed. Details: " + str(e)
if csv:
return Response(
response=df.to_csv(index=False),
status=200,
mimetype="application/text")
if not payload['error'] and len(df) == CHART_LIMIT:
payload['warning'] = (
"Data has been truncated to {0}"
" rows. Expect incomplete results.").format(CHART_LIMIT)
def date_handler(obj):
return obj.isoformat() if hasattr(obj, 'isoformat') else obj
if not payload['error'] and len(df) == 0:
payload['error'] += "Empty result set. "
elif (
not payload['error'] and
chart.sql_layout == 'series' and
chart.chart_type != "datatable" and
len(df.columns) < 3):
payload['error'] += "SQL needs to return at least 3 columns. "
elif (
not payload['error'] and
                chart.sql_layout == 'columns' and
len(df.columns) < 2):
payload['error'] += "SQL needs to return at least 2 columns. "
elif not payload['error']:
import numpy as np
chart_type = chart.chart_type
data = None
if chart_type == "datatable":
chart.show_datatable = True
if chart.show_datatable:
data = df.to_dict(orient="split")
data['columns'] = [{'title': c} for c in data['columns']]
# Trying to convert time to something Highcharts likes
x_col = 1 if chart.sql_layout == 'series' else 0
if chart.x_is_date:
try:
# From string to datetime
df[df.columns[x_col]] = pd.to_datetime(
df[df.columns[x_col]])
except Exception as e:
raise AirflowException(str(e))
df[df.columns[x_col]] = df[df.columns[x_col]].apply(
lambda x: int(x.strftime("%s")) * 1000)
series = []
colorAxis = None
if chart_type == 'datatable':
payload['data'] = data
payload['state'] = 'SUCCESS'
return Response(
response=json.dumps(
payload, indent=4, default=date_handler),
status=200,
mimetype="application/json")
elif chart_type == 'para':
df.rename(columns={
df.columns[0]: 'name',
df.columns[1]: 'group',
}, inplace=True)
return Response(
response=df.to_csv(index=False),
status=200,
mimetype="application/text")
elif chart_type == 'heatmap':
color_perc_lbound = float(
request.args.get('color_perc_lbound', 0))
color_perc_rbound = float(
request.args.get('color_perc_rbound', 1))
color_scheme = request.args.get('color_scheme', 'blue_red')
if color_scheme == 'blue_red':
stops = [
[color_perc_lbound, '#00D1C1'],
[
color_perc_lbound +
((color_perc_rbound - color_perc_lbound)/2),
'#FFFFCC'
],
[color_perc_rbound, '#FF5A5F']
]
elif color_scheme == 'blue_scale':
stops = [
[color_perc_lbound, '#FFFFFF'],
[color_perc_rbound, '#2222FF']
]
elif color_scheme == 'fire':
diff = float(color_perc_rbound - color_perc_lbound)
stops = [
[color_perc_lbound, '#FFFFFF'],
[color_perc_lbound + 0.33*diff, '#FFFF00'],
[color_perc_lbound + 0.66*diff, '#FF0000'],
[color_perc_rbound, '#000000']
]
else:
stops = [
[color_perc_lbound, '#FFFFFF'],
[
color_perc_lbound +
((color_perc_rbound - color_perc_lbound)/2),
'#888888'
],
[color_perc_rbound, '#000000'],
]
xaxis_label = df.columns[1]
yaxis_label = df.columns[2]
data = []
for row in df.itertuples():
data.append({
'x': row[2],
'y': row[3],
'value': row[4],
})
x_format = '{point.x:%Y-%m-%d}' \
if chart.x_is_date else '{point.x}'
series.append({
'data': data,
'borderWidth': 0,
'colsize': 24 * 36e5,
'turboThreshold': sys.float_info.max,
'tooltip': {
'headerFormat': '',
'pointFormat': (
df.columns[1] + ': ' + x_format + '<br/>' +
df.columns[2] + ': {point.y}<br/>' +
df.columns[3] + ': <b>{point.value}</b>'
),
},
})
colorAxis = {
'stops': stops,
'minColor': '#FFFFFF',
'maxColor': '#000000',
'min': 50,
'max': 2200,
}
else:
if chart.sql_layout == 'series':
# User provides columns (series, x, y)
xaxis_label = df.columns[1]
yaxis_label = df.columns[2]
df[df.columns[2]] = df[df.columns[2]].astype(np.float)
df = df.pivot_table(
index=df.columns[1],
columns=df.columns[0],
values=df.columns[2], aggfunc=np.sum)
else:
# User provides columns (x, y, metric1, metric2, ...)
xaxis_label = df.columns[0]
yaxis_label = 'y'
df.index = df[df.columns[0]]
df = df.sort(df.columns[0])
del df[df.columns[0]]
for col in df.columns:
df[col] = df[col].astype(np.float)
for col in df.columns:
series.append({
'name': col,
'data': [
(k, df[col][k])
for k in df[col].keys()
if not np.isnan(df[col][k])]
})
series = [serie for serie in sorted(
series, key=lambda s: s['data'][0][1], reverse=True)]
if chart_type == "stacked_area":
stacking = "normal"
chart_type = 'area'
elif chart_type == "percent_area":
stacking = "percent"
chart_type = 'area'
else:
stacking = None
hc = {
'chart': {
'type': chart_type
},
'plotOptions': {
'series': {
'marker': {
'enabled': False
}
},
'area': {'stacking': stacking},
},
'title': {'text': ''},
'xAxis': {
'title': {'text': xaxis_label},
'type': 'datetime' if chart.x_is_date else None,
},
'yAxis': {
'title': {'text': yaxis_label},
},
'colorAxis': colorAxis,
'tooltip': {
'useHTML': True,
'backgroundColor': None,
'borderWidth': 0,
},
'series': series,
}
if chart.y_log_scale:
hc['yAxis']['type'] = 'logarithmic'
hc['yAxis']['minorTickInterval'] = 0.1
if 'min' in hc['yAxis']:
del hc['yAxis']['min']
payload['state'] = 'SUCCESS'
payload['hc'] = hc
payload['data'] = data
payload['request_dict'] = request_dict
return Response(
response=json.dumps(payload, indent=4, default=date_handler),
status=200,
mimetype="application/json")
@expose('/chart')
@data_profiling_required
def chart(self):
session = settings.Session()
chart_id = request.args.get('chart_id')
embed = request.args.get('embed')
chart = session.query(models.Chart).filter_by(id=chart_id).all()[0]
session.expunge_all()
session.commit()
session.close()
if chart.chart_type == 'para':
return self.render('airflow/para/para.html', chart=chart)
sql = ""
if chart.show_sql:
sql = Markup(highlight(
chart.sql,
lexers.SqlLexer(), # Lexer call
HtmlFormatter(noclasses=True))
)
return self.render(
'airflow/highchart.html',
chart=chart,
title="Airflow - Chart",
sql=sql,
label=chart.label,
embed=embed)
@expose('/dag_stats')
@login_required
def dag_stats(self):
states = [
State.SUCCESS,
State.RUNNING,
State.FAILED,
State.UPSTREAM_FAILED,
State.UP_FOR_RETRY,
State.QUEUED,
]
task_ids = []
for dag in dagbag.dags.values():
task_ids += dag.task_ids
TI = models.TaskInstance
session = Session()
qry = (
session.query(TI.dag_id, TI.state, sqla.func.count(TI.task_id))
.filter(TI.task_id.in_(task_ids))
.group_by(TI.dag_id, TI.state)
)
data = {}
for dag_id, state, count in qry:
if dag_id not in data:
data[dag_id] = {}
data[dag_id][state] = count
session.commit()
session.close()
payload = {}
for dag in dagbag.dags.values():
payload[dag.dag_id] = []
for state in states:
try:
count = data[dag.dag_id][state]
except:
count = 0
d = {
'state': state,
'count': count,
'dag_id': dag.dag_id,
'color': State.color(state)
}
payload[dag.dag_id].append(d)
return Response(
response=json.dumps(payload, indent=4),
status=200, mimetype="application/json")
@expose('/code')
@login_required
def code(self):
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
code = "".join(open(dag.full_filepath, 'r').readlines())
title = dag.filepath
html_code = highlight(
code, lexers.PythonLexer(), HtmlFormatter(linenos=True))
return self.render(
'airflow/dag_code.html', html_code=html_code, dag=dag, title=title,
root=request.args.get('root'),
demo_mode=conf.getboolean('webserver', 'demo_mode'))
@app.errorhandler(404)
def circles(self):
return render_template('airflow/circles.html'), 404
@expose('/sandbox')
@login_required
def sandbox(self):
from airflow import configuration
title = "Sandbox Suggested Configuration"
cfg_loc = configuration.AIRFLOW_CONFIG + '.sandbox'
f = open(cfg_loc, 'r')
config = f.read()
f.close()
code_html = Markup(highlight(
config,
lexers.IniLexer(), # Lexer call
HtmlFormatter(noclasses=True))
)
return self.render(
'airflow/code.html',
code_html=code_html, title=title, subtitle=cfg_loc)
@expose('/noaccess')
def noaccess(self):
return self.render('airflow/noaccess.html')
@expose('/headers')
def headers(self):
d = {k: v for k, v in request.headers}
if hasattr(current_user, 'is_superuser'):
d['is_superuser'] = current_user.is_superuser()
d['data_profiling'] = current_user.data_profiling()
d['is_anonymous'] = current_user.is_anonymous()
d['is_authenticated'] = current_user.is_authenticated()
return Response(
response=json.dumps(d, indent=4),
status=200, mimetype="application/json")
@expose('/login')
def login(self):
return login.login(self, request)
@expose('/logout')
def logout(self):
logout_user()
return redirect(url_for('admin.index'))
@expose('/rendered')
@login_required
def rendered(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
dttm = dateutil.parser.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
dag = dagbag.get_dag(dag_id)
task = copy.copy(dag.get_task(task_id))
ti = models.TaskInstance(task=task, execution_date=dttm)
try:
ti.render_templates()
except Exception as e:
flash("Error rendering template: " + str(e), "error")
title = "Rendered Template"
html_dict = {}
for template_field in task.__class__.template_fields:
content = getattr(task, template_field)
if template_field in attr_renderer:
html_dict[template_field] = attr_renderer[template_field](content)
else:
html_dict[template_field] = (
"<pre><code>" + str(content) + "</pre></code>")
return self.render(
'airflow/ti_code.html',
html_dict=html_dict,
dag=dag,
task_id=task_id,
execution_date=execution_date,
form=form,
title=title,)
@expose('/log')
@login_required
def log(self):
BASE_LOG_FOLDER = os.path.expanduser(
conf.get('core', 'BASE_LOG_FOLDER'))
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
dag = dagbag.get_dag(dag_id)
log_relative = "/{dag_id}/{task_id}/{execution_date}".format(
**locals())
loc = BASE_LOG_FOLDER + log_relative
loc = loc.format(**locals())
log = ""
TI = models.TaskInstance
session = Session()
dttm = dateutil.parser.parse(execution_date)
ti = session.query(TI).filter(
TI.dag_id == dag_id, TI.task_id == task_id,
TI.execution_date == dttm).first()
dttm = dateutil.parser.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
if ti:
host = ti.hostname
if socket.gethostname() == host:
try:
f = open(loc)
log += "".join(f.readlines())
f.close()
except:
log = "Log file isn't where expected.\n".format(loc)
else:
WORKER_LOG_SERVER_PORT = \
conf.get('celery', 'WORKER_LOG_SERVER_PORT')
url = (
"http://{host}:{WORKER_LOG_SERVER_PORT}/log"
"{log_relative}").format(**locals())
log += "Log file isn't local.\n"
log += "Fetching here: {url}\n".format(**locals())
try:
import requests
log += requests.get(url).text
except:
log += "Failed to fetch log file.".format(**locals())
session.commit()
session.close()
log = log.decode('utf-8') if PY2 else log
title = "Log"
return self.render(
'airflow/ti_code.html',
code=log, dag=dag, title=title, task_id=task_id,
execution_date=execution_date, form=form)
@expose('/task')
@login_required
def task(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
# Carrying execution_date through, even though it's irrelevant for
# this context
execution_date = request.args.get('execution_date')
dttm = dateutil.parser.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
dag = dagbag.get_dag(dag_id)
if not dag or task_id not in dag.task_ids:
flash(
"Task [{}.{}] doesn't seem to exist"
" at the moment".format(dag_id, task_id),
"error")
return redirect('/admin/')
task = dag.get_task(task_id)
task = copy.copy(task)
task.resolve_template_files()
attributes = []
for attr_name in dir(task):
if not attr_name.startswith('_'):
attr = getattr(task, attr_name)
if type(attr) != type(self.task) and \
attr_name not in attr_renderer:
attributes.append((attr_name, str(attr)))
title = "Task Details"
# Color coding the special attributes that are code
special_attrs_rendered = {}
for attr_name in attr_renderer:
if hasattr(task, attr_name):
source = getattr(task, attr_name)
special_attrs_rendered[attr_name] = attr_renderer[attr_name](source)
return self.render(
'airflow/task.html',
attributes=attributes,
task_id=task_id,
execution_date=execution_date,
special_attrs_rendered=special_attrs_rendered,
form=form,
dag=dag, title=title)
@expose('/action')
@login_required
def action(self):
action = request.args.get('action')
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
origin = request.args.get('origin')
dag = dagbag.get_dag(dag_id)
task = dag.get_task(task_id)
execution_date = request.args.get('execution_date')
execution_date = dateutil.parser.parse(execution_date)
confirmed = request.args.get('confirmed') == "true"
upstream = request.args.get('upstream') == "true"
downstream = request.args.get('downstream') == "true"
if action == "run":
from airflow.executors import DEFAULT_EXECUTOR as executor
from airflow.executors import CeleryExecutor
if not isinstance(executor, CeleryExecutor):
flash("Only works with the CeleryExecutor, sorry", "error")
return redirect(origin)
force = request.args.get('force') == "true"
deps = request.args.get('deps') == "true"
ti = models.TaskInstance(task=task, execution_date=execution_date)
executor.start()
executor.queue_task_instance(
ti, force=force, ignore_dependencies=deps)
executor.heartbeat()
flash(
"Sent {} to the message queue, "
"it should start any moment now.".format(ti))
return redirect(origin)
elif action == 'clear':
future = request.args.get('future') == "true"
past = request.args.get('past') == "true"
dag = dag.sub_dag(
task_regex=r"^{0}$".format(task_id),
include_downstream=downstream,
include_upstream=upstream)
end_date = execution_date if not future else None
start_date = execution_date if not past else None
if confirmed:
count = dag.clear(
start_date=start_date,
end_date=end_date)
flash("{0} task instances have been cleared".format(count))
return redirect(origin)
else:
tis = dag.clear(
start_date=start_date,
end_date=end_date,
dry_run=True)
if not tis:
flash("No task instances to clear", 'error')
response = redirect(origin)
else:
details = "\n".join([str(t) for t in tis])
response = self.render(
'airflow/confirm.html',
message=(
"Here's the list of task instances you are about "
"to clear:"),
details=details,)
return response
elif action == 'success':
# Flagging tasks as successful
session = settings.Session()
task_ids = [task_id]
if downstream:
task_ids += [
t.task_id
for t in task.get_flat_relatives(upstream=False)]
if upstream:
task_ids += [
t.task_id
for t in task.get_flat_relatives(upstream=True)]
TI = models.TaskInstance
tis = session.query(TI).filter(
TI.dag_id == dag_id,
TI.execution_date == execution_date,
TI.task_id.in_(task_ids)).all()
if confirmed:
updated_task_ids = []
for ti in tis:
updated_task_ids.append(ti.task_id)
ti.state = State.SUCCESS
session.commit()
to_insert = list(set(task_ids) - set(updated_task_ids))
for task_id in to_insert:
ti = TI(
task=dag.get_task(task_id),
execution_date=execution_date,
state=State.SUCCESS)
session.add(ti)
session.commit()
session.commit()
session.close()
flash("Marked success on {} task instances".format(
len(task_ids)))
return redirect(origin)
else:
if not task_ids:
flash("No task instances to mark as successful", 'error')
response = redirect(origin)
else:
tis = []
for task_id in task_ids:
tis.append(TI(
task=dag.get_task(task_id),
execution_date=execution_date,
state=State.SUCCESS))
details = "\n".join([str(t) for t in tis])
response = self.render(
'airflow/confirm.html',
message=(
"Here's the list of task instances you are about "
"to mark as successful:"),
details=details,)
return response
@expose('/tree')
@login_required
@wwwutils.gzipped
def tree(self):
dag_id = request.args.get('dag_id')
blur = conf.getboolean('webserver', 'demo_mode')
dag = dagbag.get_dag(dag_id)
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_downstream=False,
include_upstream=True)
session = settings.Session()
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else 25
if not base_date:
base_date = dag.latest_execution_date or datetime.now()
else:
base_date = dateutil.parser.parse(base_date)
base_date = utils.round_time(base_date, dag.schedule_interval)
form = TreeForm(data={'base_date': base_date, 'num_runs': num_runs})
start_date = dag.start_date
if not start_date and 'start_date' in dag.default_args:
start_date = dag.default_args['start_date']
if start_date:
difference = base_date - start_date
offset = timedelta(seconds=int(difference.total_seconds() % dag.schedule_interval.total_seconds()))
base_date -= offset
base_date -= timedelta(microseconds=base_date.microsecond)
from_date = (base_date - (num_runs * dag.schedule_interval))
dates = utils.date_range(
from_date, base_date, dag.schedule_interval)
task_instances = {}
for ti in dag.get_task_instances(session, from_date):
task_instances[(ti.task_id, ti.execution_date)] = ti
expanded = []
# The default recursion traces every path so that tree view has full
# expand/collapse functionality. After 5,000 nodes we stop and fall
# back on a quick DFS search for performance. See PR #320.
node_count = [0]
node_limit = 5000 / len(dag.roots)
def recurse_nodes(task, visited):
visited.add(task)
node_count[0] += 1
children = [
recurse_nodes(t, visited) for t in task.upstream_list
if node_count[0] < node_limit or t not in visited]
# D3 tree uses children vs _children to define what is
# expanded or not. The following block makes it such that
# repeated nodes are collapsed by default.
children_key = 'children'
if task.task_id not in expanded:
expanded.append(task.task_id)
elif children:
children_key = "_children"
return {
'name': task.task_id,
'instances': [
utils.alchemy_to_dict(
task_instances.get((task.task_id, d))) or {
'execution_date': d.isoformat(),
'task_id': task.task_id
}
for d in dates],
children_key: children,
'num_dep': len(task.upstream_list),
'operator': task.task_type,
'retries': task.retries,
'owner': task.owner,
'start_date': task.start_date,
'end_date': task.end_date,
'depends_on_past': task.depends_on_past,
'ui_color': task.ui_color,
}
if len(dag.roots) > 1:
# d3 likes a single root
data = {
'name': 'root',
'instances': [],
'children': [recurse_nodes(t, set()) for t in dag.roots]
}
elif len(dag.roots) == 1:
data = recurse_nodes(dag.roots[0], set())
else:
flash("No tasks found.", "error")
data = []
data = json.dumps(data, indent=4, default=utils.json_ser)
session.commit()
session.close()
return self.render(
'airflow/tree.html',
operators=sorted(
list(set([op.__class__ for op in dag.tasks])),
key=lambda x: x.__name__
),
root=root,
form=form,
dag=dag, data=data, blur=blur)
@expose('/graph')
@login_required
@wwwutils.gzipped
def graph(self):
session = settings.Session()
dag_id = request.args.get('dag_id')
blur = conf.getboolean('webserver', 'demo_mode')
arrange = request.args.get('arrange', "LR")
dag = dagbag.get_dag(dag_id)
if dag_id not in dagbag.dags:
flash('DAG "{0}" seems to be missing.'.format(dag_id), "error")
return redirect('/admin/')
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
nodes = []
edges = []
for task in dag.tasks:
nodes.append({
'id': task.task_id,
'value': {
'label': task.task_id,
'labelStyle': "fill:{0};".format(task.ui_fgcolor),
'style': "fill:{0};".format(task.ui_color),
}
})
def get_upstream(task):
for t in task.upstream_list:
edge = {
'u': t.task_id,
'v': task.task_id,
}
if edge not in edges:
edges.append(edge)
get_upstream(t)
for t in dag.roots:
get_upstream(t)
dttm = request.args.get('execution_date')
if dttm:
dttm = dateutil.parser.parse(dttm)
else:
dttm = dag.latest_execution_date or datetime.now().date()
form = GraphForm(data={'execution_date': dttm, 'arrange': arrange})
task_instances = {
ti.task_id: utils.alchemy_to_dict(ti)
for ti in dag.get_task_instances(session, dttm, dttm)
}
tasks = {
t.task_id: {
'dag_id': t.dag_id,
'task_type': t.task_type,
}
for t in dag.tasks
}
if not tasks:
flash("No tasks found", "error")
session.commit()
session.close()
doc_md = markdown.markdown(dag.doc_md) if hasattr(dag, 'doc_md') else ''
return self.render(
'airflow/graph.html',
dag=dag,
form=form,
width=request.args.get('width', "100%"),
height=request.args.get('height', "800"),
execution_date=dttm.isoformat(),
doc_md=doc_md,
arrange=arrange,
operators=sorted(
list(set([op.__class__ for op in dag.tasks])),
key=lambda x: x.__name__
),
blur=blur,
root=root or '',
task_instances=json.dumps(task_instances, indent=2),
tasks=json.dumps(tasks, indent=2),
nodes=json.dumps(nodes, indent=2),
edges=json.dumps(edges, indent=2),)
@expose('/duration')
@login_required
def duration(self):
session = settings.Session()
dag_id = request.args.get('dag_id')
days = int(request.args.get('days', 30))
dag = dagbag.get_dag(dag_id)
from_date = (datetime.today()-timedelta(days)).date()
from_date = datetime.combine(from_date, datetime.min.time())
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
all_data = []
for task in dag.tasks:
data = []
for ti in task.get_task_instances(session, from_date):
if ti.duration:
data.append([
ti.execution_date.isoformat(),
float(ti.duration) / (60*60)
])
if data:
all_data.append({'data': data, 'name': task.task_id})
session.commit()
session.close()
return self.render(
'airflow/chart.html',
dag=dag,
data=all_data,
chart_options={'yAxis': {'title': {'text': 'hours'}}},
height="700px",
demo_mode=conf.getboolean('webserver', 'demo_mode'),
root=root,
)
@expose('/landing_times')
@login_required
def landing_times(self):
session = settings.Session()
dag_id = request.args.get('dag_id')
days = int(request.args.get('days', 30))
dag = dagbag.get_dag(dag_id)
from_date = (datetime.today()-timedelta(days)).date()
from_date = datetime.combine(from_date, datetime.min.time())
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
all_data = []
for task in dag.tasks:
data = []
for ti in task.get_task_instances(session, from_date):
if ti.end_date:
data.append([
ti.execution_date.isoformat(), old_div((
ti.end_date - (
ti.execution_date + task.schedule_interval)
).total_seconds(),(60*60))
])
all_data.append({'data': data, 'name': task.task_id})
session.commit()
session.close()
return self.render(
'airflow/chart.html',
dag=dag,
data=all_data,
height="700px",
chart_options={'yAxis': {'title': {'text': 'hours after 00:00'}}},
demo_mode=conf.getboolean('webserver', 'demo_mode'),
root=root,
)
@expose('/paused')
@login_required
def paused(self):
DagModel = models.DagModel
dag_id = request.args.get('dag_id')
session = settings.Session()
orm_dag = session.query(
DagModel).filter(DagModel.dag_id == dag_id).first()
if request.args.get('is_paused') == 'false':
orm_dag.is_paused = True
else:
orm_dag.is_paused = False
session.merge(orm_dag)
session.commit()
session.close()
dagbag.get_dag(dag_id)
return "OK"
@expose('/refresh')
@login_required
def refresh(self):
DagModel = models.DagModel
dag_id = request.args.get('dag_id')
session = settings.Session()
orm_dag = session.query(
DagModel).filter(DagModel.dag_id == dag_id).first()
if orm_dag:
orm_dag.last_expired = datetime.now()
session.merge(orm_dag)
session.commit()
session.close()
dagbag.get_dag(dag_id)
flash("DAG [{}] is now fresh as a daisy".format(dag_id))
return redirect('/')
@expose('/refresh_all')
@login_required
def refresh_all(self):
dagbag.collect_dags(only_if_updated=False)
flash("All DAGs are now up to date")
return redirect('/')
@expose('/gantt')
@login_required
def gantt(self):
session = settings.Session()
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
demo_mode = conf.getboolean('webserver', 'demo_mode')
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
dttm = request.args.get('execution_date')
if dttm:
dttm = dateutil.parser.parse(dttm)
else:
dttm = dag.latest_execution_date or datetime.now().date()
form = DateTimeForm(data={'execution_date': dttm})
tis = [
ti
for ti in dag.get_task_instances(session, dttm, dttm)
if ti.start_date]
tis = sorted(tis, key=lambda ti: ti.start_date)
tasks = []
data = []
for i, ti in enumerate(tis):
end_date = ti.end_date or datetime.now()
tasks += [ti.task_id]
color = State.color(ti.state)
data.append({
'x': i,
'low': int(ti.start_date.strftime('%s')) * 1000,
'high': int(end_date.strftime('%s')) * 1000,
'color': color,
})
height = (len(tis) * 25) + 50
session.commit()
session.close()
hc = {
'chart': {
'type': 'columnrange',
'inverted': True,
'height': height,
},
'xAxis': {'categories': tasks},
'yAxis': {'type': 'datetime'},
'title': {
'text': None
},
'plotOptions': {
'series': {
'cursor': 'pointer',
'minPointLength': 4,
},
},
'legend': {
'enabled': False
},
'series': [{
'data': data
}]
}
return self.render(
'airflow/gantt.html',
dag=dag,
execution_date=dttm.isoformat(),
form=form,
hc=json.dumps(hc, indent=4),
height=height,
demo_mode=demo_mode,
root=root,
)
@expose('/variables/<form>', methods=["GET", "POST"])
@login_required
def variables(self, form):
try:
if request.method == 'POST':
data = request.json
if data:
session = settings.Session()
var = models.Variable(key=form, val=json.dumps(data))
session.add(var)
session.commit()
return ""
else:
return self.render(
'airflow/variables/{}.html'.format(form)
)
except:
return ("Error: form airflow/variables/{}.html "
"not found.").format(form), 404
admin.add_view(Airflow(name='DAGs'))
class QueryView(wwwutils.DataProfilingMixin, BaseView):
@expose('/')
@wwwutils.gzipped
def query(self):
session = settings.Session()
dbs = session.query(models.Connection).order_by(
models.Connection.conn_id).all()
session.expunge_all()
db_choices = list(
((db.conn_id, db.conn_id) for db in dbs if db.get_hook()))
conn_id_str = request.args.get('conn_id')
csv = request.args.get('csv') == "true"
sql = request.args.get('sql')
class QueryForm(Form):
conn_id = SelectField("Layout", choices=db_choices)
sql = TextAreaField("SQL", widget=wwwutils.AceEditorWidget())
data = {
'conn_id': conn_id_str,
'sql': sql,
}
results = None
has_data = False
error = False
if conn_id_str:
db = [db for db in dbs if db.conn_id == conn_id_str][0]
hook = db.get_hook()
try:
df = hook.get_pandas_df(wwwutils.limit_sql(sql, QUERY_LIMIT, conn_type=db.conn_type))
# df = hook.get_pandas_df(sql)
has_data = len(df) > 0
df = df.fillna('')
results = df.to_html(
classes="table table-bordered table-striped no-wrap",
index=False,
na_rep='',
) if has_data else ''
except Exception as e:
flash(str(e), 'error')
error = True
if has_data and len(df) == QUERY_LIMIT:
flash(
"Query output truncated at " + str(QUERY_LIMIT) +
" rows", 'info')
if not has_data and error:
flash('No data', 'error')
if csv:
return Response(
response=df.to_csv(index=False),
status=200,
mimetype="application/text")
form = QueryForm(request.form, data=data)
session.commit()
session.close()
return self.render(
'airflow/query.html', form=form,
title="Ad Hoc Query",
results=results or '',
has_data=has_data)
admin.add_view(QueryView(name='Ad Hoc Query', category="Data Profiling"))
class AirflowModelView(ModelView):
list_template = 'airflow/model_list.html'
edit_template = 'airflow/model_edit.html'
create_template = 'airflow/model_create.html'
page_size = 500
class ModelViewOnly(wwwutils.LoginMixin, AirflowModelView):
"""
    Modifies the base ModelView class for non-edit, browse-only operations
"""
named_filter_urls = True
can_create = False
can_edit = False
can_delete = False
column_display_pk = True
def log_link(v, c, m, p):
url = url_for(
'airflow.log',
dag_id=m.dag_id,
task_id=m.task_id,
execution_date=m.execution_date.isoformat())
return Markup(
'<a href="{url}">'
' <span class="glyphicon glyphicon-book" aria-hidden="true">'
'</span></a>').format(**locals())
def task_instance_link(v, c, m, p):
url = url_for(
'airflow.task',
dag_id=m.dag_id,
task_id=m.task_id,
execution_date=m.execution_date.isoformat())
url_root = url_for(
'airflow.graph',
dag_id=m.dag_id,
root=m.task_id,
execution_date=m.execution_date.isoformat())
return Markup(
"""
<span style="white-space: nowrap;">
<a href="{url}">{m.task_id}</a>
<a href="{url_root}" title="Filter on this task and upstream">
<span class="glyphicon glyphicon-filter" style="margin-left: 0px;"
aria-hidden="true"></span>
</a>
</span>
""".format(**locals()))
def state_f(v, c, m, p):
color = State.color(m.state)
return Markup(
'<span class="label" style="background-color:{color};">'
'{m.state}</span>'.format(**locals()))
def duration_f(v, c, m, p):
if m.end_date and m.duration:
return timedelta(seconds=m.duration)
def datetime_f(v, c, m, p):
attr = getattr(m, p)
dttm = attr.isoformat() if attr else ''
if datetime.now().isoformat()[:4] == dttm[:4]:
dttm = dttm[5:]
return Markup("<nobr>{}</nobr>".format(dttm))
def nobr_f(v, c, m, p):
return Markup("<nobr>{}</nobr>".format(getattr(m, p)))
class JobModelView(ModelViewOnly):
verbose_name_plural = "jobs"
verbose_name = "job"
column_default_sort = ('start_date', True)
column_filters = (
'job_type', 'dag_id', 'state',
'unixname', 'hostname', 'start_date', 'end_date', 'latest_heartbeat')
column_formatters = dict(
start_date=datetime_f,
end_date=datetime_f,
hostname=nobr_f,
state=state_f,
latest_heartbeat=datetime_f)
mv = JobModelView(jobs.BaseJob, Session, name="Jobs", category="Browse")
admin.add_view(mv)
class LogModelView(ModelViewOnly):
verbose_name_plural = "logs"
verbose_name = "log"
column_default_sort = ('dttm', True)
column_filters = ('dag_id', 'task_id', 'execution_date')
column_formatters = dict(
dttm=datetime_f, execution_date=datetime_f, dag_id=dag_link)
mv = LogModelView(
models.Log, Session, name="Logs", category="Browse")
admin.add_view(mv)
class TaskInstanceModelView(ModelViewOnly):
verbose_name_plural = "task instances"
verbose_name = "task instance"
column_filters = (
'state', 'dag_id', 'task_id', 'execution_date', 'hostname',
'queue', 'pool', 'operator')
named_filter_urls = True
column_formatters = dict(
log=log_link, task_id=task_instance_link,
hostname=nobr_f,
state=state_f,
execution_date=datetime_f,
start_date=datetime_f,
end_date=datetime_f,
dag_id=dag_link, duration=duration_f)
column_searchable_list = ('dag_id', 'task_id', 'state')
column_default_sort = ('start_date', True)
column_list = (
'state', 'dag_id', 'task_id', 'execution_date', 'operator',
'start_date', 'end_date', 'duration', 'job_id', 'hostname',
'unixname', 'priority_weight', 'queued_dttm', 'log')
can_delete = True
page_size = 500
mv = TaskInstanceModelView(
models.TaskInstance, Session, name="Task Instances", category="Browse")
admin.add_view(mv)
mv = DagModelView(
models.DagModel, Session, name=None)
admin.add_view(mv)
# Hack to not add this view to the menu
admin._menu = admin._menu[:-1]
class ConnectionModelView(wwwutils.SuperUserMixin, AirflowModelView):
create_template = 'airflow/conn_create.html'
edit_template = 'airflow/conn_edit.html'
list_template = 'airflow/conn_list.html'
form_columns = (
'conn_id',
'conn_type',
'host',
'schema',
'login',
'password',
'port',
'extra',
)
verbose_name = "Connection"
verbose_name_plural = "Connections"
column_default_sort = ('conn_id', False)
column_list = ('conn_id', 'conn_type', 'host', 'port', 'is_encrypted',)
form_overrides = dict(_password=VisiblePasswordField)
form_widget_args = {
'is_encrypted': {'disabled': True},
}
    # Used to customize the form; these form elements get rendered and the
    # results are stored in the extra field as JSON. All of these fields
    # need to be named with the prefix extra__ followed by the conn_type,
    # as in extra__{conn_type}__name. You can also hide form elements and
    # rename others from the connection_form.js file.
form_extra_fields = {
'extra__jdbc__drv_path' : StringField('Driver Path'),
'extra__jdbc__drv_clsname': StringField('Driver Class'),
}
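    # For example (illustrative values only), a JDBC connection saved through
    # this form ends up with an `extra` JSON string along the lines of:
    # {"extra__jdbc__drv_path": "/path/to/driver.jar",
    #  "extra__jdbc__drv_clsname": "org.example.Driver"}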
form_choices = {
'conn_type': [
('ftp', 'FTP',),
('hdfs', 'HDFS',),
('http', 'HTTP',),
('hive_cli', 'Hive Client Wrapper',),
('hive_metastore', 'Hive Metastore Thrift',),
('hiveserver2', 'Hive Server 2 Thrift',),
('jdbc', 'Jdbc Connection',),
('mysql', 'MySQL',),
('postgres', 'Postgres',),
('oracle', 'Oracle',),
('presto', 'Presto',),
('s3', 'S3',),
('samba', 'Samba',),
('sqlite', 'Sqlite',),
('mssql', 'Microsoft SQL Server'),
]
}
def on_model_change(self, form, model, is_created):
formdata = form.data
if formdata['conn_type'] in ['jdbc']:
extra = {
key:formdata[key]
for key in self.form_extra_fields.keys() if key in formdata}
model.extra = json.dumps(extra)
@classmethod
def is_secure(self):
"""
        Used to decide whether to display a message in the Connection list
        view making it clear that passwords can't be encrypted (no Fernet
        key or cryptography package is available).
"""
is_secure = False
try:
import cryptography
conf.get('core', 'fernet_key')
is_secure = True
except:
pass
return is_secure
def on_form_prefill(self, form, id):
try:
d = json.loads(form.data.get('extra', '{}'))
except Exception as e:
d = {}
for field in list(self.form_extra_fields.keys()):
value = d.get(field, '')
if value:
field = getattr(form, field)
field.data = value
mv = ConnectionModelView(
models.Connection, Session,
name="Connections", category="Admin")
admin.add_view(mv)
class UserModelView(wwwutils.SuperUserMixin, AirflowModelView):
verbose_name = "User"
verbose_name_plural = "Users"
column_default_sort = 'username'
mv = UserModelView(models.User, Session, name="Users", category="Admin")
admin.add_view(mv)
class ConfigurationView(wwwutils.SuperUserMixin, BaseView):
@expose('/')
def conf(self):
from airflow import configuration
raw = request.args.get('raw') == "true"
title = "Airflow Configuration"
subtitle = configuration.AIRFLOW_CONFIG
if conf.getboolean("webserver", "expose_config"):
with open(configuration.AIRFLOW_CONFIG, 'r') as f:
config = f.read()
else:
config = (
"# You Airflow administrator chose not to expose the "
"configuration, most likely for security reasons.")
if raw:
return Response(
response=config,
status=200,
mimetype="application/text")
else:
code_html = Markup(highlight(
config,
lexers.IniLexer(), # Lexer call
HtmlFormatter(noclasses=True))
)
return self.render(
'airflow/code.html',
pre_subtitle=settings.HEADER + " v" + airflow.__version__,
code_html=code_html, title=title, subtitle=subtitle)
admin.add_view(ConfigurationView(name='Configuration', category="Admin"))
def label_link(v, c, m, p):
try:
default_params = eval(m.default_params)
except:
default_params = {}
url = url_for(
'airflow.chart', chart_id=m.id, iteration_no=m.iteration_no,
**default_params)
return Markup("<a href='{url}'>{m.label}</a>".format(**locals()))
class ChartModelView(wwwutils.DataProfilingMixin, AirflowModelView):
verbose_name = "chart"
verbose_name_plural = "charts"
form_columns = (
'label',
'owner',
'conn_id',
'chart_type',
'show_datatable',
'x_is_date',
'y_log_scale',
'show_sql',
'height',
'sql_layout',
'sql',
'default_params',)
column_list = (
'label', 'conn_id', 'chart_type', 'owner', 'last_modified',)
column_formatters = dict(label=label_link, last_modified=datetime_f)
column_default_sort = ('last_modified', True)
create_template = 'airflow/chart/create.html'
edit_template = 'airflow/chart/edit.html'
column_filters = ('label', 'owner.username', 'conn_id')
column_searchable_list = ('owner.username', 'label', 'sql')
column_descriptions = {
'label': "Can include {{ templated_fields }} and {{ macros }}",
'chart_type': "The type of chart to be displayed",
'sql': "Can include {{ templated_fields }} and {{ macros }}.",
'height': "Height of the chart, in pixels.",
'conn_id': "Source database to run the query against",
'x_is_date': (
"Whether the X axis should be casted as a date field. Expect most "
"intelligible date formats to get casted properly."
),
'owner': (
"The chart's owner, mostly used for reference and filtering in "
"the list view."
),
'show_datatable':
"Whether to display an interactive data table under the chart.",
'default_params': (
            'A dictionary of {"key": "values",} that defines the default '
            'values for the templated fields (parameters). '
'To be valid, it needs to "eval" as a Python dict. '
'The key values will show up in the url\'s querystring '
'and can be altered there.'
),
'show_sql': "Whether to display the SQL statement as a collapsible "
"section in the chart page.",
'y_log_scale': "Whether to use a log scale for the Y axis.",
'sql_layout': (
"Defines the layout of the SQL that the application should "
"expect. Depending on the tables you are sourcing from, it may "
"make more sense to pivot / unpivot the metrics."
),
}
column_labels = {
'sql': "SQL",
'height': "Chart Height",
'sql_layout': "SQL Layout",
'show_sql': "Display the SQL Statement",
'default_params': "Default Parameters",
}
form_choices = {
'chart_type': [
('line', 'Line Chart'),
('spline', 'Spline Chart'),
('bar', 'Bar Chart'),
('para', 'Parallel Coordinates'),
('column', 'Column Chart'),
('area', 'Overlapping Area Chart'),
('stacked_area', 'Stacked Area Chart'),
('percent_area', 'Percent Area Chart'),
('heatmap', 'Heatmap'),
('datatable', 'No chart, data table only'),
],
'sql_layout': [
('series', 'SELECT series, x, y FROM ...'),
('columns', 'SELECT x, y (series 1), y (series 2), ... FROM ...'),
],
'conn_id': [
(c.conn_id, c.conn_id)
for c in (
Session().query(models.Connection.conn_id)
.group_by(models.Connection.conn_id)
)
]
}
def on_model_change(self, form, model, is_created=True):
if model.iteration_no is None:
model.iteration_no = 0
else:
model.iteration_no += 1
if AUTHENTICATE and not model.user_id and current_user:
model.user_id = current_user.id
model.last_modified = datetime.now()
mv = ChartModelView(
models.Chart, Session,
name="Charts", category="Data Profiling")
admin.add_view(mv)
admin.add_link(
base.MenuLink(
category='Docs',
name='Documentation',
url='http://pythonhosted.org/airflow/'))
admin.add_link(
base.MenuLink(
category='Docs',
name='Github',
url='https://github.com/airbnb/airflow'))
class KnowEventView(wwwutils.DataProfilingMixin, AirflowModelView):
verbose_name = "known event"
verbose_name_plural = "known events"
form_columns = (
'label',
'event_type',
'start_date',
'end_date',
'reported_by',
'description')
column_list = (
'label', 'event_type', 'start_date', 'end_date', 'reported_by')
column_default_sort = ("start_date", True)
mv = KnowEventView(
models.KnownEvent, Session, name="Known Events", category="Data Profiling")
admin.add_view(mv)
class KnowEventTypeView(wwwutils.DataProfilingMixin, AirflowModelView):
pass
'''
# For debugging / troubleshooting
mv = KnowEventTypeView(
models.KnownEventType,
Session, name="Known Event Types", category="Manage")
admin.add_view(mv)
class DagPickleView(SuperUserMixin, ModelView):
pass
mv = DagPickleView(
models.DagPickle,
Session, name="Pickles", category="Manage")
admin.add_view(mv)
'''
class VariableView(wwwutils.LoginMixin, AirflowModelView):
verbose_name = "Variable"
verbose_name_plural = "Variables"
column_list = ('key',)
column_filters = ('key', 'val')
column_searchable_list = ('key', 'val')
form_widget_args = {
'val': {
'rows': 20,
}
}
mv = VariableView(
models.Variable, Session, name="Variables", category="Admin")
admin.add_view(mv)
def pool_link(v, c, m, p):
url = '/admin/taskinstance/?flt1_pool_equals=' + m.pool
return Markup("<a href='{url}'>{m.pool}</a>".format(**locals()))
def fused_slots(v, c, m, p):
url = (
'/admin/taskinstance/' +
'?flt1_pool_equals=' + m.pool +
'&flt2_state_equals=running')
return Markup("<a href='{0}'>{1}</a>".format(url, m.used_slots()))
def fqueued_slots(v, c, m, p):
url = (
'/admin/taskinstance/' +
'?flt1_pool_equals=' + m.pool +
'&flt2_state_equals=queued&sort=10&desc=1')
return Markup("<a href='{0}'>{1}</a>".format(url, m.queued_slots()))
class PoolModelView(wwwutils.SuperUserMixin, AirflowModelView):
column_list = ('pool', 'slots', 'used_slots', 'queued_slots')
column_formatters = dict(
pool=pool_link, used_slots=fused_slots, queued_slots=fqueued_slots)
named_filter_urls = True
mv = PoolModelView(models.Pool, Session, name="Pools", category="Admin")
admin.add_view(mv)
class SlaMissModelView(wwwutils.SuperUserMixin, ModelViewOnly):
verbose_name_plural = "SLA misses"
verbose_name = "SLA miss"
column_list = (
'dag_id', 'task_id', 'execution_date', 'email_sent', 'timestamp')
column_formatters = dict(
task_id=task_instance_link,
execution_date=datetime_f,
timestamp=datetime_f,
dag_id=dag_link)
named_filter_urls = True
column_searchable_list = ('dag_id', 'task_id',)
column_filters = (
'dag_id', 'task_id', 'email_sent', 'timestamp', 'execution_date')
form_widget_args = {
'email_sent': {'disabled': True},
'timestamp': {'disabled': True},
}
mv = SlaMissModelView(
models.SlaMiss, Session, name="SLA Misses", category="Browse")
admin.add_view(mv)
def integrate_plugins():
"""Integrate plugins to the context"""
from airflow.plugins_manager import (
admin_views, flask_blueprints, menu_links)
for v in admin_views:
admin.add_view(v)
for bp in flask_blueprints:
print(bp)
app.register_blueprint(bp)
for ml in menu_links:
admin.add_link(ml)
integrate_plugins()
| apache-2.0 |
wlamond/scikit-learn | examples/linear_model/plot_sgd_weighted_samples.py | 344 | 1458 | """
=====================
SGD: Weighted samples
=====================
Plot the decision function of a weighted dataset, where the size of the points
is proportional to their weights.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# we create 20 points
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
y = [1] * 10 + [-1] * 10
sample_weight = 100 * np.abs(np.random.randn(20))
# and assign a bigger weight to the first 10 samples
sample_weight[:10] *= 10
# plot the weighted data points
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
plt.figure()
plt.scatter(X[:, 0], X[:, 1], c=y, s=sample_weight, alpha=0.9,
cmap=plt.cm.bone)
## fit the unweighted model
clf = linear_model.SGDClassifier(alpha=0.01, n_iter=100)
clf.fit(X, y)
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
no_weights = plt.contour(xx, yy, Z, levels=[0], linestyles=['solid'])
## fit the weighted model
clf = linear_model.SGDClassifier(alpha=0.01, n_iter=100)
clf.fit(X, y, sample_weight=sample_weight)
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
samples_weights = plt.contour(xx, yy, Z, levels=[0], linestyles=['dashed'])
plt.legend([no_weights.collections[0], samples_weights.collections[0]],
["no weights", "with weights"], loc="lower left")
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
subutai/NAB | nab/corpus.py | 7 | 7565 | # ----------------------------------------------------------------------
# Copyright (C) 2014-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This contains the objects to store and manipulate a database of csv files.
"""
import copy
import os
import pandas
from nab.util import (absoluteFilePaths,
createPath)
class DataFile(object):
"""
Class for storing and manipulating a single datafile.
Data is stored in pandas.DataFrame
"""
def __init__(self, srcPath):
"""
@param srcPath (string) Filename of datafile to read.
"""
self.srcPath = srcPath
self.fileName = os.path.split(srcPath)[1]
self.data = pandas.io.parsers.read_csv(self.srcPath,
header=0, parse_dates=[0])
def write(self, newPath=None):
"""Write datafile to self.srcPath or newPath if given.
@param newPath (string) Path to write datafile to. If path is not given,
write to source path
"""
path = newPath if newPath else self.srcPath
self.data.to_csv(path, index=False)
def modifyData(self, columnName, data=None, write=False):
"""Add columnName to datafile if data is given otherwise remove
columnName.
@param columnName (string) Name of the column in the datafile to
either add or remove.
@param data (pandas.Series) Column data to be added to datafile.
Data length should be as long as the
length of other columns.
@param write (boolean) Flag to choose whether to write modifications to
source path.
"""
if isinstance(data, pandas.Series):
self.data[columnName] = data
else:
if columnName in self.data:
del self.data[columnName]
if write:
self.write()
def getTimestampRange(self, t1, t2):
"""Given timestamp range, get all records that are within that range.
@param t1 (int) Starting timestamp.
@param t2 (int) Ending timestamp.
@return (list) Timestamp and value for each time stamp within the
timestamp range.
"""
tmp = self.data[self.data["timestamp"] >= t1]
ans = tmp[tmp["timestamp"] <= t2]["timestamp"].tolist()
return ans
def __str__(self):
ans = ""
ans += "path: %s\n" % self.srcPath
ans += "file name: %s\n"% self.fileName
ans += "data size: ", self.data.shape()
ans += "sample line: %s\n" % ", ".join(self.data[0])
return ans
class Corpus(object):
"""
Class for storing and manipulating a corpus of data where each datafile is
stored as a DataFile object.
"""
def __init__(self, srcRoot):
"""
@param srcRoot (string) Source directory of corpus.
"""
self.srcRoot = srcRoot
self.dataFiles = self.getDataFiles()
self.numDataFiles = len(self.dataFiles)
def getDataFiles(self):
"""
Collect all CSV data files from self.srcRoot directory.
@return (dict) Keys are relative paths (from self.srcRoot) and values are
the corresponding data files.
"""
filePaths = absoluteFilePaths(self.srcRoot)
dataSets = [DataFile(path) for path in filePaths if ".csv" in path]
def getRelativePath(srcRoot, srcPath):
return srcPath[srcPath.index(srcRoot)+len(srcRoot):]\
.strip(os.path.sep).replace(os.path.sep, "/")
return {getRelativePath(self.srcRoot, d.srcPath) : d for d in dataSets}
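  # Illustrative example (hypothetical paths): with srcRoot "/data/corpus" and
  # srcPath "/data/corpus/realTraffic/speed_7578.csv", getRelativePath returns
  # "realTraffic/speed_7578.csv".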
def addColumn(self, columnName, data, write=False):
"""
    Add column to entire corpus given columnName and a dictionary of data for
    each file in the corpus.
@param columnName (string) Name of the column in the datafile to add.
@param data (dict) Dictionary containing key value pairs of a
relative path and its corresponding
datafile (as a pandas.Series).
@param write (boolean) Flag to decide whether to write corpus
                              modifications or not.
"""
for relativePath in self.dataFiles.keys():
self.dataFiles[relativePath].modifyData(
columnName, data[relativePath], write=write)
def removeColumn(self, columnName, write=False):
"""
    Remove column from entire corpus given columnName.
    @param columnName (string) Name of the column in the datafile to remove.
@param write (boolean) Flag to decide whether to write corpus
                              modifications or not.
"""
for relativePath in self.dataFiles.keys():
self.dataFiles[relativePath].modifyData(columnName, write=write)
def copy(self, newRoot=None):
"""Copy corpus to a newRoot which cannot already exist.
@param newRoot (string) Location of new directory to copy corpus
to.
"""
if newRoot[-1] != os.path.sep:
newRoot += os.path.sep
if os.path.isdir(newRoot):
print "directory already exists"
return None
else:
createPath(newRoot)
newCorpus = Corpus(newRoot)
for relativePath in self.dataFiles.keys():
newCorpus.addDataSet(relativePath, self.dataFiles[relativePath])
return newCorpus
def addDataSet(self, relativePath, dataSet):
"""Add datafile to corpus given its realtivePath within the corpus.
@param relativePath (string) Path of the new datafile relative to
the corpus directory.
    @param dataSet (DataFile) Data set to be added to the corpus.
"""
self.dataFiles[relativePath] = copy.deepcopy(dataSet)
newPath = self.srcRoot + relativePath
createPath(newPath)
self.dataFiles[relativePath].srcPath = newPath
self.dataFiles[relativePath].write()
self.numDataFiles = len(self.dataFiles)
def getDataSubset(self, query):
"""
Get subset of the corpus given a query to match the datafile filename or
relative path.
    @param query (string) Search query for obtaining the subset of
the corpus.
@return (dict) Dictionary containing key value pairs of a
relative path and its corresponding
datafile.
"""
ans = {}
for relativePath in self.dataFiles.keys():
if query in relativePath:
ans[relativePath] = self.dataFiles[relativePath]
return ans
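    # Illustrative example (hypothetical path): corpus.getDataSubset("realTraffic")
    # returns only the entries of self.dataFiles whose relative path contains
    # "realTraffic".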
| agpl-3.0 |
PatrickChrist/scikit-learn | sklearn/utils/tests/test_estimator_checks.py | 202 | 3757 | import scipy.sparse as sp
import numpy as np
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils.testing import assert_raises_regex, assert_true
from sklearn.utils.estimator_checks import check_estimator
from sklearn.utils.estimator_checks import check_estimators_unfitted
from sklearn.linear_model import LogisticRegression
from sklearn.utils.validation import check_X_y, check_array
class CorrectNotFittedError(ValueError):
"""Exception class to raise if estimator is used before fitting.
Like NotFittedError, it inherits from ValueError, but not from
AttributeError. Used for testing only.
"""
class BaseBadClassifier(BaseEstimator, ClassifierMixin):
def fit(self, X, y):
return self
def predict(self, X):
return np.ones(X.shape[0])
class NoCheckinPredict(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y)
return self
class NoSparseClassifier(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y, accept_sparse=['csr', 'csc'])
if sp.issparse(X):
raise ValueError("Nonsensical Error")
return self
def predict(self, X):
X = check_array(X)
return np.ones(X.shape[0])
class CorrectNotFittedErrorClassifier(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y)
self.coef_ = np.ones(X.shape[1])
return self
def predict(self, X):
if not hasattr(self, 'coef_'):
raise CorrectNotFittedError("estimator is not fitted yet")
X = check_array(X)
return np.ones(X.shape[0])
def test_check_estimator():
# tests that the estimator actually fails on "bad" estimators.
# not a complete test of all checks, which are very extensive.
# check that we have a set_params and can clone
msg = "it does not implement a 'get_params' methods"
assert_raises_regex(TypeError, msg, check_estimator, object)
# check that we have a fit method
msg = "object has no attribute 'fit'"
assert_raises_regex(AttributeError, msg, check_estimator, BaseEstimator)
# check that fit does input validation
msg = "TypeError not raised by fit"
assert_raises_regex(AssertionError, msg, check_estimator, BaseBadClassifier)
# check that predict does input validation (doesn't accept dicts in input)
msg = "Estimator doesn't check for NaN and inf in predict"
assert_raises_regex(AssertionError, msg, check_estimator, NoCheckinPredict)
# check for sparse matrix input handling
msg = "Estimator type doesn't seem to fail gracefully on sparse data"
# the check for sparse input handling prints to the stdout,
# instead of raising an error, so as not to remove the original traceback.
# that means we need to jump through some hoops to catch it.
old_stdout = sys.stdout
string_buffer = StringIO()
sys.stdout = string_buffer
try:
check_estimator(NoSparseClassifier)
except:
pass
finally:
sys.stdout = old_stdout
assert_true(msg in string_buffer.getvalue())
# doesn't error on actual estimator
check_estimator(LogisticRegression)
def test_check_estimators_unfitted():
# check that a ValueError/AttributeError is raised when calling predict
# on an unfitted estimator
msg = "AttributeError or ValueError not raised by predict"
assert_raises_regex(AssertionError, msg, check_estimators_unfitted,
"estimator", NoSparseClassifier)
# check that CorrectNotFittedError inherit from either ValueError
# or AttributeError
check_estimators_unfitted("estimator", CorrectNotFittedErrorClassifier)
| bsd-3-clause |
jstooks9/mpp | 2d_histogram.py | 1 | 2549 | # create 2D histogram, where histogram height is displayed as color
import numpy as np
import matplotlib.pyplot as plt
import sys
# Arguments:
# 1 - data filename
# 2 - number of bins
# 3 - figure title
# 4 - cutoff for # of stds (default 5)
def make_histogram(data, bins):
    dataRange = max(data) - min(data) # renamed from 'range' to avoid shadowing the built-in range()
    binWidth = dataRange / float(bins)
    histogram = [0]*bins # initialize histogram
    lo = min(data)
    for d in data:
        index = int((d - lo) / binWidth) # calculate which bin to tally
        if index == bins: # the maximum value lands exactly on the upper edge,
            index = bins - 1 # so clamp it into the last bin
        histogram[index] += 1
    binList = [lo] # initialize list of bin boundaries, starting at the minimum value
    for i in range(0, bins):
        binList.append(lo + binWidth*(i + 1)) # populate binList with
                                              # upper bin boundary values
    return binList, histogram # return x,y data for the histogram
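# Illustrative example (values assume the offset/clamp fix above): for
# data = [1, 2, 2, 3, 9] and bins = 4, binWidth is 2.0, the returned bin
# boundaries are [1, 3.0, 5.0, 7.0, 9.0] and the histogram is [3, 1, 0, 1].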
def readParsedFile(filename):
xList = []
yList = []
numSTDs = 4
with open(filename) as f:
while True:
currLine = f.readline()
if currLine == '':
break
xList.append(int(currLine.split()[0]))
yList.append(int(currLine.split()[1]))
return xList, yList
##################################################
# Main Program
##################################################
FILEEXTENSIONLENGTH = 4
DEFAULTSTDS = 5
#________________________________________________|
# Inputs #|
inputfilename = sys.argv[1] #|
nbins = int(sys.argv[2]) #|
figureTitle = sys.argv[3] #|
if len(sys.argv) == 5: #|
stds = float(sys.argv[4]) #|
else: #|
stds = DEFAULTSTDS #|
#________________________________________________|
figureName = inputfilename[:-FILEEXTENSIONLENGTH] + '_plot.png'
x, y = readParsedFile(inputfilename)
stdX = np.std(x)
meanX = np.mean(x)
maxX = meanX + (stdX * stds)
minX = meanX - (stdX * stds)
# maxX = 3600
# minX = 0
stdY = np.std(y)
meanY = np.mean(y)
maxY = meanY + (stdY * stds)
minY = meanY - (stdY * stds)
# maxY = 2500
# minY = 500
trimmedX = []
trimmedY = []
for i, j in zip(x,y):
if i < minX or i > maxX or j < minY or j > maxY:
continue
trimmedX.append(i)
trimmedY.append(j)
H, xedges, yedges = np.histogram2d(trimmedX, trimmedY, bins = nbins)
H = np.rot90(H)
H = np.flipud(H)
Hmasked = np.ma.masked_where(H==0,H)
fig = plt.figure()
plt.set_cmap("spectral")
plt.pcolormesh(xedges,yedges,Hmasked)
plt.ylabel('TAC')
plt.xlabel('Amplitude')
plt.title(figureTitle)
cbar = plt.colorbar()
plt.savefig(figureName)
print('Figure saved as', figureName) | apache-2.0 |
siutanwong/scikit-learn | sklearn/svm/setup.py | 321 | 3157 | import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('svm', parent_package, top_path)
config.add_subpackage('tests')
# Section LibSVM
# we compile both libsvm and libsvm_sparse
config.add_library('libsvm-skl',
sources=[join('src', 'libsvm', 'libsvm_template.cpp')],
depends=[join('src', 'libsvm', 'svm.cpp'),
join('src', 'libsvm', 'svm.h')],
# Force C++ linking in case gcc is picked up instead
# of g++ under windows with some versions of MinGW
extra_link_args=['-lstdc++'],
)
libsvm_sources = ['libsvm.c']
libsvm_depends = [join('src', 'libsvm', 'libsvm_helper.c'),
join('src', 'libsvm', 'libsvm_template.cpp'),
join('src', 'libsvm', 'svm.cpp'),
join('src', 'libsvm', 'svm.h')]
config.add_extension('libsvm',
sources=libsvm_sources,
include_dirs=[numpy.get_include(),
join('src', 'libsvm')],
libraries=['libsvm-skl'],
depends=libsvm_depends,
)
### liblinear module
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
liblinear_sources = ['liblinear.c',
join('src', 'liblinear', '*.cpp')]
liblinear_depends = [join('src', 'liblinear', '*.h'),
join('src', 'liblinear', 'liblinear_helper.c')]
config.add_extension('liblinear',
sources=liblinear_sources,
libraries=cblas_libs,
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
depends=liblinear_depends,
# extra_compile_args=['-O0 -fno-inline'],
** blas_info)
## end liblinear module
# this should go *after* libsvm-skl
libsvm_sparse_sources = ['libsvm_sparse.c']
config.add_extension('libsvm_sparse', libraries=['libsvm-skl'],
sources=libsvm_sparse_sources,
include_dirs=[numpy.get_include(),
join("src", "libsvm")],
depends=[join("src", "libsvm", "svm.h"),
join("src", "libsvm",
"libsvm_sparse_helper.c")])
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
DonBeo/statsmodels | statsmodels/sandbox/examples/ex_kaplan_meier.py | 33 | 2838 | #An example for the Kaplan-Meier estimator
from __future__ import print_function
from statsmodels.compat.python import lrange
import statsmodels.api as sm
import matplotlib.pyplot as plt
import numpy as np
from statsmodels.sandbox.survival2 import KaplanMeier
#Getting the strike data as an array
dta = sm.datasets.strikes.load()
print('basic data')
print('\n')
dta = list(dta.values()[-1])
print(dta[lrange(5),:])
print('\n')
#Create the KaplanMeier object and fit the model
km = KaplanMeier(dta,0)
km.fit()
#show the results
km.plot()
print('basic model')
print('\n')
km.summary()
print('\n')
#Mutiple survival curves
km2 = KaplanMeier(dta,0,exog=1)
km2.fit()
print('more than one curve')
print('\n')
km2.summary()
print('\n')
km2.plot()
#with censoring
censoring = np.ones_like(dta[:,0])
censoring[dta[:,0] > 80] = 0
dta = np.c_[dta,censoring]
print('with censoring')
print('\n')
print(dta[lrange(5),:])
print('\n')
km3 = KaplanMeier(dta,0,exog=1,censoring=2)
km3.fit()
km3.summary()
print('\n')
km3.plot()
#Test for difference of survival curves
log_rank = km3.test_diff([0.0645,-0.03957])
print('log rank test')
print('\n')
print(log_rank)
print('\n')
#The zeroth element of log_rank is the chi-square test statistic
#for the difference between the survival curves for exog = 0.0645
#and exog = -0.03957, the index one element is the degrees of freedom for
#the test, and the index two element is the p-value for the test
wilcoxon = km3.test_diff([0.0645,-0.03957], rho=1)
print('Wilcoxon')
print('\n')
print(wilcoxon)
print('\n')
#Same info as log_rank, but for Peto and Peto modification to the
#Gehan-Wilcoxon test
#User specified functions for tests
#A wider range of rates can be accessed by using the 'weight' parameter
#for the test_diff method
#For example, if the desired weights are S(t)*(1-S(t)), where S(t) is a pooled
#estimate for the survival function, this could be computed by doing
def weights(t):
    #must accept one argument, even though it is not used here
s = KaplanMeier(dta,0,censoring=2)
s.fit()
s = s.results[0][0]
s = s * (1 - s)
return s
#KaplanMeier provides an array of times to the weighting function
#internally, so the weighting function must accept one argument
test = km3.test_diff([0.0645,-0.03957], weight=weights)
print('user specified weights')
print('\n')
print(test)
print('\n')
#Groups with nan names
#These can be handled by passing the data to KaplanMeier as an array of strings
groups = np.ones_like(dta[:,1])
groups = groups.astype('S4')
groups[dta[:,1] > 0] = 'high'
groups[dta[:,1] <= 0] = 'low'
dta = dta.astype('S4')
dta[:,1] = groups
print('with nan group names')
print('\n')
print(dta[lrange(5),:])
print('\n')
km4 = KaplanMeier(dta,0,exog=1,censoring=2)
km4.fit()
km4.summary()
print('\n')
km4.plot()
#show all the plots
plt.show()
| bsd-3-clause |
soulmachine/scikit-learn | doc/sphinxext/gen_rst.py | 2 | 38923 | """
Example generation for the scikit learn
Generate the rst files for the examples by iterating over the python
example files.
Files that generate images should start with 'plot'
"""
from __future__ import division, print_function
from time import time
import ast
import os
import re
import shutil
import traceback
import glob
import sys
import gzip
import posixpath
import subprocess
import warnings  # used by make_thumbnail's optipng fallback below
from textwrap import dedent
# Try Python 2 first, otherwise load from Python 3
try:
from StringIO import StringIO
import cPickle as pickle
import urllib2 as urllib
from urllib2 import HTTPError, URLError
except ImportError:
from io import StringIO
import pickle
import urllib.request
import urllib.error
import urllib.parse
from urllib.error import HTTPError, URLError
try:
# Python 2 built-in
execfile
except NameError:
def execfile(filename, global_vars=None, local_vars=None):
with open(filename) as f:
code = compile(f.read(), filename, 'exec')
exec(code, global_vars, local_vars)
try:
basestring
except NameError:
basestring = str
import token
import tokenize
import numpy as np
try:
# make sure that the Agg backend is set before importing any
# matplotlib
import matplotlib
matplotlib.use('Agg')
except ImportError:
# this script can be imported by nosetest to find tests to run: we should not
# impose the matplotlib requirement in that case.
pass
from sklearn.externals import joblib
###############################################################################
# A tee object to redirect streams to multiple outputs
class Tee(object):
def __init__(self, file1, file2):
self.file1 = file1
self.file2 = file2
def write(self, data):
self.file1.write(data)
self.file2.write(data)
def flush(self):
self.file1.flush()
self.file2.flush()
###############################################################################
# Documentation link resolver objects
def _get_data(url):
"""Helper function to get data over http or from a local file"""
if url.startswith('http://'):
# Try Python 2, use Python 3 on exception
try:
resp = urllib.urlopen(url)
encoding = resp.headers.dict.get('content-encoding', 'plain')
except AttributeError:
resp = urllib.request.urlopen(url)
encoding = resp.headers.get('content-encoding', 'plain')
data = resp.read()
if encoding == 'plain':
pass
elif encoding == 'gzip':
data = StringIO(data)
data = gzip.GzipFile(fileobj=data).read()
else:
raise RuntimeError('unknown encoding')
else:
with open(url, 'r') as fid:
data = fid.read()
fid.close()
return data
mem = joblib.Memory(cachedir='_build')
get_data = mem.cache(_get_data)
def parse_sphinx_searchindex(searchindex):
"""Parse a Sphinx search index
Parameters
----------
searchindex : str
The Sphinx search index (contents of searchindex.js)
Returns
-------
filenames : list of str
The file names parsed from the search index.
objects : dict
The objects parsed from the search index.
"""
def _select_block(str_in, start_tag, end_tag):
"""Select first block delimited by start_tag and end_tag"""
start_pos = str_in.find(start_tag)
if start_pos < 0:
raise ValueError('start_tag not found')
depth = 0
for pos in range(start_pos, len(str_in)):
if str_in[pos] == start_tag:
depth += 1
elif str_in[pos] == end_tag:
depth -= 1
if depth == 0:
break
sel = str_in[start_pos + 1:pos]
return sel
def _parse_dict_recursive(dict_str):
"""Parse a dictionary from the search index"""
dict_out = dict()
pos_last = 0
pos = dict_str.find(':')
while pos >= 0:
key = dict_str[pos_last:pos]
if dict_str[pos + 1] == '[':
# value is a list
pos_tmp = dict_str.find(']', pos + 1)
if pos_tmp < 0:
raise RuntimeError('error when parsing dict')
value = dict_str[pos + 2: pos_tmp].split(',')
# try to convert elements to int
for i in range(len(value)):
try:
value[i] = int(value[i])
except ValueError:
pass
elif dict_str[pos + 1] == '{':
# value is another dictionary
subdict_str = _select_block(dict_str[pos:], '{', '}')
value = _parse_dict_recursive(subdict_str)
pos_tmp = pos + len(subdict_str)
else:
raise ValueError('error when parsing dict: unknown elem')
key = key.strip('"')
if len(key) > 0:
dict_out[key] = value
pos_last = dict_str.find(',', pos_tmp)
if pos_last < 0:
break
pos_last += 1
pos = dict_str.find(':', pos_last)
return dict_out
# Make sure searchindex uses UTF-8 encoding
if hasattr(searchindex, 'decode'):
searchindex = searchindex.decode('UTF-8')
# parse objects
query = 'objects:'
pos = searchindex.find(query)
if pos < 0:
raise ValueError('"objects:" not found in search index')
sel = _select_block(searchindex[pos:], '{', '}')
objects = _parse_dict_recursive(sel)
# parse filenames
query = 'filenames:'
pos = searchindex.find(query)
if pos < 0:
raise ValueError('"filenames:" not found in search index')
filenames = searchindex[pos + len(query) + 1:]
filenames = filenames[:filenames.find(']')]
filenames = [f.strip('"') for f in filenames.split(',')]
return filenames, objects
class SphinxDocLinkResolver(object):
""" Resolve documentation links using searchindex.js generated by Sphinx
Parameters
----------
doc_url : str
The base URL of the project website.
searchindex : str
Filename of searchindex, relative to doc_url.
extra_modules_test : list of str
List of extra module names to test.
relative : bool
Return relative links (only useful for links to documentation of this
package).
"""
def __init__(self, doc_url, searchindex='searchindex.js',
extra_modules_test=None, relative=False):
self.doc_url = doc_url
self.relative = relative
self._link_cache = {}
self.extra_modules_test = extra_modules_test
self._page_cache = {}
if doc_url.startswith('http://'):
if relative:
raise ValueError('Relative links are only supported for local '
'URLs (doc_url cannot start with "http://)"')
searchindex_url = doc_url + '/' + searchindex
else:
searchindex_url = os.path.join(doc_url, searchindex)
# detect if we are using relative links on a Windows system
if os.name.lower() == 'nt' and not doc_url.startswith('http://'):
if not relative:
raise ValueError('You have to use relative=True for the local'
' package on a Windows system.')
self._is_windows = True
else:
self._is_windows = False
# download and initialize the search index
sindex = get_data(searchindex_url)
filenames, objects = parse_sphinx_searchindex(sindex)
self._searchindex = dict(filenames=filenames, objects=objects)
def _get_link(self, cobj):
"""Get a valid link, False if not found"""
fname_idx = None
full_name = cobj['module_short'] + '.' + cobj['name']
if full_name in self._searchindex['objects']:
value = self._searchindex['objects'][full_name]
if isinstance(value, dict):
value = value[next(iter(value.keys()))]
fname_idx = value[0]
elif cobj['module_short'] in self._searchindex['objects']:
value = self._searchindex['objects'][cobj['module_short']]
if cobj['name'] in value.keys():
fname_idx = value[cobj['name']][0]
if fname_idx is not None:
fname = self._searchindex['filenames'][fname_idx] + '.html'
if self._is_windows:
fname = fname.replace('/', '\\')
link = os.path.join(self.doc_url, fname)
else:
link = posixpath.join(self.doc_url, fname)
if hasattr(link, 'decode'):
link = link.decode('utf-8', 'replace')
if link in self._page_cache:
html = self._page_cache[link]
else:
html = get_data(link)
self._page_cache[link] = html
# test if cobj appears in page
comb_names = [cobj['module_short'] + '.' + cobj['name']]
if self.extra_modules_test is not None:
for mod in self.extra_modules_test:
comb_names.append(mod + '.' + cobj['name'])
url = False
if hasattr(html, 'decode'):
# Decode bytes under Python 3
html = html.decode('utf-8', 'replace')
for comb_name in comb_names:
if hasattr(comb_name, 'decode'):
# Decode bytes under Python 3
comb_name = comb_name.decode('utf-8', 'replace')
if comb_name in html:
url = link + u'#' + comb_name
link = url
else:
link = False
return link
def resolve(self, cobj, this_url):
"""Resolve the link to the documentation, returns None if not found
Parameters
----------
cobj : dict
Dict with information about the "code object" for which we are
resolving a link.
cobi['name'] : function or class name (str)
cobj['module_short'] : shortened module name (str)
cobj['module'] : module name (str)
this_url: str
URL of the current page. Needed to construct relative URLs
(only used if relative=True in constructor).
Returns
-------
link : str | None
The link (URL) to the documentation.
"""
full_name = cobj['module_short'] + '.' + cobj['name']
link = self._link_cache.get(full_name, None)
if link is None:
# we don't have it cached
link = self._get_link(cobj)
# cache it for the future
self._link_cache[full_name] = link
if link is False or link is None:
# failed to resolve
return None
if self.relative:
link = os.path.relpath(link, start=this_url)
if self._is_windows:
                # replace '\' with '/' so it works on the web
link = link.replace('\\', '/')
# for some reason, the relative link goes one directory too high up
link = link[3:]
return link
###############################################################################
rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
"""
plot_rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
%(image_list)s
%(stdout)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
**Total running time of the example:** %(time_elapsed) .2f seconds
(%(time_m) .0f minutes %(time_s) .2f seconds)
"""
# The following strings are used when we have several pictures: we use
# an html div tag that our CSS uses to turn the lists into horizontal
# lists.
HLIST_HEADER = """
.. rst-class:: horizontal
"""
HLIST_IMAGE_TEMPLATE = """
*
.. image:: images/%s
:scale: 47
"""
SINGLE_IMAGE = """
.. image:: images/%s
:align: center
"""
# The following dictionary contains the information used to create the
# thumbnails for the front page of the scikit-learn home page.
# key: first image in set
# values: (number of plot in set, height of thumbnail)
carousel_thumbs = {'plot_classifier_comparison_001.png': (1, 600),
'plot_outlier_detection_001.png': (3, 372),
'plot_gp_regression_001.png': (2, 250),
'plot_adaboost_twoclass_001.png': (1, 372),
'plot_compare_methods_001.png': (1, 349)}
def extract_docstring(filename, ignore_heading=False):
""" Extract a module-level docstring, if any
"""
lines = open(filename).readlines()
start_row = 0
if lines[0].startswith('#!'):
lines.pop(0)
start_row = 1
docstring = ''
first_par = ''
line_iterator = iter(lines)
tokens = tokenize.generate_tokens(lambda: next(line_iterator))
for tok_type, tok_content, _, (erow, _), _ in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
continue
elif tok_type == 'STRING':
docstring = eval(tok_content)
# If the docstring is formatted with several paragraphs, extract
# the first one:
paragraphs = '\n'.join(
line.rstrip() for line
in docstring.split('\n')).split('\n\n')
if paragraphs:
if ignore_heading:
if len(paragraphs) > 1:
first_par = re.sub('\n', ' ', paragraphs[1])
first_par = ((first_par[:95] + '...')
if len(first_par) > 95 else first_par)
else:
raise ValueError("Docstring not found by gallery",
"Please check your example's layout",
" and make sure it's correct")
else:
first_par = paragraphs[0]
break
return docstring, first_par, erow + 1 + start_row
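# Illustrative behaviour (hypothetical example file): for a module whose
# docstring is a title line followed by a description paragraph,
# extract_docstring(fname, ignore_heading=True) returns the full docstring,
# the description paragraph flattened to a single line (truncated to 95
# characters plus '...' when longer), and the first line number after the
# docstring (shifted by one when the file starts with a '#!' shebang line).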
def generate_example_rst(app):
""" Generate the list of examples, as well as the contents of
examples.
"""
root_dir = os.path.join(app.builder.srcdir, 'auto_examples')
example_dir = os.path.abspath(os.path.join(app.builder.srcdir, '..',
'examples'))
generated_dir = os.path.abspath(os.path.join(app.builder.srcdir,
'modules', 'generated'))
try:
plot_gallery = eval(app.builder.config.plot_gallery)
except TypeError:
plot_gallery = bool(app.builder.config.plot_gallery)
if not os.path.exists(example_dir):
os.makedirs(example_dir)
if not os.path.exists(root_dir):
os.makedirs(root_dir)
if not os.path.exists(generated_dir):
os.makedirs(generated_dir)
# we create an index.rst with all examples
fhindex = open(os.path.join(root_dir, 'index.rst'), 'w')
# Note: The sidebar button has been removed from the examples page for now
# due to how it messes up the layout. Will be fixed at a later point
fhindex.write("""\
.. raw:: html
<style type="text/css">
div#sidebarbutton {
/* hide the sidebar collapser, while ensuring vertical arrangement */
width: 0px;
overflow: hidden;
}
</style>
Examples
========
.. _examples-index:
""")
# Here we don't use an os.walk, but we recurse only twice: flat is
# better than nested.
seen_backrefs = set()
generate_dir_rst('.', fhindex, example_dir, root_dir, plot_gallery, seen_backrefs)
for dir in sorted(os.listdir(example_dir)):
if os.path.isdir(os.path.join(example_dir, dir)):
generate_dir_rst(dir, fhindex, example_dir, root_dir, plot_gallery, seen_backrefs)
fhindex.flush()
def extract_line_count(filename, target_dir):
# Extract the line count of a file
example_file = os.path.join(target_dir, filename)
lines = open(example_file).readlines()
start_row = 0
if lines and lines[0].startswith('#!'):
lines.pop(0)
start_row = 1
line_iterator = iter(lines)
tokens = tokenize.generate_tokens(lambda: next(line_iterator))
check_docstring = True
erow_docstring = 0
for tok_type, _, _, (erow, _), _ in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
continue
elif ((tok_type == 'STRING') and check_docstring):
erow_docstring = erow
check_docstring = False
return erow_docstring+1+start_row, erow+1+start_row
def line_count_sort(file_list, target_dir):
# Sort the list of examples by line-count
new_list = [x for x in file_list if x.endswith('.py')]
unsorted = np.zeros(shape=(len(new_list), 2))
unsorted = unsorted.astype(np.object)
for count, exmpl in enumerate(new_list):
docstr_lines, total_lines = extract_line_count(exmpl, target_dir)
unsorted[count][1] = total_lines - docstr_lines
unsorted[count][0] = exmpl
index = np.lexsort((unsorted[:, 0].astype(np.str),
unsorted[:, 1].astype(np.float)))
if not len(unsorted):
return []
return np.array(unsorted[index][:, 0]).tolist()
def _thumbnail_div(subdir, full_dir, fname, snippet):
"""Generates RST to place a thumbnail in a gallery"""
thumb = os.path.join(full_dir, 'images', 'thumb', fname[:-3] + '.png')
link_name = os.path.join(full_dir, fname).replace(os.path.sep, '_')
ref_name = os.path.join(subdir, fname).replace(os.path.sep, '_')
if ref_name.startswith('._'):
ref_name = ref_name[2:]
out = []
out.append("""
.. raw:: html
<div class="thumbnailContainer">
<div class="docstringWrapper">
""")
out.append('.. figure:: %s\n' % thumb)
if link_name.startswith('._'):
link_name = link_name[2:]
if full_dir != '.':
out.append(' :target: ./%s/%s.html\n\n' % (full_dir, fname[:-3]))
else:
out.append(' :target: ./%s.html\n\n' % link_name[:-3])
out.append(""" :ref:`example_%s`
.. raw:: html
<p>%s
</p></div>
</div>
""" % (ref_name, snippet))
return ''.join(out)
def generate_dir_rst(dir, fhindex, example_dir, root_dir, plot_gallery, seen_backrefs):
""" Generate the rst file for an example directory.
"""
if not dir == '.':
target_dir = os.path.join(root_dir, dir)
src_dir = os.path.join(example_dir, dir)
else:
target_dir = root_dir
src_dir = example_dir
if not os.path.exists(os.path.join(src_dir, 'README.txt')):
raise ValueError('Example directory %s does not have a README.txt' %
src_dir)
fhindex.write("""
%s
""" % open(os.path.join(src_dir, 'README.txt')).read())
if not os.path.exists(target_dir):
os.makedirs(target_dir)
sorted_listdir = line_count_sort(os.listdir(src_dir),
src_dir)
if not os.path.exists(os.path.join(dir, 'images', 'thumb')):
os.makedirs(os.path.join(dir, 'images', 'thumb'))
for fname in sorted_listdir:
if fname.endswith('py'):
backrefs = generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery)
new_fname = os.path.join(src_dir, fname)
_, snippet, _ = extract_docstring(new_fname, True)
fhindex.write(_thumbnail_div(dir, dir, fname, snippet))
fhindex.write("""
.. toctree::
:hidden:
%s/%s
""" % (dir, fname[:-3]))
for backref in backrefs:
include_path = os.path.join(root_dir, '../modules/generated/%s.examples' % backref)
seen = backref in seen_backrefs
with open(include_path, 'a' if seen else 'w') as ex_file:
if not seen:
# heading
print(file=ex_file)
print('Examples using ``%s``' % backref, file=ex_file)
print('-----------------%s--' % ('-' * len(backref)),
file=ex_file)
print(file=ex_file)
rel_dir = os.path.join('../../auto_examples', dir)
ex_file.write(_thumbnail_div(dir, rel_dir, fname, snippet))
seen_backrefs.add(backref)
fhindex.write("""
.. raw:: html
<div class="clearer"></div>
""") # clear at the end of the section
# modules for which we embed links into example code
DOCMODULES = ['sklearn', 'matplotlib', 'numpy', 'scipy']
def make_thumbnail(in_fname, out_fname, width, height):
"""Make a thumbnail with the same aspect ratio centered in an
image with a given width and height
"""
# local import to avoid testing dependency on PIL:
try:
from PIL import Image
except ImportError:
import Image
img = Image.open(in_fname)
width_in, height_in = img.size
scale_w = width / float(width_in)
scale_h = height / float(height_in)
if height_in * scale_w <= height:
scale = scale_w
else:
scale = scale_h
width_sc = int(round(scale * width_in))
height_sc = int(round(scale * height_in))
# resize the image
img.thumbnail((width_sc, height_sc), Image.ANTIALIAS)
# insert centered
thumb = Image.new('RGB', (width, height), (255, 255, 255))
pos_insert = ((width - width_sc) // 2, (height - height_sc) // 2)
thumb.paste(img, pos_insert)
thumb.save(out_fname)
# Use optipng to perform lossless compression on the resized image if
# software is installed
if os.environ.get('SKLEARN_DOC_OPTIPNG', False):
try:
subprocess.call(["optipng", "-quiet", "-o", "9", out_fname])
except Exception:
warnings.warn('Install optipng to reduce the size of the generated images')
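# Worked example with the 400x280 thumbnail size used by the gallery below
# (the input dimensions are illustrative): for an 800x600 source image,
# scale_w = 0.5 and scale_h ~= 0.467; since 600 * 0.5 = 300 > 280, the
# height-based scale is chosen, giving a 373x280 resized image pasted at
# offset (13, 0) inside the 400x280 white canvas.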
def get_short_module_name(module_name, obj_name):
""" Get the shortest possible module name """
parts = module_name.split('.')
short_name = module_name
for i in range(len(parts) - 1, 0, -1):
short_name = '.'.join(parts[:i])
try:
exec('from %s import %s' % (short_name, obj_name))
except ImportError:
# get the last working module name
short_name = '.'.join(parts[:(i + 1)])
break
return short_name
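# Illustrative example (the exact result depends on what each package
# re-exports): get_short_module_name('sklearn.metrics.pairwise',
# 'euclidean_distances') would be expected to return 'sklearn.metrics',
# because the name is importable from sklearn.metrics but not from the
# top-level sklearn package.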
class NameFinder(ast.NodeVisitor):
"""Finds the longest form of variable names and their imports in code
Only retains names from imported modules.
"""
def __init__(self):
super(NameFinder, self).__init__()
self.imported_names = {}
self.accessed_names = set()
def visit_Import(self, node, prefix=''):
for alias in node.names:
local_name = alias.asname or alias.name
self.imported_names[local_name] = prefix + alias.name
def visit_ImportFrom(self, node):
self.visit_Import(node, node.module + '.')
def visit_Name(self, node):
self.accessed_names.add(node.id)
def visit_Attribute(self, node):
attrs = []
while isinstance(node, ast.Attribute):
attrs.append(node.attr)
node = node.value
if isinstance(node, ast.Name):
# This is a.b, not e.g. a().b
attrs.append(node.id)
self.accessed_names.add('.'.join(reversed(attrs)))
else:
# need to get a in a().b
self.visit(node)
def get_mapping(self):
for name in self.accessed_names:
local_name = name.split('.', 1)[0]
remainder = name[len(local_name):]
if local_name in self.imported_names:
# Join import path to relative path
full_name = self.imported_names[local_name] + remainder
yield name, full_name
def identify_names(code):
"""Builds a codeobj summary by identifying and resovles used names
>>> code = '''
... from a.b import c
... import d as e
... print(c)
... e.HelloWorld().f.g
... '''
>>> for name, o in sorted(identify_names(code).items()):
... print(name, o['name'], o['module'], o['module_short'])
c c a.b a.b
e.HelloWorld HelloWorld d d
"""
finder = NameFinder()
finder.visit(ast.parse(code))
example_code_obj = {}
for name, full_name in finder.get_mapping():
# name is as written in file (e.g. np.asarray)
# full_name includes resolved import path (e.g. numpy.asarray)
module, attribute = full_name.rsplit('.', 1)
# get shortened module name
module_short = get_short_module_name(module, attribute)
cobj = {'name': attribute, 'module': module,
'module_short': module_short}
example_code_obj[name] = cobj
return example_code_obj
def generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery):
""" Generate the rst file for a given example.
Returns the set of sklearn functions/classes imported in the example.
"""
base_image_name = os.path.splitext(fname)[0]
image_fname = '%s_%%03d.png' % base_image_name
this_template = rst_template
last_dir = os.path.split(src_dir)[-1]
# to avoid leading . in file names, and wrong names in links
if last_dir == '.' or last_dir == 'examples':
last_dir = ''
else:
last_dir += '_'
short_fname = last_dir + fname
src_file = os.path.join(src_dir, fname)
example_file = os.path.join(target_dir, fname)
shutil.copyfile(src_file, example_file)
# The following is a list containing all the figure names
figure_list = []
image_dir = os.path.join(target_dir, 'images')
thumb_dir = os.path.join(image_dir, 'thumb')
if not os.path.exists(image_dir):
os.makedirs(image_dir)
if not os.path.exists(thumb_dir):
os.makedirs(thumb_dir)
image_path = os.path.join(image_dir, image_fname)
stdout_path = os.path.join(image_dir,
'stdout_%s.txt' % base_image_name)
time_path = os.path.join(image_dir,
'time_%s.txt' % base_image_name)
thumb_file = os.path.join(thumb_dir, fname[:-3] + '.png')
time_elapsed = 0
time_m = 0
time_s = 0
if plot_gallery and fname.startswith('plot'):
# generate the plot as png image if file name
# starts with plot and if it is more recent than an
# existing image.
first_image_file = image_path % 1
if os.path.exists(stdout_path):
stdout = open(stdout_path).read()
else:
stdout = ''
if os.path.exists(time_path):
time_elapsed = float(open(time_path).read())
if not os.path.exists(first_image_file) or \
os.stat(first_image_file).st_mtime <= os.stat(src_file).st_mtime:
# We need to execute the code
print('plotting %s' % fname)
t0 = time()
import matplotlib.pyplot as plt
plt.close('all')
cwd = os.getcwd()
try:
# First CD in the original example dir, so that any file
# created by the example get created in this directory
orig_stdout = sys.stdout
os.chdir(os.path.dirname(src_file))
my_buffer = StringIO()
my_stdout = Tee(sys.stdout, my_buffer)
sys.stdout = my_stdout
my_globals = {'pl': plt}
execfile(os.path.basename(src_file), my_globals)
time_elapsed = time() - t0
sys.stdout = orig_stdout
my_stdout = my_buffer.getvalue()
if '__doc__' in my_globals:
# The __doc__ is often printed in the example, we
                    # don't wish to echo it
my_stdout = my_stdout.replace(
my_globals['__doc__'],
'')
my_stdout = my_stdout.strip()
if my_stdout:
stdout = '**Script output**::\n\n %s\n\n' % (
'\n '.join(my_stdout.split('\n')))
open(stdout_path, 'w').write(stdout)
open(time_path, 'w').write('%f' % time_elapsed)
os.chdir(cwd)
# In order to save every figure we have two solutions :
# * iterate from 1 to infinity and call plt.fignum_exists(n)
# (this requires the figures to be numbered
# incrementally: 1, 2, 3 and not 1, 2, 5)
# * iterate over [fig_mngr.num for fig_mngr in
# matplotlib._pylab_helpers.Gcf.get_all_fig_managers()]
fig_managers = matplotlib._pylab_helpers.Gcf.get_all_fig_managers()
for fig_mngr in fig_managers:
# Set the fig_num figure as the current figure as we can't
# save a figure that's not the current figure.
plt.figure(fig_mngr.num)
plt.savefig(image_path % fig_mngr.num)
figure_list.append(image_fname % fig_mngr.num)
except:
print(80 * '_')
print('%s is not compiling:' % fname)
traceback.print_exc()
print(80 * '_')
finally:
os.chdir(cwd)
sys.stdout = orig_stdout
print(" - time elapsed : %.2g sec" % time_elapsed)
else:
figure_list = [f[len(image_dir):]
for f in glob.glob(image_path.replace("%03d",
'[0-9][0-9][0-9]'))]
figure_list.sort()
# generate thumb file
this_template = plot_rst_template
car_thumb_path = os.path.join(os.path.split(root_dir)[0], '_build/html/stable/_images/')
        # Note: normally, make_thumbnail is used to write to the path contained in `thumb_file`
# which is within `auto_examples/../images/thumbs` depending on the example.
# Because the carousel has different dimensions than those of the examples gallery,
# I did not simply reuse them all as some contained whitespace due to their default gallery
        # thumbnail size. Below, for a few cases, separate thumbnails are created (the originals can't
# just be overwritten with the carousel dimensions as it messes up the examples gallery layout).
# The special carousel thumbnails are written directly to _build/html/stable/_images/,
# as for some reason unknown to me, Sphinx refuses to copy my 'extra' thumbnails from the
# auto examples gallery to the _build folder. This works fine as is, but it would be cleaner to
        # have it happen with the rest. Ideally they should be written to 'thumb_file' as well, and then
# copied to the _images folder during the `Copying Downloadable Files` step like the rest.
if not os.path.exists(car_thumb_path):
os.makedirs(car_thumb_path)
if os.path.exists(first_image_file):
# We generate extra special thumbnails for the carousel
carousel_tfile = os.path.join(car_thumb_path, fname[:-3] + '_carousel.png')
first_img = image_fname % 1
if first_img in carousel_thumbs:
make_thumbnail((image_path % carousel_thumbs[first_img][0]),
carousel_tfile, carousel_thumbs[first_img][1], 190)
make_thumbnail(first_image_file, thumb_file, 400, 280)
if not os.path.exists(thumb_file):
# create something to replace the thumbnail
make_thumbnail('images/no_image.png', thumb_file, 200, 140)
docstring, short_desc, end_row = extract_docstring(example_file)
# Depending on whether we have one or more figures, we're using a
# horizontal list or a single rst call to 'image'.
if len(figure_list) == 1:
figure_name = figure_list[0]
image_list = SINGLE_IMAGE % figure_name.lstrip('/')
else:
image_list = HLIST_HEADER
for figure_name in figure_list:
image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/')
time_m, time_s = divmod(time_elapsed, 60)
f = open(os.path.join(target_dir, fname[:-2] + 'rst'), 'w')
f.write(this_template % locals())
f.flush()
# save variables so we can later add links to the documentation
example_code_obj = identify_names(open(example_file).read())
if example_code_obj:
codeobj_fname = example_file[:-3] + '_codeobj.pickle'
with open(codeobj_fname, 'wb') as fid:
pickle.dump(example_code_obj, fid, pickle.HIGHEST_PROTOCOL)
backrefs = set('{module_short}.{name}'.format(**entry)
for entry in example_code_obj.values()
if entry['module'].startswith('sklearn'))
return backrefs
def embed_code_links(app, exception):
"""Embed hyperlinks to documentation into example code"""
try:
if exception is not None:
return
print('Embedding documentation hyperlinks in examples..')
# Add resolvers for the packages for which we want to show links
doc_resolvers = {}
doc_resolvers['sklearn'] = SphinxDocLinkResolver(app.builder.outdir,
relative=True)
doc_resolvers['matplotlib'] = SphinxDocLinkResolver(
'http://matplotlib.org')
doc_resolvers['numpy'] = SphinxDocLinkResolver(
'http://docs.scipy.org/doc/numpy-1.6.0')
doc_resolvers['scipy'] = SphinxDocLinkResolver(
'http://docs.scipy.org/doc/scipy-0.11.0/reference')
example_dir = os.path.join(app.builder.srcdir, 'auto_examples')
html_example_dir = os.path.abspath(os.path.join(app.builder.outdir,
'auto_examples'))
# patterns for replacement
link_pattern = '<a href="%s">%s</a>'
orig_pattern = '<span class="n">%s</span>'
period = '<span class="o">.</span>'
for dirpath, _, filenames in os.walk(html_example_dir):
for fname in filenames:
print('\tprocessing: %s' % fname)
full_fname = os.path.join(html_example_dir, dirpath, fname)
subpath = dirpath[len(html_example_dir) + 1:]
pickle_fname = os.path.join(example_dir, subpath,
fname[:-5] + '_codeobj.pickle')
if os.path.exists(pickle_fname):
# we have a pickle file with the objects to embed links for
with open(pickle_fname, 'rb') as fid:
example_code_obj = pickle.load(fid)
fid.close()
str_repl = {}
# generate replacement strings with the links
for name, cobj in example_code_obj.items():
this_module = cobj['module'].split('.')[0]
if this_module not in doc_resolvers:
continue
link = doc_resolvers[this_module].resolve(cobj,
full_fname)
if link is not None:
parts = name.split('.')
name_html = period.join(orig_pattern % part
for part in parts)
str_repl[name_html] = link_pattern % (link, name_html)
# do the replacement in the html file
# ensure greediness
names = sorted(str_repl, key=len, reverse=True)
expr = re.compile(r'(?<!\.)\b' + # don't follow . or word
'|'.join(re.escape(name)
for name in names))
def substitute_link(match):
return str_repl[match.group()]
if len(str_repl) > 0:
with open(full_fname, 'rb') as fid:
lines_in = fid.readlines()
with open(full_fname, 'wb') as fid:
for line in lines_in:
line = line.decode('utf-8')
line = expr.sub(substitute_link, line)
fid.write(line.encode('utf-8'))
except HTTPError as e:
print("The following HTTP Error has occurred:\n")
print(e.code)
except URLError as e:
print("\n...\n"
"Warning: Embedding the documentation hyperlinks requires "
"internet access.\nPlease check your network connection.\n"
"Unable to continue embedding due to a URL Error: \n")
print(e.args)
print('[done]')
def setup(app):
app.connect('builder-inited', generate_example_rst)
app.add_config_value('plot_gallery', True, 'html')
# embed links after build is finished
app.connect('build-finished', embed_code_links)
# Sphinx hack: sphinx copies generated images to the build directory
# each time the docs are made. If the desired image name already
# exists, it appends a digit to prevent overwrites. The problem is,
# the directory is never cleared. This means that each time you build
# the docs, the number of images in the directory grows.
#
# This question has been asked on the sphinx development list, but there
# was no response: http://osdir.com/ml/sphinx-dev/2011-02/msg00123.html
#
# The following is a hack that prevents this behavior by clearing the
# image build directory each time the docs are built. If sphinx
# changes their layout between versions, this will not work (though
# it should probably not cause a crash). Tested successfully
# on Sphinx 1.0.7
build_image_dir = '_build/html/_images'
if os.path.exists(build_image_dir):
filelist = os.listdir(build_image_dir)
for filename in filelist:
if filename.endswith('png'):
os.remove(os.path.join(build_image_dir, filename))
def setup_module():
# HACK: Stop nosetests running setup() above
pass
| bsd-3-clause |
dsullivan7/scikit-learn | sklearn/metrics/pairwise.py | 13 | 41710 | # -*- coding: utf-8 -*-
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Robert Layton <[email protected]>
# Andreas Mueller <[email protected]>
# Philippe Gervais <[email protected]>
# Lars Buitinck <[email protected]>
# Joel Nothman <[email protected]>
# License: BSD 3 clause
import itertools
import numpy as np
from scipy.spatial import distance
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from ..utils import check_array
from ..utils import gen_even_slices
from ..utils import gen_batches
from ..utils.fixes import partial
from ..utils.extmath import row_norms, safe_sparse_dot
from ..preprocessing import normalize
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.joblib.parallel import cpu_count
from .pairwise_fast import _chi2_kernel_fast, _sparse_manhattan
# Utility Functions
def _return_float_dtype(X, Y):
"""
1. If dtype of X and Y is float32, then dtype float32 is returned.
2. Else dtype float is returned.
"""
if not issparse(X) and not isinstance(X, np.ndarray):
X = np.asarray(X)
if Y is None:
Y_dtype = X.dtype
elif not issparse(Y) and not isinstance(Y, np.ndarray):
Y = np.asarray(Y)
Y_dtype = Y.dtype
else:
Y_dtype = Y.dtype
if X.dtype == Y_dtype == np.float32:
dtype = np.float32
else:
dtype = np.float
return X, Y, dtype
def check_pairwise_arrays(X, Y):
""" Set X and Y appropriately and checks inputs
If Y is None, it is set as a pointer to X (i.e. not a copy).
If Y is given, this does not happen.
All distance metrics should use this function first to assert that the
given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the second dimension of the two arrays is equal.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y, dtype = _return_float_dtype(X, Y)
if Y is X or Y is None:
X = Y = check_array(X, accept_sparse='csr', dtype=dtype)
else:
X = check_array(X, accept_sparse='csr', dtype=dtype)
Y = check_array(Y, accept_sparse='csr', dtype=dtype)
if X.shape[1] != Y.shape[1]:
raise ValueError("Incompatible dimension for X and Y matrices: "
"X.shape[1] == %d while Y.shape[1] == %d" % (
X.shape[1], Y.shape[1]))
return X, Y
def check_paired_arrays(X, Y):
""" Set X and Y appropriately and checks inputs for paired distances
All paired distance metrics should use this function first to assert that
the given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the
shapes of the two arrays are equal.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y = check_pairwise_arrays(X, Y)
if X.shape != Y.shape:
raise ValueError("X and Y should be of same shape. They were "
"respectively %r and %r long." % (X.shape, Y.shape))
return X, Y
# Pairwise distances
def euclidean_distances(X, Y=None, Y_norm_squared=None, squared=False):
"""
Considering the rows of X (and Y=X) as vectors, compute the
distance matrix between each pair of vectors.
For efficiency reasons, the euclidean distance between a pair of row
vector x and y is computed as::
dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))
This formulation has two advantages over other ways of computing distances.
First, it is computationally efficient when dealing with sparse data.
Second, if x varies but y remains unchanged, then the right-most dot
product `dot(y, y)` can be pre-computed.
However, this is not the most precise way of doing this computation, and
the distance matrix returned by this function may not be exactly
symmetric as required by, e.g., ``scipy.spatial.distance`` functions.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_1, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_2, n_features)
Y_norm_squared : array-like, shape (n_samples_2, ), optional
Pre-computed dot-products of vectors in Y (e.g.,
``(Y**2).sum(axis=1)``)
squared : boolean, optional
Return squared Euclidean distances.
Returns
-------
distances : {array, sparse matrix}, shape (n_samples_1, n_samples_2)
Examples
--------
>>> from sklearn.metrics.pairwise import euclidean_distances
>>> X = [[0, 1], [1, 1]]
>>> # distance between rows of X
>>> euclidean_distances(X, X)
array([[ 0., 1.],
[ 1., 0.]])
>>> # get distance to origin
>>> euclidean_distances(X, [[0, 0]])
array([[ 1. ],
[ 1.41421356]])
See also
--------
paired_distances : distances between pairs of elements of X and Y.
"""
# should not need X_norm_squared because if you could precompute that as
# well as Y, then you should just pre-compute the output and not even
# call this function.
X, Y = check_pairwise_arrays(X, Y)
if Y_norm_squared is not None:
YY = check_array(Y_norm_squared)
if YY.shape != (1, Y.shape[0]):
raise ValueError(
"Incompatible dimensions for Y and Y_norm_squared")
else:
YY = row_norms(Y, squared=True)[np.newaxis, :]
if X is Y: # shortcut in the common case euclidean_distances(X, X)
XX = YY.T
else:
XX = row_norms(X, squared=True)[:, np.newaxis]
distances = safe_sparse_dot(X, Y.T, dense_output=True)
distances *= -2
distances += XX
distances += YY
np.maximum(distances, 0, out=distances)
if X is Y:
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
distances.flat[::distances.shape[0] + 1] = 0.0
return distances if squared else np.sqrt(distances, out=distances)
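# A hedged usage sketch (illustration only, not part of the original module).
# The helper below is hypothetical and is only defined, never called at import
# time; it checks the basic contract of euclidean_distances() on a tiny
# hand-computed case.
def _example_euclidean_distances():
    import numpy as np
    X = np.array([[0., 0.], [3., 4.]])
    D = euclidean_distances(X)                  # full 2 x 2 distance matrix
    D_sq = euclidean_distances(X, squared=True)
    # ||(0, 0) - (3, 4)|| = 5, hence D[0, 1] == 5 and D_sq[0, 1] == 25,
    # while the diagonal is forced to exactly 0.0 when Y is X.
    return (np.allclose(D[0, 1], 5.0) and np.allclose(D_sq[0, 1], 25.0)
            and D[0, 0] == 0.0)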
def pairwise_distances_argmin_min(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance). The minimal distances are
also returned.
This is mostly equivalent to calling:
(pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis),
pairwise_distances(X, Y=Y, metric=metric).min(axis=axis))
but uses much less memory, and is faster for large arrays.
Parameters
----------
X, Y : {array-like, sparse matrix}
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable, default 'euclidean'
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict, optional
Keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
distances : numpy.ndarray
distances[i] is the distance between the i-th row in X and the
argmin[i]-th row in Y.
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin
"""
dist_func = None
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
dist_func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif not callable(metric) and not isinstance(metric, str):
raise ValueError("'metric' must be a string or a callable")
X, Y = check_pairwise_arrays(X, Y)
if metric_kwargs is None:
metric_kwargs = {}
if axis == 0:
X, Y = Y, X
# Allocate output arrays
indices = np.empty(X.shape[0], dtype=np.intp)
values = np.empty(X.shape[0])
values.fill(np.infty)
for chunk_x in gen_batches(X.shape[0], batch_size):
X_chunk = X[chunk_x, :]
for chunk_y in gen_batches(Y.shape[0], batch_size):
Y_chunk = Y[chunk_y, :]
if dist_func is not None:
if metric == 'euclidean': # special case, for speed
d_chunk = safe_sparse_dot(X_chunk, Y_chunk.T,
dense_output=True)
d_chunk *= -2
d_chunk += row_norms(X_chunk, squared=True)[:, np.newaxis]
d_chunk += row_norms(Y_chunk, squared=True)[np.newaxis, :]
np.maximum(d_chunk, 0, d_chunk)
else:
d_chunk = dist_func(X_chunk, Y_chunk, **metric_kwargs)
else:
d_chunk = pairwise_distances(X_chunk, Y_chunk,
metric=metric, **metric_kwargs)
# Update indices and minimum values using chunk
min_indices = d_chunk.argmin(axis=1)
min_values = d_chunk[np.arange(chunk_x.stop - chunk_x.start),
min_indices]
flags = values[chunk_x] > min_values
indices[chunk_x][flags] = min_indices[flags] + chunk_y.start
values[chunk_x][flags] = min_values[flags]
if metric == "euclidean" and not metric_kwargs.get("squared", False):
np.sqrt(values, values)
return indices, values
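# A hedged usage sketch (illustration only, not part of the original module).
# The hypothetical helper below verifies the equivalence stated in the
# docstring: pairwise_distances_argmin_min() matches argmin/min applied to the
# full pairwise_distances() matrix, while computing in memory-friendly chunks.
def _example_pairwise_distances_argmin_min():
    import numpy as np
    X = np.array([[0., 0.], [4., 4.]])
    Y = np.array([[0., 1.], [5., 5.]])
    argmin, dist = pairwise_distances_argmin_min(X, Y)
    D = pairwise_distances(X, Y)
    return (np.array_equal(argmin, D.argmin(axis=1))
            and np.allclose(dist, D.min(axis=1)))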
def pairwise_distances_argmin(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance).
This is mostly equivalent to calling:
pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis)
but uses much less memory, and is faster for large arrays.
This function works with dense 2D arrays only.
Parameters
----------
X : array-like
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
Y : array-like
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict
keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin_min
"""
if metric_kwargs is None:
metric_kwargs = {}
return pairwise_distances_argmin_min(X, Y, axis, metric, batch_size,
metric_kwargs)[0]
def manhattan_distances(X, Y=None, sum_over_features=True,
size_threshold=5e8):
""" Compute the L1 distances between the vectors in X and Y.
With sum_over_features equal to False it returns the componentwise
distances.
Parameters
----------
X : array_like
An array with shape (n_samples_X, n_features).
Y : array_like, optional
An array with shape (n_samples_Y, n_features).
sum_over_features : bool, default=True
If True the function returns the pairwise distance matrix
else it returns the componentwise L1 pairwise-distances.
Not supported for sparse matrix inputs.
size_threshold : int, default=5e8
Unused parameter.
Returns
-------
D : array
If sum_over_features is False shape is
(n_samples_X * n_samples_Y, n_features) and D contains the
componentwise L1 pairwise-distances (ie. absolute difference),
else shape is (n_samples_X, n_samples_Y) and D contains
the pairwise L1 distances.
Examples
--------
>>> from sklearn.metrics.pairwise import manhattan_distances
>>> manhattan_distances(3, 3)#doctest:+ELLIPSIS
array([[ 0.]])
>>> manhattan_distances(3, 2)#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances(2, 3)#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances([[1, 2], [3, 4]],\
[[1, 2], [0, 3]])#doctest:+ELLIPSIS
array([[ 0., 2.],
[ 4., 4.]])
>>> import numpy as np
>>> X = np.ones((1, 2))
>>> y = 2 * np.ones((2, 2))
>>> manhattan_distances(X, y, sum_over_features=False)#doctest:+ELLIPSIS
array([[ 1., 1.],
[ 1., 1.]]...)
"""
X, Y = check_pairwise_arrays(X, Y)
if issparse(X) or issparse(Y):
if not sum_over_features:
raise TypeError("sum_over_features=%r not supported"
" for sparse matrices" % sum_over_features)
X = csr_matrix(X, copy=False)
Y = csr_matrix(Y, copy=False)
D = np.zeros((X.shape[0], Y.shape[0]))
_sparse_manhattan(X.data, X.indices, X.indptr,
Y.data, Y.indices, Y.indptr,
X.shape[1], D)
return D
if sum_over_features:
return distance.cdist(X, Y, 'cityblock')
D = X[:, np.newaxis, :] - Y[np.newaxis, :, :]
D = np.abs(D, D)
return D.reshape((-1, X.shape[1]))
def cosine_distances(X, Y=None):
"""
Compute cosine distance between samples in X and Y.
Cosine distance is defined as 1.0 minus the cosine similarity.
Parameters
----------
X : array_like, sparse matrix
with shape (n_samples_X, n_features).
Y : array_like, sparse matrix (optional)
with shape (n_samples_Y, n_features).
Returns
-------
distance matrix : array
An array with shape (n_samples_X, n_samples_Y).
See also
--------
sklearn.metrics.pairwise.cosine_similarity
scipy.spatial.distance.cosine (dense matrices only)
"""
# 1.0 - cosine_similarity(X, Y) without copy
S = cosine_similarity(X, Y)
S *= -1
S += 1
return S
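# A hedged usage sketch (illustration only, not part of the original module).
# The hypothetical helper below checks the identity used above:
# cosine_distances(X) == 1 - cosine_similarity(X), elementwise.
def _example_cosine_distances_identity():
    import numpy as np
    X = np.array([[1., 0.], [1., 1.]])
    D = cosine_distances(X)
    S = cosine_similarity(X)
    return np.allclose(D, 1.0 - S)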
# Paired distances
def paired_euclidean_distances(X, Y):
"""
Computes the paired euclidean distances between X and Y
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
return row_norms(X - Y)
def paired_manhattan_distances(X, Y):
"""Compute the L1 distances between the vectors in X and Y.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
diff = X - Y
if issparse(diff):
diff.data = np.abs(diff.data)
return np.squeeze(np.array(diff.sum(axis=1)))
else:
return np.abs(diff).sum(axis=-1)
def paired_cosine_distances(X, Y):
"""
Computes the paired cosine distances between X and Y
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray, shape (n_samples, )
Notes
-----
The cosine distance is equivalent to half the squared
euclidean distance if each sample is normalized to unit norm.
"""
X, Y = check_paired_arrays(X, Y)
return .5 * row_norms(normalize(X) - normalize(Y), squared=True)
PAIRED_DISTANCES = {
'cosine': paired_cosine_distances,
'euclidean': paired_euclidean_distances,
'l2': paired_euclidean_distances,
'l1': paired_manhattan_distances,
'manhattan': paired_manhattan_distances,
'cityblock': paired_manhattan_distances}
def paired_distances(X, Y, metric="euclidean", **kwds):
"""
Computes the paired distances between X and Y.
Computes the distances between (X[0], Y[0]), (X[1], Y[1]), etc...
Parameters
----------
X : ndarray (n_samples, n_features)
Array 1 for distance computation.
Y : ndarray (n_samples, n_features)
Array 2 for distance computation.
metric : string or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
specified in PAIRED_DISTANCES, including "euclidean",
"manhattan", or "cosine".
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
Returns
-------
distances : ndarray (n_samples, )
Examples
--------
>>> from sklearn.metrics.pairwise import paired_distances
>>> X = [[0, 1], [1, 1]]
>>> Y = [[0, 1], [2, 1]]
>>> paired_distances(X, Y)
array([ 0., 1.])
See also
--------
pairwise_distances : pairwise distances.
"""
if metric in PAIRED_DISTANCES:
func = PAIRED_DISTANCES[metric]
return func(X, Y)
elif callable(metric):
# Check the matrix first (it is usually done by the metric)
X, Y = check_paired_arrays(X, Y)
distances = np.zeros(len(X))
for i in range(len(X)):
distances[i] = metric(X[i], Y[i])
return distances
else:
raise ValueError('Unknown distance %s' % metric)
# Kernels
def linear_kernel(X, Y=None):
"""
Compute the linear kernel between X and Y.
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
return safe_sparse_dot(X, Y.T, dense_output=True)
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):
"""
Compute the polynomial kernel between X and Y::
K(X, Y) = (gamma <X, Y> + coef0)^degree
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
coef0 : int, default 1
degree : int, default 3
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
K **= degree
return K
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
"""
Compute the sigmoid kernel between X and Y::
K(X, Y) = tanh(gamma <X, Y> + coef0)
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
coef0 : int, default 1
Returns
-------
Gram matrix: array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
np.tanh(K, K) # compute tanh in-place
return K
def rbf_kernel(X, Y=None, gamma=None):
"""
Compute the rbf (gaussian) kernel between X and Y::
K(x, y) = exp(-gamma ||x-y||^2)
for each pair of rows x in X and y in Y.
Parameters
----------
X : array of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = euclidean_distances(X, Y, squared=True)
K *= -gamma
np.exp(K, K) # exponentiate K in-place
return K
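# A hedged usage sketch (illustration only, not part of the original module).
# The hypothetical helper below checks the formula documented above:
# K(x, y) = exp(-gamma * ||x - y||^2) for a pair of points at distance 1.
def _example_rbf_kernel_formula():
    import numpy as np
    X = np.array([[0., 0.], [1., 0.]])
    K = rbf_kernel(X, gamma=0.5)
    # the squared distance between the two rows is 1, so K[0, 1] == exp(-0.5)
    return np.allclose(K[0, 1], np.exp(-0.5))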
def cosine_similarity(X, Y=None):
"""Compute cosine similarity between samples in X and Y.
Cosine similarity, or the cosine kernel, computes similarity as the
normalized dot product of X and Y:
K(X, Y) = <X, Y> / (||X||*||Y||)
On L2-normalized data, this function is equivalent to linear_kernel.
Parameters
----------
X : array_like, sparse matrix
with shape (n_samples_X, n_features).
Y : array_like, sparse matrix (optional)
with shape (n_samples_Y, n_features).
Returns
-------
kernel matrix : array
An array with shape (n_samples_X, n_samples_Y).
"""
# to avoid recursive import
X, Y = check_pairwise_arrays(X, Y)
X_normalized = normalize(X, copy=True)
if X is Y:
Y_normalized = X_normalized
else:
Y_normalized = normalize(Y, copy=True)
K = safe_sparse_dot(X_normalized, Y_normalized.T, dense_output=True)
return K
def additive_chi2_kernel(X, Y=None):
"""Computes the additive chi-squared kernel between observations in X and Y
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = -Sum [(x - y)^2 / (x + y)]
It can be interpreted as a weighted difference per entry.
Notes
-----
As the negative of a distance, this kernel is only conditionally positive
definite.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf
See also
--------
chi2_kernel : The exponentiated version of the kernel, which is usually
preferable.
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to this kernel.
"""
if issparse(X) or issparse(Y):
raise ValueError("additive_chi2 does not support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if (X < 0).any():
raise ValueError("X contains negative values.")
if Y is not X and (Y < 0).any():
raise ValueError("Y contains negative values.")
result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype)
_chi2_kernel_fast(X, Y, result)
return result
def chi2_kernel(X, Y=None, gamma=1.):
"""Computes the exponential chi-squared kernel X and Y.
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)])
It can be interpreted as a weighted difference per entry.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default=1.
Scaling parameter of the chi2 kernel.
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf
See also
--------
additive_chi2_kernel : The additive version of this kernel
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to the additive version of this kernel.
"""
K = additive_chi2_kernel(X, Y)
K *= gamma
return np.exp(K, K)
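# A hedged usage sketch (illustration only, not part of the original module).
# The hypothetical helper below checks the relation between the two
# chi-squared kernels: chi2_kernel(X, gamma) == exp(gamma * additive_chi2_kernel(X)).
def _example_chi2_kernels_relation():
    import numpy as np
    X = np.array([[1., 0.5], [0.2, 0.8]])   # non-negative, as required
    gamma = 2.0
    K_exp = chi2_kernel(X, gamma=gamma)
    K_add = additive_chi2_kernel(X)
    return np.allclose(K_exp, np.exp(gamma * K_add))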
# Helper functions - distance
PAIRWISE_DISTANCE_FUNCTIONS = {
# If updating this dictionary, update the doc in both distance_metrics()
# and also in pairwise_distances()!
'cityblock': manhattan_distances,
'cosine': cosine_distances,
'euclidean': euclidean_distances,
'l2': euclidean_distances,
'l1': manhattan_distances,
'manhattan': manhattan_distances, }
def distance_metrics():
"""Valid metrics for pairwise_distances.
This function simply returns the valid pairwise distance metrics.
It exists to allow for a description of the mapping for
each of the valid strings.
The valid distance metrics, and the function they map to, are:
============ ====================================
metric Function
============ ====================================
'cityblock' metrics.pairwise.manhattan_distances
'cosine' metrics.pairwise.cosine_distances
'euclidean' metrics.pairwise.euclidean_distances
'l1' metrics.pairwise.manhattan_distances
'l2' metrics.pairwise.euclidean_distances
'manhattan' metrics.pairwise.manhattan_distances
============ ====================================
"""
return PAIRWISE_DISTANCE_FUNCTIONS
def _parallel_pairwise(X, Y, func, n_jobs, **kwds):
"""Break the pairwise matrix in n_jobs even slices
and compute them in parallel"""
if n_jobs < 0:
n_jobs = max(cpu_count() + 1 + n_jobs, 1)
if Y is None:
Y = X
if n_jobs == 1:
# Special case to avoid picklability checks in delayed
return func(X, Y, **kwds)
# TODO: in some cases, backend='threading' may be appropriate
fd = delayed(func)
ret = Parallel(n_jobs=n_jobs, verbose=0)(
fd(X, Y[s], **kwds)
for s in gen_even_slices(Y.shape[0], n_jobs))
return np.hstack(ret)
def _pairwise_callable(X, Y, metric, **kwds):
"""Handle the callable case for pairwise_{distances,kernels}
"""
X, Y = check_pairwise_arrays(X, Y)
if X is Y:
# Only calculate metric for upper triangle
out = np.zeros((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.combinations(range(X.shape[0]), 2)
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
# Make symmetric
# NB: out += out.T will produce incorrect results
out = out + out.T
# Calculate diagonal
# NB: nonzero diagonals are allowed for both metrics and kernels
for i in range(X.shape[0]):
x = X[i]
out[i, i] = metric(x, x, **kwds)
else:
# Calculate all cells
out = np.empty((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.product(range(X.shape[0]), range(Y.shape[0]))
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
return out
_VALID_METRICS = ['euclidean', 'l2', 'l1', 'manhattan', 'cityblock',
'braycurtis', 'canberra', 'chebyshev', 'correlation',
'cosine', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean', 'yule', "wminkowski"]
def pairwise_distances(X, Y=None, metric="euclidean", n_jobs=1, **kwds):
""" Compute the distance matrix from a vector array X and optional Y.
This method takes either a vector array or a distance matrix, and returns
a distance matrix. If the input is a vector array, the distances are
computed. If the input is a distances matrix, it is returned instead.
This method provides a safe way to take a distance matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
distance between the arrays from both X and Y.
Valid values for metric are:
- From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']. These metrics support sparse matrix inputs.
- From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics. These metrics do not support sparse matrix inputs.
Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are
valid scipy.spatial.distance metrics), the scikit-learn implementation
will be used, which is faster and has support for sparse matrices (except
for 'cityblock'). For a verbose description of the metrics from
scikit-learn, see the __doc__ of the sklearn.pairwise.distance_metrics
function.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
Y : array [n_samples_b, n_features], optional
An optional second feature array. Only allowed if metric != "precomputed".
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
D : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A distance matrix D such that D_{i, j} is the distance between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then D_{i, j} is the distance between the ith array
from X and the jth array from Y.
"""
if (metric not in _VALID_METRICS and
not callable(metric) and metric != "precomputed"):
raise ValueError("Unknown metric %s. "
"Valid metrics are %s, or 'precomputed', or a "
"callable" % (metric, _VALID_METRICS))
if metric == "precomputed":
return X
elif metric in PAIRWISE_DISTANCE_FUNCTIONS:
func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
if issparse(X) or issparse(Y):
raise TypeError("scipy distance metrics do not"
" support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if n_jobs == 1 and X is Y:
return distance.squareform(distance.pdist(X, metric=metric,
**kwds))
func = partial(distance.cdist, metric=metric, **kwds)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
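# A hedged usage sketch (illustration only, not part of the original module).
# The hypothetical helper below shows the two main ways of selecting a metric:
# a registered string ('manhattan') and a user-supplied callable; both should
# produce the same L1 distance matrix here.
def _example_pairwise_distances_metrics():
    import numpy as np
    X = np.array([[0., 0.], [1., 1.], [2., 0.]])
    D_str = pairwise_distances(X, metric='manhattan')
    D_call = pairwise_distances(X, metric=lambda a, b: np.abs(a - b).sum())
    return np.allclose(D_str, D_call)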
# Helper functions - kernels
PAIRWISE_KERNEL_FUNCTIONS = {
# If updating this dictionary, update the doc in both kernel_metrics()
# and also in pairwise_kernels()!
'additive_chi2': additive_chi2_kernel,
'chi2': chi2_kernel,
'linear': linear_kernel,
'polynomial': polynomial_kernel,
'poly': polynomial_kernel,
'rbf': rbf_kernel,
'sigmoid': sigmoid_kernel,
'cosine': cosine_similarity, }
def kernel_metrics():
""" Valid metrics for pairwise_kernels
This function simply returns the valid pairwise kernel metrics.
It exists, however, to allow for a verbose description of the mapping for
each of the valid strings.
The valid kernel metrics, and the function they map to, are:
=============== ========================================
metric Function
=============== ========================================
'additive_chi2' sklearn.pairwise.additive_chi2_kernel
'chi2' sklearn.pairwise.chi2_kernel
'linear' sklearn.pairwise.linear_kernel
'poly' sklearn.pairwise.polynomial_kernel
'polynomial' sklearn.pairwise.polynomial_kernel
'rbf' sklearn.pairwise.rbf_kernel
'sigmoid' sklearn.pairwise.sigmoid_kernel
'cosine' sklearn.pairwise.cosine_similarity
=============== ========================================
"""
return PAIRWISE_KERNEL_FUNCTIONS
KERNEL_PARAMS = {
"additive_chi2": (),
"chi2": (),
"cosine": (),
"exp_chi2": frozenset(["gamma"]),
"linear": (),
"poly": frozenset(["gamma", "degree", "coef0"]),
"polynomial": frozenset(["gamma", "degree", "coef0"]),
"rbf": frozenset(["gamma"]),
"sigmoid": frozenset(["gamma", "coef0"]),
}
def pairwise_kernels(X, Y=None, metric="linear", filter_params=False,
n_jobs=1, **kwds):
"""Compute the kernel between arrays X and optional array Y.
This method takes either a vector array or a kernel matrix, and returns
a kernel matrix. If the input is a vector array, the kernels are
computed. If the input is a kernel matrix, it is returned instead.
This method provides a safe way to take a kernel matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
kernel between the arrays from both X and Y.
Valid values for metric are::
['rbf', 'sigmoid', 'polynomial', 'poly', 'linear', 'cosine']
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise kernels between samples, or a feature array.
Y : array [n_samples_b, n_features]
A second feature array only if X has shape [n_samples_a, n_features].
metric : string, or callable
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
filter_params: boolean
Whether to filter invalid parameters or not.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the kernel function.
Returns
-------
K : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A kernel matrix K such that K_{i, j} is the kernel between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then K_{i, j} is the kernel between the ith array
from X and the jth array from Y.
Notes
-----
If metric is 'precomputed', Y is ignored and X is returned.
"""
if metric == "precomputed":
return X
elif metric in PAIRWISE_KERNEL_FUNCTIONS:
if filter_params:
kwds = dict((k, kwds[k]) for k in kwds
if k in KERNEL_PARAMS[metric])
func = PAIRWISE_KERNEL_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
raise ValueError("Unknown kernel %r" % metric)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
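# A hedged usage sketch (illustration only, not part of the original module).
# The hypothetical helper below illustrates filter_params: with it enabled,
# keyword arguments that are not listed in KERNEL_PARAMS for the chosen kernel
# (here 'degree' for 'rbf') are dropped instead of raising a TypeError.
def _example_pairwise_kernels_filter_params():
    import numpy as np
    X = np.array([[0., 0.], [1., 0.]])
    K = pairwise_kernels(X, metric='rbf', filter_params=True,
                         gamma=0.5, degree=3)
    return np.allclose(K[0, 1], np.exp(-0.5))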
| bsd-3-clause |
ltiao/scikit-learn | examples/linear_model/lasso_dense_vs_sparse_data.py | 348 | 1862 | """
==============================
Lasso on dense and sparse data
==============================
We show that linear_model.Lasso provides the same results for dense and sparse
data and that in the case of sparse data the speed is improved.
"""
print(__doc__)
from time import time
from scipy import sparse
from scipy import linalg
from sklearn.datasets.samples_generator import make_regression
from sklearn.linear_model import Lasso
###############################################################################
# The two Lasso implementations on Dense data
print("--- Dense matrices")
X, y = make_regression(n_samples=200, n_features=5000, random_state=0)
X_sp = sparse.coo_matrix(X)
alpha = 1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
t0 = time()
sparse_lasso.fit(X_sp, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(X, y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
###############################################################################
# The two Lasso implementations on Sparse data
print("--- Sparse matrices")
Xs = X.copy()
Xs[Xs < 2.5] = 0.0
Xs = sparse.coo_matrix(Xs)
Xs = Xs.tocsc()
print("Matrix density : %s %%" % (Xs.nnz / float(X.size) * 100))
alpha = 0.1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
t0 = time()
sparse_lasso.fit(Xs, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(Xs.toarray(), y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
| bsd-3-clause |
bzamecnik/ml | instrument-classification/inspect_errors.py | 2 | 1780 | from argparse import ArgumentParser
import os
import pandas as pd
import shutil
import subprocess
from prepare_training_data import load_transformers
def inspect_errors(model_id):
model_dir = 'data/working/single-notes-2000/models/' + model_id
input_dir = 'data/prepared/single-notes-2000'
output_dir = model_dir + '/evaluation/errors'
os.makedirs(output_dir, exist_ok=True)
df = pd.read_csv(model_dir + '/output-data/predictions.csv')
errors = df.query("split == 'valid' & ~accurate")[:]
instr_family_le = load_transformers(model_dir)[0]
errors['sample_id'] = errors.index.map(lambda i: '%06d' % i)
errors['label_true'] = errors['y_true'].apply(lambda y_true: instr_family_le.inverse_transform(y_true))
errors['label_pred'] = errors['y_pred'].apply(lambda y_pred: instr_family_le.inverse_transform(y_pred))
errors['input_file'] = errors['sample_id'].apply(lambda sample_id: input_dir + '/sample_%s.flac' % sample_id)
errors['output_file'] = errors.apply(lambda row: output_dir + '/sample_%s_%s_%s_%s.flac'
% (row['split'], row['sample_id'], row['label_true'], row['label_pred']), axis=1)
print(errors[['sample_id', 'label_true', 'label_pred']])
# copy the files under more legible names so they are easier to inspect
for i, row in errors.iterrows():
shutil.copy(row['input_file'], row['output_file'])
# play the files (this is probably limited to some max command length)
subprocess.call(['open'] + list(errors['output_file']))
def parse_args():
parser = ArgumentParser(description='Inspect errors - listen to misclassified audio files.')
parser.add_argument('model_dir', metavar='MODEL_DIR')
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
inspect_errors(args.model_dir)
| mit |
luca-heltai/ePICURE | applications/bspline_spiral.py | 1 | 1136 | from interfaces import *
from utilities import *
from numpy import *
from numpy.linalg import lstsq
from matplotlib.pylab import *
# explicit imports for the np and plt aliases used below (the pylab star
# import does not reliably provide them)
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# Spiral parameters:
nturns = 6.0
height = 1.0
radius = 1.0
# Spiral analytical expression
cx = lambda x: radius*sin(nturns*2*pi*x)
cy = lambda y: radius*cos(nturns*2*pi*y)
cz = lambda z: height*z
# BSpline parameters
n = 21
p = 3
# Number of least square points
n_ls = 140
# Open knot vector
knots = r_[p*[0], linspace(0,1,n), p*[1]]
vs = BsplineVectorSpace(p, knots)
# Least square parameter points
t = linspace(0,1,n_ls)
# Least square points of the curve
F = array([cx(t), cy(t), cz(t)])
# Least square matrix
M = interpolation_matrix(vs, t)
# Control points and curve
CP = lstsq(M, F.T)[0]
CP2 = least_square_by_points(vs, F, t)
print (np.abs(CP - CP2))
curve = vs.element(CP)
# Approximated curve at points
C = curve(t)
# Plot curve, approximated curve and control polygon
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.plot(C[0,:], C[1,:], C[2,:])
ax.plot(F[0,:], F[1,:], F[2,:])
ax.plot(CP[:,0], CP[:,1], CP[:,2], 'o-')
savefig('fig.png')
| gpl-2.0 |
zedyang/OANDA-Forex | api.py | 2 | 28036 | #encoding: UTF-8
import json
import requests
import pandas as pd
import time
from datetime import datetime, timedelta
from Queue import Queue, Empty, PriorityQueue
from threading import Thread, Timer
from errors import (OANDA_RequestError, OANDA_EnvError,
OANDA_DataConstructorError)
class Config(object):
"""
Json-like config object.
The Config() contains all kinds of settings and user info that
could be useful in the implementation of Api wrapper.
privates
--------
* head: string; the name of config file.
* token: string; user's token.
* body: dictionary; the main content of config
- username
- password
- account_id: integer; account ID used for requests.
- domain: api domain.
- domain_stream: stream domain.
- ssl: boolean, specifes http or https usage.
- version: ='v1'
- header: dict; request header.
* env: string; 'sandbox' or 'practice' or 'real',
specifies environment of the api.
Environments reference Domain/Domain_stream
* Real Money api/stream-fxtrade.oanda.com
* practice api/stream-fxpractice.oanda.com
* sandbox api/stream-sandbox.oanda.com
"""
head = "my token"
token = '4c56cbf8105642050bbfdb36aad29c6a-' + \
'77dfc84d1fc6a2ced8e1b15641d0d69e'
body = {
'username': 'geonaroben',
'password': 'DequejHid&',
'account_id': 2804581,
'domain': 'api-sandbox.oanda.com', # sandbox environment
'domain_stream': 'stream-sandbox.oanda.com',
'ssl': False, # http or https.
'version': 'v1',
'header': {
"Content-Type" : "application/x-www-form-urlencoded",
'Connection' : 'keep-alive',
'Authorization' : 'Bearer ' + token,
'X-Accept-Datetime-Format' : 'unix'
}
}
def __init__(self, head=0, token=0, body=0, env='sandbox'):
"""
Reload constructor.
parameters
----------
* head: string; the name of config file.
* token: string; user's token.
* body: dictionary; the main content of config
* env: string; 'sandbox' or 'practice' or 'real',
specifies environment of the api.
"""
if head:
self.head = head
if token:
self.token = token
if body:
self.body = body
# environment settings.
if env == 'sandbox':
self.body['ssl'] = False
elif env == 'practice':
self.body['ssl'] = True
def view(self):
""" Prettify printing method. """
config_view = {
'config_head' : self.head,
'config_body' : self.body,
'user_token' : self.token
}
print json.dumps(config_view,
indent=4,
sort_keys=True)
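# A hedged usage sketch (illustration only, not part of the original module).
# The hypothetical helper below builds a practice-environment Config (which
# switches ssl on) and prints it; the credentials are the placeholder values
# from the class body, real usage would pass its own head/token/body.
def _example_config_usage():
    cfg = Config(env='practice')
    cfg.view()
    return cfg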
#----------------------------------------------------------------------
# Data containers.
class BaseDataContainer(object):
"""
Basic data container.
privates
--------
* head: string; the head(type) of data container.
* body: dictionary; data content. Among all sub-classes that inherit
BaseDataContainer, type(body) varies according to the financial meaning
that the child data container stands for.
- Tick:
- Bar:
- HistBar:
"""
head = 'ABSTRACT_DATA'
body = dict()
pass
class HeartBeat(BaseDataContainer):
"""
HeartBeat is almost an empty container, carries nothing but a timestamp.
privates
--------
* head: string, inherited from BaseDataContainer, equals 'TIME'.
* time: integer, a Unix timestamp.
* dt: datetime.datetime() object.
"""
head = 'TIME'
time = -1
dt = -1
def __init__(self, data):
"""
"""
try:
assert 'heartbeat' in data
self.body = data['heartbeat']
self.time = data['heartbeat']['time']
self.dt = datetime.fromtimestamp(int(self.time)/1000000)
except AssertionError:
msg = '[HEARTBEAT]: Unable to construct empty heartbeat; ' + \
'input is not heartbeat.'
raise OANDA_DataConstructorError(msg)
except Exception,e:
msg = '[HEARTBEAT]: Unable to construct heartbeat; ' + str(e)
raise OANDA_DataConstructorError(msg)
class Tick(BaseDataContainer):
"""
Tick data container.
Usually, tick containers are initialized from a market impulse,
i.e. the MarketEvent() object, which takes two forms: a real 'tick' and
an empty heartbeat. The stream maker automatically filters empty
heartbeats out (and constructs a HeartBeat(BaseDataContainer) for them).
Therefore, at the level of this class, there is no need to screen
the content of data when constructing.
When a tick is constructed, all privates except Tick.dt take values
from the Json-like data; dt is converted from time
using datetime.fromtimestamp().
privates
--------
* head: string; inherited from BaseDataContainer, equals 'TICK'.
* bid: float; bid price for this tick.
* ask: float; ask price for this tick.
* instrument: string; instrument ID.
* time: integer; Unix timestamp.
* dt: datetime.datetime() object.
"""
# place holders.
head = 'TICK'
bid = -1.0
ask = -1.0
instrument = ''
time = -1
dt = -1
def __init__(self, data):
"""
Constructor
parameters
----------
* data: dict; the market data.
it ##should## be like:
{u'tick': {
u'ask': 1.2408,
u'instrument': u'EUR_USD',
u'bid': 1.24065,
u'time': u'1438665311084691'
}}
"""
try:
assert 'tick' in data
self.body = data['tick']
self.bid = data['tick']['bid']
self.ask = data['tick']['ask']
self.instrument = data['tick']['instrument']
self.time = data['tick']['time']
self.dt = datetime.fromtimestamp(int(self.time)/1000000)
except AssertionError:
msg = '[TICK]: Unable to construct tick; ' + \
'input is not a tick.'
raise OANDA_DataConstructorError(msg)
except Exception,e:
msg = '[TICK]: Unable to construct tick; ' + str(e)
raise OANDA_DataConstructorError(msg)
def view(self):
"""
view data method.
"""
tick_view = {
'datetime': str(self.dt),
'time': self.time,
'instrument': self.instrument,
'bid': self.bid,
'ask': self.ask
}
print json.dumps(tick_view,
indent=4,
sort_keys=True)
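# A hedged usage sketch (illustration only, not part of the original module).
# The hypothetical helper below constructs a Tick from the sample json
# dictionary shown in the constructor docstring and prints it.
def _example_tick_usage():
    raw = {u'tick': {u'ask': 1.2408, u'instrument': u'EUR_USD',
                     u'bid': 1.24065, u'time': u'1438665311084691'}}
    tick = Tick(raw)
    tick.view()
    return tick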
class Bar(BaseDataContainer):
"""
Bar data container.
Bar inherently carries two ohlc bars, one is construct from bid price
in the ticks, the other using ask price.
It maintains these two bars for a predefined time span, and
dynamcially stack received Tick data object to update the bars amid
that time span by calling self.push(tick).
At tick level, all empty heartbeat impulses have already been filtered out.
So there is no need to assert tick!=heartbeat when constructing bar.
When a Bar is constructed, the start time and time span (thus the end time)
are exogenously given, so the timestamp in the initial tick is not
referred to.
privates
--------
* head: string; inherited from BaseDataContainer, equals 'BAR'.
* bid_open, bid_high, bid_low, bid_close: floats; an ohlc bar
for the bid side.
* ask_open, ask_high, ask_low, ask_close: floats; an ohlc bar
for the ask side.
* instrument: string; instrument ID.
* start, end: datetime.datetime() objects; define the start and end
of the maintenance window. These are calculated when constructed, and
remain constant during the lifespan of self.
* span: datetime.timedelta() object; marks the time range of bar,
exogenously given in the constructor, default is 1 minute. It's also a
constant during the Bar instances' lifespan.
"""
head = 'BAR'
span = timedelta(minutes=1)
start = -1
end = -1
bid_open = -1
bid_high = -1
bid_low = -1
bid_close = -1
ask_open = -1
ask_high = -1
ask_low = -1
ask_close = -1
instrument = ''
def __init__(self, tick, start, span=timedelta(minutes=1)):
"""
constructor.
parameters
----------
* tick: Tick(BaseDataContainer) object; the first tick used to
initialze the bar object.
* start: datetime.datetime() object; the starting mark.
* span: datetime.timedelta() object; specifies time span of this bar,
default value is 1 minute.
"""
self.span = span
self.start = start
self.end = start + span
try:
# !uses bid/ask and the instrument from tick; start is transferred separately.
assert type(tick) == Tick
bid, ask = tick.bid, tick.ask
# record the instrument ticker so that BarEvent() can read it later.
self.instrument = tick.instrument
self.bid_open, self.bid_high, self.bid_low, self.bid_close = \
bid, bid, bid, bid
self.ask_open, self.ask_high, self.ask_low, self.ask_close = \
ask, ask, ask, ask
except AssertionError:
msg = '[BAR]: Unable to construct bar; ' + \
'bar must be initialized with a tick.'
raise OANDA_DataConstructorError(msg)
except Exception,e:
msg = '[BAR]: Unable to construct bar; ' + str(e)
raise OANDA_DataConstructorError(msg)
def view(self):
"""
view data method.
"""
bar_view = {
'start': str(self.start),
'end': str(self.end),
'bid_ohlc': [self.bid_open, self.bid_high,
self.bid_low, self.bid_close],
'ask_ohlc': [self.ask_open, self.ask_high,
self.ask_low, self.ask_close]
}
print json.dumps(bar_view,
indent=4,
sort_keys=True)
def push(self, tick):
"""
push new tick into bar, update bar data.
parameters
----------
* tick: Tick(BaseDataContainer) object; the tick to be updated.
returnCode
----------
* 1: tick was updated.
* 0: tick was not updated, since tick.time not in bar time range.
"""
tick_time = tick.dt
if tick_time < self.end and tick_time >= self.start:
bid, ask = tick.bid, tick.ask
self.bid_high, self.bid_low, self.bid_close = \
max(self.bid_high, bid), min(self.bid_low, bid), bid
self.ask_high, self.ask_low, self.ask_close = \
max(self.ask_high, ask), min(self.ask_low, ask), ask
return 1
else:
return 0
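# A hedged usage sketch (illustration only, not part of the original module).
# The hypothetical helper below seeds a one-minute Bar with a first Tick and
# updates it with a second Tick ten seconds later; the tick values reuse the
# sample from the Tick docstring with slightly shifted price/time.
def _example_bar_usage():
    first = Tick({u'tick': {u'ask': 1.2408, u'instrument': u'EUR_USD',
                            u'bid': 1.24065, u'time': u'1438665311084691'}})
    bar = Bar(first, start=first.dt, span=timedelta(minutes=1))
    later = Tick({u'tick': {u'ask': 1.2410, u'instrument': u'EUR_USD',
                            u'bid': 1.24080, u'time': u'1438665321084691'}})
    updated = bar.push(later)   # returns 1: the tick falls inside [start, end)
    bar.view()
    return updated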
class HistBar(BaseDataContainer):
"""
"""
head = 'HISTBAR'
body = pd.DataFrame()
pass
#----------------------------------------------------------------------
# Event containers.
class BaseEvent(object):
"""
Base level event object.
"""
head = 'ETYPE_NONE'
body = dict()
def view(self):
""" print method. """
print '[{}]: {}'.format(self.head, self.body)
class MarketEvent(BaseEvent):
"""
Market impulse event.
parameters
----------
* is_empty: boolean, False as default;
specify whether this is mere a heartbeat event.
* data: dict; the market data.
"""
head = 'ETYPE_MKT'
is_heartbeat = False
def __init__(self, data=dict(), is_heartbeat=False):
self.body = data
self.is_heartbeat = is_heartbeat
class BarEvent(BaseEvent):
"""
Bar data event, body is a Bar() object.
parameters
----------
* data: Bar() object.
"""
head = 'ETYPE_BAR'
instrument = ''
def __init__(self, data):
try: # examine the type of input data.
assert type(data) == Bar
self.body = data
self.instrument = data.instrument
except AssertionError:
msg = '[BAREVENT]: Unable to construct bar event; ' + \
'input data must be a Bar object.'
raise OANDA_DataConstructorError(msg)
class SignalEvent(BaseEvent):
"""
"""
head = 'ETYPE_SGNL'
pass
class OrderEvent(BaseEvent):
"""
"""
head = 'ETYPE_ODR'
pass
class FillEvent(BaseEvent):
"""
"""
head = 'ETYPE_FLL'
pass
class EventQueue(object):
"""
Generic EventQueue implementation, maintains main event
queue of the system; registers functions to specific events;
pushes events and distributes them to listeners.
privates
--------
* _queue: Queue.Queue object; the main event queue.
* _active_flag: boolean; whether active or not.
* _thrd: threading.Thread object; event engine thread.
* _listeners: dictionary;
mapping from event type to handlers' list,
shaped like: {'ETYPE_ODR': [<func1>, <func2>],
'ETYPE_SGNL': [<func_handle_sign>]}
"""
# event queue and timer instances.
_queue = Queue()
# empty place holders.
_active_flag = False
_thrd = None
_listeners = dict()
def __init__(self):
""" Constructor """
self._thrd = Thread(target=self.distribute, name='_THRD_EVENT')
def put(self, event):
"""
put an event into queue
parameters
----------
* event: a BaseEvent or its subclass instance.
"""
self._queue.put(event)
def open(self):
""" open the queue. """
self._active_flag = True
self._thrd.start()
def kill(self):
""" suspend engine. """
if self._active_flag:
self._active_flag = False
self._thrd.join()
def bind(self, event_head, func):
"""
Register a specific function as a listener for some
type of events; events of this type will be distributed
to the function when they are taken from the queue.
parameters
----------
* event_head: string; an 'ETYPE_###' declaration.
* func: function; note that it takes the event as its only argument,
i.e. f(event).
"""
if event_head not in self._listeners:
self._listeners[event_head] = []
if func not in self._listeners[event_head]:
self._listeners[event_head].append(func)
return self._listeners
def distribute(self):
""" distribute events by listeners mapping. """
while self._active_flag:
try:
# block with a timeout so that kill() can break out of the loop.
event = self._queue.get(block=True, timeout=1)
if event.head in self._listeners:
[f(event) for f in self._listeners[event.head]]
except Empty:
pass
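# A hedged usage sketch (illustration only, not part of the original module).
# The hypothetical helper below wires a trivial handler to market events,
# opens the queue, pushes one MarketEvent and shuts the engine down. It
# assumes distribute() uses a timeout-based get() so that kill() can return.
def _example_event_queue_usage():
    q = EventQueue()
    def on_mkt_event(event):
        # invoked by distribute() for every event whose head is 'ETYPE_MKT'
        print '[HANDLER]: got market event', event.body
    q.bind('ETYPE_MKT', on_mkt_event)
    q.open()
    q.put(MarketEvent(data={'tick': {'bid': 1.1, 'ask': 1.2}}))
    time.sleep(2)   # give the worker thread a chance to distribute the event
    q.kill()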
#----------------------------------------------------------------------
# OANDA Api class
class PyApi(object):
"""
Python based OANDA Api object.
The main bridge that connects local scripts & logics with OANDA server.
It acts as a comprehensive data generator, cleaner and distributer;
and also includes trading methods.
Wraps
- OANDA Get data requests, returns the response.
- Make stream requests, iterate data lines in stream resp.
- Trading requests.
- Clean data functionalities.
- EventQueue that distributes data, received and cleaned, to listening
handlers/strategies.
PyApi is initialized with a dict of {event type |-> event queue} mappings
and a Config json. Note that the config must be complete, in that
once constructed, the private variables like request headers, tokens, etc.
become constant values (inherited from Config). These privates will be
consistently used whenever we talk to the OANDA server.
The names(keys) of EventQueue in construction parameter queues should be
lower case string '###', taken from 'ETYPE_###'. Because these keys are
directly referred to in this manner in the scripts.
privates
--------
* _config: Config() object; a container of all useful settings when making
requests.
* _event_queues: dictionary; a mapping from event type abbreivation to
coresponing event queue that stores these events.
- example: _event_queues = {
'mkt': None, # ETYPE_MKT
'bar': None # ETYPE_BAR
}
* _ssl, _domain, _domain_stream, _version, _header, _account_id:
boolean, string, string, string, dictionary, integer;
just private references to the items in Config. See the docs of Config().
* _session: requests.session() object.
* _curr_bar: Bar() object; the current bar data maintained while streaming
market data. It is dynamically updated and reset on new market impulses.
examples
--------
>> q1 = EventQueue()
>> q2 = EventQueue()
>> q = {'mkt': q1, 'bar': q2}
>> mystrat = BaseStrategy()
>> myapi = PyApi(Config(), q)
>> q1.bind('ETYPE_MKT', myapi.on_market_impulse)
>> q2.bind('ETYPE_BAR', mystrat.on_bar)
>> ... # more binding to handlers.
>> q1.open()
>> q2.open()
>> myapi.make_stream('EUR_USD')
>> ...
"""
_config = Config()
_event_queues = {
'mkt': None, # ETYPE_MKT
'bar': None # ETYPE_BAR
}
# request stuffs
_ssl = False
_domain = ''
_domain_stream = ''
_version = 'v1'
_header = dict()
_account_id = None
_session = requests.session()
# pointer to the bar that was currently maintained.
_curr_bar = None
def __init__(self, config, queues):
"""
Constructor.
parameters
----------
* config: Config object; specifies user and connection configs.
* queues: A dictionary of Event Queue objects; shaped like
{
'mkt': q1, # ETYPE_MKT
'bar': q2 # ETYPE_BAR
}
as containers of the events loaded from the stream.
"""
self._event_queues = queues
if config.body:
self._config = config
self._ssl = config.body['ssl']
self._domain = config.body['domain']
self._domain_stream = config.body['domain_stream']
self._version = config.body['version']
self._header = config.body['header']
self._account_id = config.body['account_id']
# configure protocol
if self._ssl:
self._domain = 'https://' + self._domain
self._domain_stream = 'https://' + self._domain_stream
else:
self._domain = 'http://' + self._domain
self._domain_stream = 'http://' + self._domain_stream
def _access(self, url, params, method='GET'):
"""
request specific data at given url with parameters.
parameters
----------
* url: string.
* params: dictionary.
* method: string; 'GET' or 'POST', request method.
"""
try:
assert type(url) == str
assert type(params) == dict
except AssertionError:
raise OANDA_RequestError('[API]: Invalid url or parameter input.')
if not self._session:
s = requests.session()
else: s = self._session
# prepare and send the request.
try:
if method == 'GET':
req = requests.Request(method,
url = url,
headers = self._header,
params = params)
elif method == 'POST':
req = requests.Request(method,
url = url,
headers = self._header,
data = params)
# for POST, params should be included in request body.
prepped = s.prepare_request(req) # prepare the request
resp = s.send(prepped, stream=False, verify=True)
if method == 'GET':
assert resp.status_code == 200
elif method == 'POST':
# note that respcode for POST is still 200!
assert resp.status_code == 200
return resp
except AssertionError:
msg = '[API]: Bad request, unexpected response status: ' + \
str(resp.status_code)
raise OANDA_RequestError(msg)
pass
except Exception,e:
msg = '[API]: Bad request.' + str(e)
raise OANDA_RequestError(msg)
def _put_market_event(self, data):
"""
put the market impulse into the event queue.
parameters:
----------
* data: dictionary; resp.json() object.
"""
# create event.
try:
if 'heartbeat' in data:
event = MarketEvent(data, is_heartbeat=True)
elif 'tick' in data:
event = MarketEvent(data, is_heartbeat=False)
self._event_queues['mkt'].put(event)
return 1
except Exception,e:
msg = '[API]: Failed to put market event; ' + str(e)
print msg
return -1
def _put_bar_event(self, bar=-1):
"""
put currently maintained bar into event queue.
parameters:
----------
* bar: Bar() object; the data that is to be put into BarEvent()
event object. -1 as Default: point to the currently maintained bar,
i.e. self._curr_bar.
"""
try:
if bar == -1:
if self._curr_bar:
event = BarEvent(data = self._curr_bar)
else:
event = BarEvent(data = bar)
self._event_queues['bar'].put(event)
return 1
except Exception,e:
msg = '[API]: Failed to put bar event; ' + str(e)
print msg
return -1
#----------------------------------------------------------------------
# make market data stream.
def make_stream(self, instruments):
"""
subscribe market impulses and make stream.
parameters
----------
* instruments: string; the ticker(s) of instrument(s), separated by
commas. Example: 'EUR_USD, USD_CAD'
"""
try:
assert type(instruments) == str
except AssertionError:
raise OANDA_RequestError('[API]: Invalid instruments input.')
s = requests.session()
url = '{}/{}/prices'.format(self._domain_stream, self._version)
params = {
'accountId': self._account_id,
'instruments': instruments
}
# prepare and send the request.
try:
req = requests.Request('GET',
url = url,
headers = self._header,
params = params)
prepped = s.prepare_request(req) # prepare the request
resp = s.send(prepped, stream=True, verify=True)
assert resp.status_code == 200
print '[API]: Stream established.'
except AssertionError:
msg = '[API]: Bad request, unexpected response status: ' + \
str(resp.status_code)
raise OANDA_RequestError(msg)
except Exception,e:
msg = '[API]: Bad request.' + str(e)
raise OANDA_RequestError(msg)
# Iter-lines in resp.
for line in resp.iter_lines(90):
if line:
try:
data = json.loads(line)
#!! put market impulse into the event queue.
self._put_market_event(data)
except Exception,e:
print '[API]: Stream iterLine Error, ' + str(e)
pass
#----------------------------------------------------------------------
# get methods.
#----------------------------------------------------------------------
# Market side
def get_instruments(self):
"""
get list of instruments.
"""
url = '{}/{}/instruments'.format(self._domain, self._version)
params = {
'accountId': self._account_id
}
try:
resp = self._access(url=url, params=params)
assert len(resp.json()) > 0
return resp.json()
except AssertionError: return 0
def get_history(self, instrument, granularity, candle_format='bidask',
count=500, daily_alignment=None,
alignment_timezone=None, weekly_alignment="Monday",
start = None, end = None):
"""
retrieve historical bar data of certain instrument.
parameters
----------
* instrument: string; the ticker of instrument.
* granularity: string; sample rate of bar data. Examples:
- 'S10' 10-seconds bar.
- 'M1' 1-minute bar.
- 'H3' 3-hours bar.
- 'D'/'W'/'M' day/week/month(one).
* candle_format: string; candlestick representation, either:
- 'bidask' (default), the Bid/Ask based candlestick.
- 'midpoint', the midpoint based candlestick.
* count: integer; the number of bars to be retrieved, maximum is
5000, should not be specified if both start and end are specified.
* daily_alignment: integer; the hour of day used to align candles
with hourly, daily, weekly, or monthly granularity. Note that the
value specified here is interpreted as an hour in the timezone
set through the alignment_timezone parameter.
* alignment_timezone: string; timezone used for the dailyAlignment.
* weekly_alignment: string; the day of the week used to align candles
with weekly granularity.
* start, end: string; timestamp for the range of candles requested.
"""
url = '{}/{}/candles'.format(self._domain, self._version)
params = {
'accountId': self._account_id,
'instrument': instrument,
'granularity': granularity,
'candleFormat': candle_format,
'count': count,
'dailyAlignment': daily_alignment,
'alignmentTimezone': alignment_timezone,
'weeklyAlignment': weekly_alignment,
'start': start,
'end': end
}
try:
resp = self._access(url=url, params=params)
assert len(resp.json()) > 0
return resp.json()
except AssertionError: return 0
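# Illustrative usage (a sketch; instrument, granularity and count are example
# values, not defaults):
#
#   candles = api.get_history(instrument='EUR_USD', granularity='M1', count=100)
#
# On success this returns the decoded JSON (typically containing a 'candles' list
# of bid/ask bars); an empty response yields 0.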
def get_prices(self, instruments):
"""
get a snapshot of current prices (without using the streaming API).
parameters
----------
* instruments: string.
"""
url = '{}/{}/prices'.format(self._domain, self._version)
params = {
'accountId': self._account_id,
'instruments': instruments
}
try:
resp = self._access(url=url, params=params)
assert len(resp.json()) > 0
return resp.json()
except AssertionError: return 0
#----------------------------------------------------------------------
# Trader side
def create_sandbox_acc(self):
"""
Create a sandbox test account.
"""
if self._ssl == False:
url = '{}/{}/accounts'.format(self._domain, self._version)
try:
resp = self._access(url=url, params=dict(), method='POST')
assert len(resp.json()) > 0
return resp.json()
except AssertionError: return 0
else:
msg = '[API]: create_sandbox_acc() method can only be invoked ' + \
'in the sandbox environment.'
raise OANDA_EnvError(msg)
def get_account_info(self, account_id=-1):
"""
Get information on a specific account.
parameters
----------
* account_id: string or integer; default is -1
(use account_id in config)
"""
if account_id == -1:
account_id = self._config.body['account_id']
url = '{}/{}/accounts/{}'.format(self._domain,
self._version, account_id)
try:
resp = self._access(url=url, params=dict(), method='GET')
assert len(resp.json()) > 0
return resp.json()
except AssertionError: return 0
def get_positions(self):
"""
Get a list of all positions.
"""
url = '{}/{}/accounts/{}/positions'.format(self._domain,
self._version, self._account_id)
params = {
'accountId': self._account_id,
}
try:
resp = self._access(url=url, params=params)
assert len(resp.json()) > 0
return resp.json()
except AssertionError: return 0
def get_orders(self, instrument=None, count=50):
"""
Get all PENDING orders for an account.
Note that pending take-profit or stop-loss orders are recorded
in the open trade object.
parameters
----------
* instrument: string; default is None (all instruments).
* count: integer; maximum number of open orders to return.
"""
url = '{}/{}/accounts/{}/orders'.format(self._domain,
self._version, self._account_id)
params = {
'instrument': instrument,
'count': count
}
try:
resp = self._access(url=url, params=params)
assert len(resp.json()) > 0
return resp.json()
except AssertionError: return 0
def get_trades(self, instrument=None, count=50):
"""
Get list of open trades.
parameters
----------
* instrument: string; default is None (all instruments).
* count: integer; maximum number of open trades to return.
"""
url = '{}/{}/accounts/{}/trades'.format(self._domain,
self._version, self._account_id)
params = {
'instrument': instrument,
'count': count
}
try:
resp = self._access(url=url, params=params)
assert len(resp.json()) > 0
return resp.json()
except AssertionError: return 0
def place_order(self, instrument, side, units, price, type):
"""
place an order through OANDA API.
parameters
----------
* instrument: string; the name of instrument.
* side: string; 'buy'/'sell'
* units: integer; the amount to be traded.
* type: string;
- 'market': market order, trade by current market price.
- 'limit': limit order, trade when crossing specified price.
* price: float; the price at which to execute a limit order.
"""
url = '{}/{}/accounts/{}/orders'.format(self._domain,
self._version, self._account_id)
params = {
'instrument': instrument,
'side': side,
'units': units,
'price': price,
'type': type,
'expiry' : None
}
try:
resp = self._access(url=url, params=params, method='POST')
assert len(resp.json()) > 0
return resp.json()
except AssertionError: return 0
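# Illustrative usage (a sketch; instrument, side and units are example values):
#
#   resp = api.place_order(instrument='EUR_USD', side='buy', units=100,
#                          price=None, type='market')
#
# Limit orders additionally need a meaningful price; note that this wrapper always
# sends 'expiry': None, which the server may reject for limit orders.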
#----------------------------------------------------------------------
# on market event
def on_market_impulse(self, event):
"""
callback function on market impulses;
filters and cleans incoming tick data.
parameters
----------
* event: MarketEvent() object.
returnCode
----------
* 0: a tick was appended to current bar.
* 1: pop current bar, start a new bar.
* -1: empty heartbeat that falls inside the current bar; nothing is emitted.
"""
if event.is_heartbeat == False: # Not an empty heartbeat.
tick = Tick(event.body)
if not self._curr_bar: # create a new bar.
bar = Bar(tick, tick.dt)
self._curr_bar = bar
else:
bar = self._curr_bar
if tick.dt < bar.end and tick.dt >= bar.start:
bar.push(tick)
return 0
else:
self._put_bar_event()
#!!! this line should be executed before
#!!! the reset of self._curr_bar pointer.
new_start = bar.end
self._curr_bar = Bar(tick, new_start) # create a new bar.
return 1
else: # empty heartbeat.
hb = HeartBeat(event.body)
if not self._curr_bar:
pass
else:
bar = self._curr_bar
if hb.dt < bar.end and hb.dt >= bar.start:
pass # this is only an empty heartbeat.
return -1
else:
if bar:
self._put_bar_event()
#!!! this line should be executed before
#!!! the reset of self._curr_bar pointer.
new_start = bar.end
tick = Tick({'tick':
{
'ask': bar.ask_close,
'bid': bar.bid_close,
'instrument': bar.instrument,
'time': 0 # only a placeholder.
}})
self._curr_bar = Bar(tick, new_start)
return 1
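# Bar rollover sketch: for a bar spanning [start, end), a tick with start <= dt < end
# is pushed onto the current bar (return 0); a tick at or beyond bar.end emits a
# BarEvent for the finished bar and opens a new bar anchored at the old bar.end
# (return 1). Heartbeats only trigger a rollover when they arrive after the current
# bar should already have closed; otherwise they return -1.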
| mit |
giserh/mpld3 | examples/custom_plugin.py | 21 | 2557 | """
Defining a Custom Plugin
========================
Test the custom plugin demoed on the `Pythonic Perambulations
<http://jakevdp.github.io/blog/2014/01/10/d3-plugins-truly-interactive/>`_
blog. Hover over the points to see the associated sinusoid.
Use the toolbar buttons at the bottom-right of the plot to enable zooming
and panning, and to reset the view.
"""
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import mpld3
from mpld3 import plugins, utils
class LinkedView(plugins.PluginBase):
"""A simple plugin showing how multiple axes can be linked"""
JAVASCRIPT = """
mpld3.register_plugin("linkedview", LinkedViewPlugin);
LinkedViewPlugin.prototype = Object.create(mpld3.Plugin.prototype);
LinkedViewPlugin.prototype.constructor = LinkedViewPlugin;
LinkedViewPlugin.prototype.requiredProps = ["idpts", "idline", "data"];
LinkedViewPlugin.prototype.defaultProps = {}
function LinkedViewPlugin(fig, props){
mpld3.Plugin.call(this, fig, props);
};
LinkedViewPlugin.prototype.draw = function(){
var pts = mpld3.get_element(this.props.idpts);
var line = mpld3.get_element(this.props.idline);
var data = this.props.data;
function mouseover(d, i){
line.data = data[i];
line.elements().transition()
.attr("d", line.datafunc(line.data))
.style("stroke", this.style.fill);
}
pts.elements().on("mouseover", mouseover);
};
"""
def __init__(self, points, line, linedata):
if isinstance(points, matplotlib.lines.Line2D):
suffix = "pts"
else:
suffix = None
self.dict_ = {"type": "linkedview",
"idpts": utils.get_id(points, suffix),
"idline": utils.get_id(line),
"data": linedata}
fig, ax = plt.subplots(2)
# scatter periods and amplitudes
np.random.seed(0)
P = 0.2 + np.random.random(size=20)
A = np.random.random(size=20)
x = np.linspace(0, 10, 100)
data = np.array([[x, Ai * np.sin(x / Pi)]
for (Ai, Pi) in zip(A, P)])
points = ax[1].scatter(P, A, c=P + A,
s=200, alpha=0.5)
ax[1].set_xlabel('Period')
ax[1].set_ylabel('Amplitude')
# create the line object
lines = ax[0].plot(x, 0 * x, '-w', lw=3, alpha=0.5)
ax[0].set_ylim(-1, 1)
ax[0].set_title("Hover over points to see lines")
# transpose line data and add plugin
linedata = data.transpose(0, 2, 1).tolist()
plugins.connect(fig, LinkedView(points, lines[0], linedata))
mpld3.show()
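# To export the interactive figure instead of opening a browser, mpld3 can also
# serialize it (a sketch; the output filename is arbitrary):
#
#   html = mpld3.fig_to_html(fig)
#   mpld3.save_html(fig, "linked_view.html")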
| bsd-3-clause |
iris-edu/ispaq | ispaq/concierge.py | 1 | 68757 | """
ISPAQ Data Access Expediter.
:copyright:
Mazama Science
:license:
GNU Lesser General Public License, Version 3
(http://www.gnu.org/copyleft/lesser.html)
"""
from __future__ import (absolute_import, division, print_function)
import os
import sys
import re
import glob
import math
import fileinput
import fnmatch
import tempfile
import pandas as pd
import numpy as np
from distutils.version import StrictVersion
import obspy
from obspy.clients.fdsn import Client
from obspy.clients.fdsn.header import URL_MAPPINGS
from obspy import UTCDateTime
# ISPAQ modules
from .user_request import UserRequest
from . import irisseismic
from . import utils
# Custom exceptions
class NoAvailableDataError(Exception):
"""No matching data are available."""
class Concierge(object):
"""
ISPAQ Data Access Expediter.
:type user_request: :class:`~ispaq.concierge.user_request`
:param user_request: User request containing the combination of command-line
arguments and information from the parsed user preferences file.
:rtype: :class:`~ispaq.concierge` or ``None``
:return: ISPAQ Concierge.
.. rubric:: Example
TODO: include doctest examples
"""
def __init__(self, user_request=None, logger=None):
"""
Initializes the ISPAQ data access expediter.
See :mod:`ispaq.concierge` for all parameters.
"""
# Keep the entire UserRequest and logger
self.user_request = user_request
self.logger = logger
# Copy important UserRequest properties to the Concierge for simpler access
self.requested_starttime = user_request.requested_starttime
self.requested_endtime = user_request.requested_endtime
self.metric_names = user_request.metrics
self.sncl_patterns = user_request.sncls
self.function_by_logic = user_request.function_by_logic
self.logic_types = user_request.function_by_logic.keys()
# Individual elements from the Preferences: section of the preferences file
if (os.path.isdir(user_request.csv_dir)):
self.csv_dir = user_request.csv_dir
else:
self.logger.warning("csv_dir %s does not exist, creating directory" % user_request.csv_dir)
try:
os.makedirs(user_request.csv_dir)
self.csv_dir = user_request.csv_dir
except OSError as exc:
self.logger.warning("Cannot create csv_dir %s, defaulting to current directory" % user_request.csv_dir)
self.csv_dir = "."
if (os.path.isdir(user_request.psd_dir)):
self.psd_dir = user_request.psd_dir
else:
self.logger.warning("psd_dir %s does not exist, creating directory" % user_request.psd_dir)
try:
os.makedirs(user_request.psd_dir)
self.psd_dir = user_request.psd_dir
except OSError as exc:
self.logger.warning("Cannot create psd_dir %s, defaulting to current directory" % user_request.psd_dir)
self.psd_dir = "."
if (os.path.isdir(user_request.pdf_dir)):
self.pdf_dir = user_request.pdf_dir
else:
self.logger.warning("pdf_dir %s does not exist, creating directory" % user_request.pdf_dir)
try:
os.makedirs(user_request.pdf_dir)
self.pdf_dir = user_request.pdf_dir
except OSError as exc:
self.logger.warning("Cannot create pdf_dir %s, defaulting to current directory" % user_request.pdf_dir)
self.pdf_dir = "."
self.output = user_request.output
self.db_name = user_request.db_name
self.pdf_type = user_request.pdf_type
self.pdf_interval = user_request.pdf_interval
self.plot_include = user_request.plot_include
self.sigfigs = user_request.sigfigs
self.sncl_format = user_request.sncl_format
self.netOrder = int(int(self.sncl_format.index("N"))/2)
self.staOrder = int(int(self.sncl_format.index("S"))/2)
self.locOrder = int(int(self.sncl_format.index("L"))/2)
self.chanOrder = int(int(self.sncl_format.index("C"))/2)
# Keep a /dev/null pipe handy in case we want to bit-dump output
self.dev_null = open(os.devnull,"w")
# Add dataselect clients and URLs or reference a local file
self.dataselect_type = None
if user_request.dataselect_url in URL_MAPPINGS.keys():
# Get data from FDSN dataselect service
self.dataselect_url = URL_MAPPINGS[user_request.dataselect_url]
self.dataselect_type = "fdsnws"
try:
self.dataselect_client = Client(self.dataselect_url)
except Exception as e:
err_msg = e
self.logger.critical(err_msg)
raise SystemExit
if user_request.station_url is not None:
if user_request.station_url != user_request.dataselect_url:
self.logger.warning("Station_url should be the same as dataselect_url when retrieving data from FDSN webservices. Station_url '%s' does not match dataselect_url '%s'"
% (user_request.station_url, user_request.dataselect_url))
elif user_request.dataselect_url == "IRISPH5":
self.dataselect_url = "http://service.iris.edu"
self.dataselect_type = "ph5ws"
self.dataselect_client = "PH5"
if user_request.station_url is not None:
if user_request.station_url != user_request.dataselect_url:
self.logger.warning("Station_url should be the same as dataselect_url when retreiving data from IRIS PH5 webservices. Station_url '%s' does not match dataselect_url '%s'"
% (user_request.station_url, user_request.dataselect_url))
elif "http://" in user_request.dataselect_url or "https://" in user_request.dataselect_url:
self.dataselect_url = user_request.dataselect_url
self.dataselect_type = "fdsnws"
try:
self.dataselect_client = Client(self.dataselect_url)
except Exception as e:
err_msg = e
self.logger.critical(err_msg)
raise SystemExit
if user_request.station_url is not None:
if user_request.station_url != user_request.dataselect_url:
self.logger.warning("Station_url should be the same as dataselect_url when retreiving data from FDSN webservices. Station_url '%s' does not match dataselect_url '%s'"
% (user_request.station_url, user_request.dataselect_url))
else:
if os.path.exists(os.path.abspath(user_request.dataselect_url)):
# Get data from local miniseed files
self.dataselect_url = os.path.abspath(user_request.dataselect_url)
self.dataselect_client = None
else:
err_msg = "Cannot find dataselect_url: '%s'" % user_request.dataselect_url
self.logger.critical(err_msg)
raise SystemExit
# Add station clients and URLs or reference a local file
if user_request.station_url is None:
if ("http://" in self.dataselect_url or "https://" in self.dataselect_url):
self.station_url = self.dataselect_url
self.logger.info("Using station_url = %s" % self.dataselect_url)
if (self.dataselect_type == "ph5ws"):
self.station_type = "ph5ws"
self.station_client = "PH5"
else:
try:
self.station_client = Client(self.station_url)
except Exception as e:
self.logger.warning(e)
self.logger.info("Metrics that require metadata information cannot be calculated")
self.station_url = None
self.station_client = None
else:
self.logger.info("No station_url found")
self.logger.info("Metrics that require metadata information cannot be calculated")
self.station_url = None
self.station_client = None
elif user_request.station_url in URL_MAPPINGS.keys():
self.station_url = URL_MAPPINGS[user_request.station_url]
try:
self.station_client = Client(self.station_url)
except Exception as e:
self.logger.warning(e)
self.logger.info("Metrics that require metadata information cannot be calculated")
self.station_url = None
self.station_client = None
elif user_request.station_url == "IRISPH5":
self.station_url = "http://service.iris.edu"
self.station_type = "ph5ws"
self.station_client = "PH5"
elif "http://" in user_request.station_url or "https://" in user_request.station_url:
self.station_url = user_request.station_url
try:
self.station_client = Client(self.station_url)
except Exception as e:
self.logger.warning(e)
self.logger.info("Metrics that require metadata information cannot be calculated")
self.station_url = None
self.station_client = None
else:
if os.path.exists(os.path.abspath(user_request.station_url)):
# Get data from local StationXML files
self.station_url = os.path.abspath(user_request.station_url)
self.station_client = None
else:
err_msg = "Cannot find station_url '%s'" % user_request.station_url
self.logger.warning("Cannot find station_url '%s'" % user_request.station_url)
self.logger.info("Metrics that require metadata information cannot be calculated")
self.station_url = None
self.station_client = None
# Add event clients and URLs or reference a local file
event_metrics = ["sample_snr","cross_talk","polarity_check","orientation_check"]
if user_request.event_url is None:
if any(map(lambda x: x in self.metric_names, event_metrics)): # only warn if calculating event metrics
self.logger.warning("event_url is None or not specified")
self.logger.info("Metrics that require event information cannot be calculated")
self.event_url = None # no event service or xml, some metrics cannot be run
self.event_client = None
elif user_request.event_url == "USGS":
self.event_url = "https://earthquake.usgs.gov"
try:
self.event_client = Client(self.event_url)
except Exception as e:
if any(map(lambda x: x in self.metric_names, event_metrics)): # only warn if calculating event metrics
self.logger.warning(e)
self.logger.info("Metrics that require event information cannot be calculated")
self.event_url = None
self.event_client = None
elif user_request.event_url in URL_MAPPINGS.keys():
self.event_url = URL_MAPPINGS[user_request.event_url]
try:
self.event_client = Client(self.event_url)
except Exception as e:
if any(map(lambda x: x in self.metric_names, event_metrics)): # only warn if calculating event metrics
self.logger.warning(e)
self.logger.info("Metrics that require event information cannot be calculated")
self.event_url = None
self.event_client = None
elif "http://" in user_request.event_url or "https://" in user_request.event_url:
self.event_url = user_request.event_url
try:
self.event_client = Client(self.event_url)
except Exception as e:
if any(map(lambda x: x in self.metric_names, event_metrics)): # only warn if calculating event metrics
self.logger.warning(e)
self.logger.info("Metrics that require event information cannot be calculated")
self.event_url = None
self.event_client = None
else:
if os.path.exists(os.path.abspath(user_request.event_url)):
# Get data from local QUAKEML files
self.event_url = os.path.abspath(user_request.event_url)
self.event_client = None
else:
if any(map(lambda x: x in self.metric_names, event_metrics)): # only warn if calculating event metrics
self.logger.warning("Cannot find event_url '%s'" % user_request.event_url)
self.logger.warning("Metrics that require event information cannot be calculated")
self.event_url = None
self.event_client = None
# Deal with potential start = None
if self.requested_starttime is None and self.dataselect_client is None:
if self.requested_endtime is None:
self.logger.info("No start or end time requested. Start and end time will be determined from local data file extents")
else:
self.logger.info("No start time requested. Start time will be determined from local data file extents")
self.fileDates = []
for sncl_pattern in self.sncl_patterns:
matching_files = []
fpattern1 = '%s' % (sncl_pattern + '.[12][0-9][0-9][0-9].[0-9][0-9][0-9]')
fpattern2 = '%s' % (fpattern1 + '.[A-Z]')
for root, dirnames, fnames in os.walk(self.dataselect_url):
for fname in fnmatch.filter(fnames, fpattern1) + fnmatch.filter(fnames, fpattern2):
matching_files.append(os.path.join(root,fname))
if (len(matching_files) == 0):
continue
else:
#self.logger.debug("Found files: \n %s" % '\n '.join(matching_files))
for _file in matching_files:
try:
_fileSNCL = _file.split("/")[-1]
_fileYear = _fileSNCL.split(".")[4]
_fileJday = _fileSNCL.split(".")[5]
_fileDate = UTCDateTime("-".join([_fileYear,_fileJday]))
self.fileDates.append([_fileDate])
except Exception as e:
self.logger.debug(e)
self.logger.debug("Can't extract date from %s, %s" % (_file,e))
continue
if (len(self.fileDates) == 0):
self.logger.critical("No start date could be determined. No files found")
raise SystemExit
else:
self.requested_starttime = min(self.fileDates)[0]
if self.requested_endtime is None:
self.requested_endtime = max(self.fileDates)[0] +86400 # add one day
self.logger.info("Start time %s" % self.requested_starttime.strftime("%Y-%m-%dT%H:%M:%S"))
self.logger.info("End time %s" % self.requested_endtime.strftime("%Y-%m-%dT%H:%M:%S"))
elif self.requested_starttime is None:
self.logger.critical("--starttime must be specified for dataselect_url %s" % self.station_url)
raise SystemExit
# Output information
filename_metrics = ''
if len(self.user_request.requested_metric_set.split(',')) > 1:
for metric in sorted(self.user_request.requested_metric_set.split(',')):
if metric != 'pdf' and metric != 'psd_corrected':
filename_metrics = filename_metrics + metric + '-'
filename_metrics = filename_metrics[:-1]
else:
filename_metrics = self.user_request.requested_metric_set
file_base = '%s_%s_%s_' % (filename_metrics,
self.user_request.requested_sncl_set,
self.requested_starttime.date)
file_base = file_base.replace("*","x")
file_base = file_base.replace("?","x")
inclusiveEndtime = self.requested_endtime-1
if(inclusiveEndtime.date != self.requested_starttime.date):
file_base = file_base + '%s' % (inclusiveEndtime.date)
else:
file_base = file_base[:-1]
self.output_file_base = self.csv_dir + '/' + file_base
# Availability dataframe is stored if it is read from a local file
self.availability = None
self.initial_availability = None
# Filtered availability dataframe is stored for potential reuse
self.filtered_availability = None
# Add local response files if used
if user_request.resp_dir is None: # use irisws/evalresp
self.resp_dir = None # use irisws/evalresp
elif user_request.resp_dir in URL_MAPPINGS.keys(): # use irisws/evalresp
self.resp_dir = None
else:
if os.path.exists(os.path.abspath(user_request.resp_dir)):
self.resp_dir = os.path.abspath(user_request.resp_dir) # directory where RESP files are located
# file pattern: RESP.<NET>.<STA>.<LOC>.<CHA> or RESP.<STA>.<NET>.<LOC>.<CHA>
else:
err_msg = "Cannot find resp_dir: '%s'" % user_request.resp_dir
self.logger.error(err_msg)
raise ValueError
self.logger.debug("starttime %s, endtime %s", self.requested_starttime.strftime("%Y-%m-%dT%H:%M:%S"), self.requested_endtime.strftime("%Y-%m-%dT%H:%M:%S"))
self.logger.debug("metric_names %s", self.metric_names)
self.logger.debug("sncl_patterns %s", self.sncl_patterns)
self.logger.debug("dataselect_url %s", self.dataselect_url)
self.logger.debug("dataselect_type %s", self.dataselect_type)
self.logger.debug("station_url %s", self.station_url)
self.logger.debug("event_url %s", self.event_url)
self.logger.debug("resp_dir %s", self.resp_dir)
self.logger.debug("output %s", self.output)
self.logger.debug("db_name %s", self.db_name)
self.logger.debug("csv_dir %s", self.csv_dir)
self.logger.debug("pdf_dir %s", self.pdf_dir)
self.logger.debug("psd_dir %s", self.psd_dir)
self.logger.debug("pdf_type %s", self.pdf_type)
self.logger.debug("pdf_interval %s", self.pdf_interval)
self.logger.debug("plot_include %s", self.plot_include)
self.logger.debug("sigfigs %s", self.sigfigs)
self.logger.debug("sncl_format %s", self.sncl_format)
def get_sncl_pattern(self, netIn, staIn, locIn, chanIn):
snclList = list()
snclList.insert(self.netOrder, netIn)
snclList.insert(self.staOrder, staIn)
snclList.insert(self.locOrder, locIn)
snclList.insert(self.chanOrder, chanIn)
sncl_pattern = "%s.%s.%s.%s" % tuple(snclList)
return(sncl_pattern)
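# Worked example (illustrative codes): with sncl_format "N.S.L.C" the index
# arithmetic above gives netOrder=0, staOrder=1, locOrder=2, chanOrder=3, so
# get_sncl_pattern("IU", "ANMO", "00", "BHZ") returns "IU.ANMO.00.BHZ"; with
# sncl_format "S.N.L.C" the same call would return "ANMO.IU.00.BHZ".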
def get_availability(self,
network=None, station=None, location=None, channel=None,
starttime=None, endtime=None,
latitude=None, longitude=None, minradius=None, maxradius=None):
"""
################################################################################
# getAvailability method returns a dataframe with information from the output
# of the fdsn station web service with "format=text&level=channel".
# With additional parameters, this webservice returns information on all
# matching SNCLs that have available data.
#
# The fdsnws/station/availability web service will return space characters for location
# codes that are SPACE SPACE.
#
# http://service.iris.edu/fdsnws/station/1/
#
# #Network | Station | Location | Channel | Latitude | Longitude | Elevation | Depth | Azimuth | Dip | Instrument | Scale | ScaleFreq | ScaleUnits | SampleRate | StartTime | EndTime
# CU|ANWB|00|LHZ|17.66853|-61.78557|39.0|0.0|0.0|-90.0|Streckeisen STS-2 Standard-gain|2.43609E9|0.05|M/S|1.0|2010-02-10T18:35:00|2599-12-31T23:59:59
#
################################################################################
if (!isGeneric("getAvailability")) {
setGeneric("getAvailability", function(obj, network, station, location, channel,
#starttime, endtime,includerestricted,
starttime, endtime,
latitude, longitude, minradius, maxradius) {
standardGeneric("getAvailability")
})
}
# END of R documentation
Returns a dataframe of SNCLs available from the `station_url` source
specified in the `user_request` object used to initialize the
`Concierge`.
By default, information in the `user_request` is used to generate
a FDSN webservices request for station data. Where arguments are
provided, these are used to override the information found in
`user_request`.
:type network: str
:param network: Select one or more network codes. Can be SEED network
codes or data center defined codes. Multiple codes are
comma-separated.
:type station: str
:param station: Select one or more SEED station codes. Multiple codes
are comma-separated.
:type location: str
:param location: Select one or more SEED location identifiers. Multiple
identifiers are comma-separated. As a special case ``"--"`` (two
dashes) will be translated to a string of two space characters to
match blank location IDs.
:type channel: str
:param channel: Select one or more SEED channel codes. Multiple codes
are comma-separated.
:type starttime: :class:`~obspy.core.utcdatetime.UTCDateTime`
:param starttime: Limit to metadata epochs starting on or after the
specified start time.
:type endtime: :class:`~obspy.core.utcdatetime.UTCDateTime`
:param endtime: Limit to metadata epochs ending on or before the
specified end time.
#:type includerestricted: bool
#:param includerestricted: Specify if results should include information
# for restricted stations.
:type latitude: float
:param latitude: Specify the latitude to be used for a radius search.
:type longitude: float
:param longitude: Specify the longitude to the used for a radius
search.
:type minradius: float
:param minradius: Limit results to stations within the specified
minimum number of degrees from the geographic point defined by the
latitude and longitude parameters.
:type maxradius: float
:param maxradius: Limit results to stations within the specified
maximum number of degrees from the geographic point defined by the
latitude and longitude parameters.
#.. rubric:: Example
#>>> my_request = UserRequest(dummy=True)
#>>> concierge = Concierge(my_request)
#>>> concierge.get_availability() #doctest: +ELLIPSIS
#[u'US.OXF..BHE', u'US.OXF..BHN', u'US.OXF..BHZ']
"""
# NOTE: Building the availability dataframe from a large StationXML is time consuming.
# NOTE: If we are using local station data then we should only do this once.
# Special case when using all defaults helps speed up any metrics making multiple calls to get_availability
# NOTE: If future metrics require this, then uncomment here and add concierge.filtered_availability = None to the end of every metric script.
#if (network is None and
# station is None and
# location is None and
# channel is None and
# starttime is None and
# endtime is None and
# self.filtered_availability is not None):
# return(self.filtered_availability)
# Read from a local StationXML file one time only -- i.e., once this section has run in a job, don't run it again, so a second availability call won't re-parse the file.
if self.station_client is None:
# Using Local Data
# Only read/parse if we haven't already done so
if self.initial_availability is None:
try:
# Get list of all sncls we have metadata for
if self.station_url is not None:
self.logger.info("Reading StationXML file %s" % self.station_url)
sncl_inventory = obspy.read_inventory(self.station_url, format="STATIONXML")
except Exception as e:
err_msg = "The StationXML file: '%s' is not valid" % self.station_url
self.logger.debug(e)
self.logger.error(err_msg)
raise ValueError
self.logger.debug('Building availability dataframe...')
# Allow arguments to override UserRequest parameters
if starttime is None:
_starttime = self.requested_starttime
else:
_starttime = starttime
if endtime is None:
_endtime = self.requested_endtime
else:
_endtime = endtime
# Set up empty dataframe
df = pd.DataFrame(columns=("network", "station", "location", "channel",
"latitude", "longitude", "elevation", "depth" ,
"azimuth", "dip", "instrument",
"scale", "scalefreq", "scaleunits", "samplerate",
"starttime", "endtime", "snclId"))
# Walk through the Inventory object and fill the dataframe with metadata
if 'sncl_inventory' in locals():
for n in sncl_inventory.networks:
for s in n.stations:
for c in s.channels:
if (c.start_date < _endtime) and ((c.end_date is None) or (c.end_date > _starttime)):
if c.end_date is None:
tmpend = UTCDateTime("2599-12-31T23:59:59")
else:
tmpend = c.end_date
snclId = self.get_sncl_pattern(n.code, s.code, c.location_code, c.code)
if c.response.instrument_sensitivity is None:
df.loc[len(df)] = [n.code, s.code, c.location_code, c.code,
c.latitude, c.longitude, c.elevation, c.depth,
c.azimuth, c.dip, c.sensor.description,
None,
None,
None,
c.sample_rate,
c.start_date, c.end_date, snclId]
else:
df.loc[len(df)] = [n.code, s.code, c.location_code, c.code,
c.latitude, c.longitude, c.elevation, c.depth,
c.azimuth, c.dip, c.sensor.description,
c.response.instrument_sensitivity.value,
c.response.instrument_sensitivity.frequency,
c.response.instrument_sensitivity.input_units,
c.sample_rate,
c.start_date, c.end_date, snclId]
# Add local data to the dataframe, even if we don't have metadata
# Loop through all sncl_patterns in the preferences file ---------------
self.logger.debug("Searching for data in %s" % self.dataselect_url)
for sncl_pattern in self.sncl_patterns:
try:
UR_network = sncl_pattern.split('.')[self.netOrder]
UR_station = sncl_pattern.split('.')[self.staOrder]
UR_location = sncl_pattern.split('.')[self.locOrder]
UR_channel = sncl_pattern.split('.')[self.chanOrder]
except Exception as e:
err_msg = "Could not parse sncl_pattern %s" % (sncl_pattern)
self.logger.error(err_msg)
raise ValueError
# Allow arguments to override UserRequest parameters
if network is None:
_network = UR_network
else:
_network = network
if station is None:
_station = UR_station
else:
_station = station
if location is None:
_location = UR_location
else:
_location = location
if channel is None:
_channel = UR_channel
else:
_channel = channel
_sncl_pattern = self.get_sncl_pattern(_network,_station,_location,_channel)
self.logger.debug("Adding %s to availability dataframe" % _sncl_pattern)
if self.station_client is None: # Local metadata
if self.dataselect_client is None: # Local data
# Loop over the available data and add to dataframe if they aren't yet
if len(sncl_pattern.split('.')) > 4:
tmp_sncl_pattern = os.path.splitext(sncl_pattern)[0]
q = os.path.splitext(sncl_pattern)[1][1]
fpattern1 = '%s' % (tmp_sncl_pattern + '.[12][0-9][0-9][0-9].[0-9][0-9][0-9]')
if q.isalpha():
fpattern2 = '%s' % (fpattern1 + '.' + q)
else:
fpattern2 = '%s' % (fpattern1 + '.[A-Z]')
else:
fpattern1 = '%s' % (sncl_pattern + '.[12][0-9][0-9][0-9].[0-9][0-9][0-9]')
fpattern2 = '%s' % (fpattern1 + '.[A-Z]')
matching_files = []
for root, dirnames, fnames in os.walk(self.dataselect_url):
for fname in fnmatch.filter(fnames, fpattern1) + fnmatch.filter(fnames, fpattern2):
matching_files.append(os.path.join(root,fname))
#self.logger.debug("Found files: \n %s" % '\n '.join(matching_files))
if (len(matching_files) == 0):
continue
else:
# Loop over all files that we have matching our desired sncls
for _file in matching_files:
fileSNCL = _file.split("/")[-1]
snclId = fileSNCL.split(".")[0] + "." + fileSNCL.split(".")[1] + "." + fileSNCL.split(".")[2] + "." + fileSNCL.split(".")[3]
if not any(df.snclId.str.contains(snclId)):
# Only add if not already in the df
df.loc[len(df)] = [fileSNCL.split(".")[self.netOrder], fileSNCL.split(".")[self.staOrder],
fileSNCL.split(".")[self.locOrder], fileSNCL.split(".")[self.chanOrder],
None, None, None, None,
None, None, None,
None, None, None,
None, UTCDateTime("1900-01-01"), UTCDateTime("2599-12-31"),
snclId]
# Now save the dataframe internally
self.initial_availability = df
# Container for all of the individual sncl_pattern dataframes generated
sncl_pattern_dataframes = []
loopCounter = 0 # For crossCorrelation when we look for all sn.ls
# Loop through all sncl_patterns ---------------------------------------
for sncl_pattern in self.sncl_patterns:
# We only want to do this one time if we are looking for *.*.*.chan
# For example, during crossCorrelation. Otherwise it creates a bloated
# availability dataframe with the same sncls repeating #sncl_patterns times
loopCounter += 1
if (network is "*" and station is "*" and location is "*" and loopCounter > 1):
continue
# Get "User Request" parameters
try:
UR_network = sncl_pattern.split('.')[self.netOrder]
UR_station = sncl_pattern.split('.')[self.staOrder]
UR_location = sncl_pattern.split('.')[self.locOrder]
UR_channel = sncl_pattern.split('.')[self.chanOrder]
except Exception as e:
err_msg = "Could not parse sncl_pattern %s" % (sncl_pattern)
self.logger.error(err_msg)
raise ValueError
# Allow arguments to override UserRequest parameters
if starttime is None:
_starttime = self.requested_starttime
else:
_starttime = starttime
if endtime is None:
_endtime = self.requested_endtime
else:
_endtime = endtime
if network is None:
_network = UR_network
else:
_network = network
if station is None:
_station = UR_station
else:
_station = station
if location is None:
_location = UR_location
else:
_location = location
if channel is None:
_channel = UR_channel
else:
_channel = channel
_sncl_pattern = self.get_sncl_pattern(_network, _station, _location, _channel)
# Get availability dataframe ---------------------------------------
if self.station_client is None:
# Use pre-existing internal dataframe if we are using local data, filtered by time
df = self.initial_availability
if df['endtime'] is not None:
df = df[(df['starttime'] < _endtime-1) & (df['endtime'] > _starttime)]
else:
df = df[(df['starttime'] < _endtime-1)]
if df is None:
continue
elif self.station_client == "PH5":
self.logger.debug("read IRISPH5 station web services %s/%s for %s,%s,%s,%s,%s,%s" % (self.station_url,self.station_type,_network, _station, _location, _channel, _starttime.strftime('%Y.%j'), _endtime.strftime('%Y.%j')))
try:
df = irisseismic.getAvailability(self.station_url,self.station_type,network=_network, station=_station,
location=_location, channel=_channel,starttime=_starttime, endtime=_endtime,
includerestricted=True,
latitude=latitude, longitude=longitude, minradius=minradius, maxradius=maxradius)
except Exception as e:
if (minradius):
err_msg = "No stations found for %s within radius %s-%s degrees of latitude,longitude %s,%s" % (_sncl_pattern,minradius,maxradius,latitude,longitude)
else:
err_msg = "No stations found for %s" % (_sncl_pattern)
self.logger.debug(str(e).strip('\n'))
self.logger.info(err_msg)
continue
if (df.empty):
if (minradius):
err_msg = "No stations found for %s within radius %s-%s degrees of latitude,longitude %s,%s" % (_sncl_pattern,minradius,maxradius,latitude,longitude)
else:
err_msg = "No stations found for %s" % (_sncl_pattern)
self.logger.info(err_msg)
continue
self.logger.debug('Adding %s to the availability dataframe' % _sncl_pattern)
else:
# Read from FDSN web services
self.logger.debug("read FDSN station web services %s for %s,%s,%s,%s,%s,%s" % (self.station_url,_network, _station, _location, _channel, _starttime.strftime('%Y.%j'), _endtime.strftime('%Y.%j')))
try:
sncl_inventory = self.station_client.get_stations(starttime=_starttime, endtime=_endtime,
network=_network, station=_station,
location=_location, channel=_channel,
includerestricted=True,
latitude=latitude, longitude=longitude,
minradius=minradius, maxradius=maxradius,
level="channel",matchtimeseries=True)
except Exception as e:
if (minradius):
err_msg = "No stations found for %s within radius %s-%s degrees of latitude,longitude %s,%s" % (_sncl_pattern,minradius,maxradius,latitude,longitude)
else:
err_msg = "No stations found for %s" % (_sncl_pattern)
self.logger.debug(str(e).strip('\n'))
self.logger.info(err_msg)
continue
self.logger.debug('Adding %s to the availability dataframe' % _sncl_pattern)
# Set up empty dataframe
df = pd.DataFrame(columns=("network", "station", "location", "channel",
"latitude", "longitude", "elevation", "depth" ,
"azimuth", "dip", "instrument",
"scale", "scalefreq", "scaleunits", "samplerate",
"starttime", "endtime", "snclId"))
# Walk through the Inventory object
for n in sncl_inventory.networks:
for s in n.stations:
for c in s.channels:
snclId = self.get_sncl_pattern(n.code, s.code, c.location_code, c.code)
if c.response.instrument_sensitivity is None:
df.loc[len(df)] = [n.code, s.code, c.location_code, c.code,
c.latitude, c.longitude, c.elevation, c.depth,
c.azimuth, c.dip, c.sensor.description,
None,
None,
None,
c.sample_rate,
c.start_date, c.end_date, snclId]
else:
df.loc[len(df)] = [n.code, s.code, c.location_code, c.code,
c.latitude, c.longitude, c.elevation, c.depth,
c.azimuth, c.dip, c.sensor.description,
c.response.instrument_sensitivity.value,
c.response.instrument_sensitivity.frequency,
c.response.instrument_sensitivity.input_units,
c.sample_rate,
c.start_date, c.end_date, snclId]
# Subset availability dataframe based on _sncl_pattern -------------
# NOTE: This shouldn't be necessary for dataframes obtained from FDSN
# NOTE: but it's quick so we always do it
# Create python regex from _sncl_pattern
# NOTE: Replace '.' first before introducing '.*' or '.'!
py_pattern = _sncl_pattern.replace('.','\\.').replace('*','.*').replace('?','.')
# Filter dataframe
df = df[df.snclId.str.contains(py_pattern)]
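# Worked example of the conversion above (illustrative pattern): the sncl pattern
# "IU.ANMO.*.BH?" becomes the regex "IU\.ANMO\..*\.BH." -- literal dots are escaped
# first, then '*' and '?' are mapped to their regex equivalents for str.contains().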
# Subset based on locally available data ---------------------------
if self.dataselect_client is None:
fpattern1 = '%s.%s' % (_sncl_pattern,_starttime.strftime('%Y.%j'))
fpattern2 = '%s' % (fpattern1 + '.[A-Z]')
matching_files = []
for root, dirnames, fnames in os.walk(self.dataselect_url):
for fname in fnmatch.filter(fnames, fpattern1) + fnmatch.filter(fnames, fpattern2):
matching_files.append(os.path.join(root,fname))
if (len(matching_files) == 0):
err_msg = "No local waveforms matching %s" % fpattern1
self.logger.debug(err_msg)
continue
else:
# Create a mask based on available file names
mask = df.snclId.str.contains("MASK WITH ALL FALSE")
for i in range(len(matching_files)):
basename = os.path.basename(matching_files[i])
match = re.match('[^\\.]*\\.[^\\.]*\\.[^\\.]*\\.[^\\.]*',basename)
sncl = match.group(0)
py_pattern = sncl.replace('.','\\.')
mask = mask | df.snclId.str.contains(py_pattern)
# Subset based on the mask
df = df[mask]
# Subset based on distance
# Create a temporary column that has the distances, use to subset
df.insert(0,'dist',"EMPTY")
if maxradius is not None or minradius is not None:
# There are distance constraints
for ii in range(len(df)):
lat = df['latitude'].iloc[ii];
lon = df['longitude'].iloc[ii];
if (lat and lon):
if not (math.isnan(lon) or math.isnan(lat)):
[dist,AB,BA] = obspy.geodetics.base.gps2dist_azimuth(latitude, longitude, lat, lon)
dist = obspy.geodetics.base.kilometer2degrees(dist/1000)
if (minradius is None) and (maxradius is not None):
if abs(dist) <= maxradius:
df["dist"].iloc[ii] = "KEEP"
elif (maxradius is None) and (minradius is not None):
if abs(dist) >= minradius:
df["dist"].iloc[ii] = "KEEP"
elif (maxradius is not None) and (minradius is not None):
if abs(dist) <= maxradius and abs(dist) >= minradius:
df["dist"].iloc[ii] = "KEEP"
else:
next
else:
next
df = df[df.dist.str.contains("KEEP")]
df = df.drop('dist', 1)
# Append this dataframe
if df.shape[0] == 0:
self.logger.debug("No SNCLS found matching '%s' (sncl_format=%s)" % (_sncl_pattern,self.sncl_format))
else:
#if df.snclId not in sncl_pattern_dataframes[:].snclId:
sncl_pattern_dataframes.append(df) # tack the dataframes together
# END of sncl_patterns loop --------------------------------------------
if len(sncl_pattern_dataframes) == 0:
err_msg = "No available waveforms for %s matching " % _starttime.strftime('%Y-%m-%d') + str(self.sncl_patterns)
self.logger.info(err_msg)
#raise NoAvailableDataError(err_msg)
else:
# Those dataframes become availability
availability = pd.concat(sncl_pattern_dataframes, ignore_index=True, verify_integrity=True)
# Remove duplicates -- starttime/endtime datatypes don't allow drop_duplicates
# convert starttime to string in new column ("start"), drop_duplicates using that, remove column
availability['start'] = availability['starttime'].astype('str')
availability = availability.drop_duplicates(['snclId', 'start'])
availability = availability.drop('start', 1)
if availability.shape[0] == 0:
err_msg = "No available waveforms matching" + str(self.sncl_patterns)
self.logger.info(err_msg)
else:
# The concierge should remember this dataframe for metrics that
# make multiple calls to get_availability with all defaults.
self.filtered_availability = availability
return availability
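# Usage sketch (hedged; the SNCL values are illustrative):
#
#   avail = concierge.get_availability(network="IU", station="ANMO",
#                                      location="00", channel="BHZ")
#
# returns a pandas DataFrame with one row per matching channel epoch, carrying the
# columns built above (latitude/longitude, instrument, scale, samplerate, snclId, ...).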
def get_dataselect(self,
network=None, station=None, location=None, channel=None,
starttime=None, endtime=None, quality=None, repository=None,
inclusiveEnd=False, ignoreEpoch=False):
"""
Returns an R Stream that can be passed to metrics calculation methods.
All arguments are required except for starttime and endtime. These arguments
may be specified but will default to the time information found in the
`user_request` used to generate a FDSN webservices request for miniSEED data.
:type network: str
:param network: Select one or more network codes. Can be SEED network
codes or data center defined codes. Multiple codes are
comma-separated.
:type station: str
:param station: Select one or more SEED station codes. Multiple codes
are comma-separated.
:type location: str
:param location: Select one or more SEED location identifiers. Multiple
identifiers are comma-separated. As a special case ``"--"`` (two
dashes) will be translated to a string of two space characters to
match blank location IDs.
:type channel: str
:param channel: Select one or more SEED channel codes. Multiple codes
are comma-separated.
:type starttime: :class:`~obspy.core.utcdatetime.UTCDateTime`
:param starttime: Limit to metadata epochs starting on or after the
specified start time.
:type endtime: :class:`~obspy.core.utcdatetime.UTCDateTime`
:param endtime: Limit to metadata epochs ending on or before the
specified end time.
"""
# Allow arguments to override UserRequest parameters
if starttime is None:
_starttime = self.requested_starttime
else:
_starttime = starttime
if endtime is None:
_endtime = self.requested_endtime
else:
_endtime = endtime
if self.dataselect_type is None:
# Read local MiniSEED file and convert to R_Stream
nday = int((_endtime - .00001).julday - _starttime.julday) + 1 # subtract a short amount of time for 00:00:00 endtimes
if (nday == 1):
_sncl_pattern = self.get_sncl_pattern(network, station, location, channel)
fpattern1 = '%s.%s' % (_sncl_pattern,_starttime.strftime('%Y.%j'))
fpattern2 = '%s' % (fpattern1 + '.[A-Z]')
matching_files = []
for root, dirnames, fnames in os.walk(self.dataselect_url):
for fname in fnmatch.filter(fnames, fpattern1) + fnmatch.filter(fnames, fpattern2):
matching_files.append(os.path.join(root,fname))
if (len(matching_files) == 0):
self.logger.info("No files found matching '%s'" % (fpattern1))
else:
filepath=matching_files[0]
if (len(matching_files) > 1):
self.logger.debug("Multiple files found: %s" % " ".join(matching_files))
self.logger.warning("Multiple files found matching " '%s -- using %s' % (fpattern1, filepath))
try:
# Get the ObsPy version of the stream
if not inclusiveEnd:
_endtime = _endtime - 0.000001
py_stream = obspy.read(filepath)
py_stream = py_stream.slice(_starttime, _endtime, nearest_sample=False)
if (StrictVersion(obspy.__version__) < StrictVersion("1.1.0")):
flag_dict = obspy.io.mseed.util.get_timing_and_data_quality(filepath)
act_flags = [0,0,0,0,0,0,0,0] # not supported before 1.1.0
io_flags = [0,0,0,0,0,0,0,0] # not supported before 1.1.0
dq_flags = flag_dict['data_quality_flags']
else:
flag_dict = obspy.io.mseed.util.get_flags(filepath)
act_flags = []
io_flags = []
dq_flags = []
for k,v in flag_dict['activity_flags_counts'].items():
act_flags.append(v)
for k,v in flag_dict['io_and_clock_flags_counts'].items():
io_flags.append(v)
for k,v in flag_dict['data_quality_flags_counts'].items():
dq_flags.append(v)
if flag_dict["timing_quality"]:
timing_qual=flag_dict["timing_quality"]["mean"]
else:
timing_qual=None
# NOTE: ObsPy does not store station metadata with each trace.
# NOTE: We need to read them in separately from station metadata.
availability = self.get_availability(network, station, location, channel, _starttime, _endtime)
if(ignoreEpoch == False):
if (len(availability) > 1):
raise Exception("Multiple metadata epochs found for %s" % _sncl_pattern)
sensor = availability.instrument[0]
scale = availability.scale[0]
scalefreq = availability.scalefreq[0]
scaleunits = availability.scaleunits[0]
if sensor is None: sensor = ""
if scale is None: scale = np.NaN
if scalefreq is None: scalefreq = np.NaN
if scaleunits is None: scaleunits = ""
latitude = availability.latitude[0]
longitude = availability.longitude[0]
elevation = availability.elevation[0]
depth = availability.depth[0]
azimuth = availability.azimuth[0]
dip = availability.dip[0]
# Create the IRISSeismic version of the stream
r_stream = irisseismic.R_Stream(py_stream, _starttime, _endtime, act_flags, io_flags, dq_flags, timing_qual,
sensor, scale, scalefreq, scaleunits, latitude, longitude, elevation, depth, azimuth, dip)
except Exception as e:
err_msg = "Error reading in local waveform from %s" % filepath
self.logger.debug(e)
self.logger.debug(err_msg)
raise
if len(utils.get_slot(r_stream, 'traces')) == 0:
raise Exception("no data available")
else:
# create tempfile
x = tempfile.TemporaryFile()
# begin day loop
for day in range(nday):
start = (_starttime + day * 86400)
start = start - (start.hour * 3600 + start.minute * 60 + start.second + start.microsecond * .000001)
end = start + 86400
if start <= _starttime:
start = _starttime
if end >= _endtime:
end = _endtime
_sncl_pattern = self.get_sncl_pattern(network, station, location, channel)
filename = '%s.%s' % (_sncl_pattern,_starttime.strftime('%Y.%j'))
self.logger.debug("read local miniseed file for %s..." % filename)
fpattern1 = self.dataselect_url + '/' + filename + '.[12][0-9][0-9][0-9].[0-9][0-9][0-9]'
fpattern2 = fpattern1 + '.[A-Z]'
matching_files = glob.glob(fpattern1) + glob.glob(fpattern2)
if (len(matching_files) == 0):
err_msg = "No files found matching '%s'" % (fpattern1)
raise Exception(err_msg)
else:
filepath = matching_files[0]
if (len(matching_files) > 1):
self.logger.debug("Multiple files found: %s" % " ".join(matching_files))
self.logger.warning("Multiple files found matching" '%s -- using %s' % (fpattern1, filepath))
# write miniseed to tempfile
with open(filepath, 'rb') as f:
x.write(f.read())
x.flush()
f.close()
try:
py_stream = obspy.read(x)
x.close()
if not inclusiveEnd:
_endtime = _endtime - 0.000001
py_stream = py_stream.slice(_starttime, _endtime, nearest_sample=False)
# NOTE: ObsPy does not store state-of-health flags with each stream.
if (StrictVersion(obspy.__version__) < StrictVersion("1.1.0")):
flag_dict = obspy.io.mseed.util.get_timing_and_data_quality(filepath)
act_flags = [0,0,0,0,0,0,0,0]
io_flags = [0,0,0,0,0,0,0,0]
dq_flags = flag_dict['data_quality_flags']
else:
flag_dict = obspy.io.mseed.util.get_flags(filepath)
act_flags = []
io_flags = []
dq_flags = []
for k,v in flag_dict['activity_flags_counts'].items():
act_flags.append(v)
for k,v in flag_dict['io_and_clock_flags_counts'].items():
io_flags.append(v)
for k,v in flag_dict['data_quality_flags_counts'].items():
dq_flags.append(v)
if flag_dict["timing_quality"]:
timing_qual=flag_dict["timing_quality"]["mean"]
else:
timing_qual=None
# NOTE: ObsPy does not store station metadata with each trace.
# NOTE: We need to read them in separately from station metadata.
# NOTE: This should be consistent for each day of data
self.logger.info('%s, %s,%s,%s' % (network,station,location,channel))
availability = self.get_availability(network, station, location, channel, _starttime, _endtime)
if(ignoreEpoch == False):
if (len(availability) > 1):
raise Exception("Multiple metadata epochs found for %s" % _sncl_pattern)
sensor = availability.instrument[0]
scale = availability.scale[0]
scalefreq = availability.scalefreq[0]
scaleunits = availability.scaleunits[0]
if sensor is None: sensor = ""
if scale is None: scale = np.NaN
if scalefreq is None: scalefreq = np.NaN
if scaleunits is None: scaleunits = ""
latitude = availability.latitude[0]
longitude = availability.longitude[0]
elevation = availability.elevation[0]
depth = availability.depth[0]
azimuth = availability.azimuth[0]
dip = availability.dip[0]
# Create the IRISSeismic version of the stream
r_stream = irisseismic.R_Stream(py_stream, _starttime, _endtime, act_flags, io_flags, dq_flags, timing_qual,
sensor, scale, scalefreq, scaleunits, latitude, longitude, elevation, depth, azimuth, dip)
except Exception as e:
err_msg = "Error reading in local waveform from %s" % filepath
self.logger.debug(e)
self.logger.debug(err_msg)
raise
if len(utils.get_slot(r_stream, 'traces')) == 0:
raise Exception("no data available")
else:
# Read from FDSN web services
try:
# R getDataselect() seems to capture awkward error reports when there is no data
# we want to suppress the stderr channel briefly to block the unwanted feedback from R
orig_stderr = sys.stderr
sys.stderr = self.dev_null
r_stream = irisseismic.R_getDataselect(self.dataselect_url, self.dataselect_type, network, station, location, channel, _starttime, _endtime, quality, repository,inclusiveEnd, ignoreEpoch)
sys.stderr = orig_stderr
except Exception as e:
err_msg = "Error reading in waveform from FDSN dataselect webservice client (base url: %s)" % self.dataselect_url
self.logger.error(err_msg)
self.logger.debug(str(e).strip('\n'))
raise
# Some FDSN web services cut on record boundaries instead of samples, so make sure we have correct start/end times
try:
r_stream = irisseismic.R_slice(r_stream,_starttime, _endtime)
except Exception as e:
err_msg = "Error cutting R stream for start %s and end %s" % (_starttime, _endtime)
self.logger.debug(err_msg)
raise
# TODO: Do we need to test for valid R_Stream.
if False:
return None # TODO: raise an exception
else:
return r_stream
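# Usage sketch (hedged; the SNCL values are illustrative):
#
#   r_stream = concierge.get_dataselect("IU", "ANMO", "00", "BHZ")
#
# The result is an IRISSeismic (R) Stream created through the irisseismic bridge,
# not an ObsPy Stream, so callers access it via helpers such as utils.get_slot().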
def get_event(self,
starttime=None, endtime=None,
minmag=5.5, maxmag=None, magtype=None,
mindepth=None, maxdepth=None):
"""
################################################################################
# getEvent method returns seismic event data from the event webservice:
#
# https://earthquake.usgs.gov/fdsnws/event/1/
#
# TODO: The getEvent method could be fleshed out with a more complete list
# TODO: of arguments to be used as ws-event parameters.
################################################################################
# https://service.iris.edu/fdsnws/event/1/query?starttime=2013-02-01T00:00:00&endtime=2013-02-02T00:00:00&minmag=5&format=text
#
# #EventID | Time | Latitude | Longitude | Depth | Author | Catalog | Contributor | ContributorID | MagType | Magnitude | MagAuthor | EventLocationName
# 4075900|2013-02-01T22:18:33|-11.12|165.378|10.0|NEIC|NEIC PDE|NEIC PDE-Q||MW|6.4|GCMT|SANTA CRUZ ISLANDS
if (!isGeneric("getEvent")) {
setGeneric("getEvent", function(obj, starttime, endtime, minmag, maxmag, magtype,
mindepth, maxdepth) {
standardGeneric("getEvent")
})
}
# END of R documentation
Returns a dataframe of events returned by the `event_url` source
specified in the `user_request` object used to initialize the
`Concierge`.
By default, information in the `user_request` is used to generate
a FDSN webservices request for event data. Where arguments are
provided, these are used to override the information found in
`user_request`.
:type starttime: :class:`~obspy.core.utcdatetime.UTCDateTime`
:param starttime: Limit to metadata epochs starting on or after the
specified start time.
:type endtime: :class:`~obspy.core.utcdatetime.UTCDateTime`
:param endtime: Limit to metadata epochs ending on or before the
specified end time.
:type minmagnitude: float, optional
:param minmagnitude: Limit to events with a magnitude larger than the
specified minimum.
:type maxmagnitude: float, optional
:param maxmagnitude: Limit to events with a magnitude smaller than the
specified maximum.
:type magnitudetype: str, optional
:param magnitudetype: Specify a magnitude type to use for testing the
minimum and maximum limits.
:type mindepth: float, optional
:param mindepth: Limit to events with depth, in kilometers, larger than
the specified minimum.
:type maxdepth: float, optional
:param maxdepth: Limit to events with depth, in kilometers, smaller
than the specified maximum.
#.. rubric:: Example
#>>> my_request = UserRequest(dummy=True)
#>>> concierge = Concierge(my_request)
#>>> concierge.get_event() #doctest: +ELLIPSIS
'
eventId time latitude longitude depth author...'
"""
# Allow arguments to override UserRequest parameters
if starttime is None:
_starttime = self.requested_starttime
else:
_starttime = starttime
if endtime is None:
_endtime = self.requested_endtime
else:
_endtime = endtime
if self.event_client is None:
# Read local QuakeML file
try:
event_catalog = obspy.read_events(self.event_url)
except Exception as e:
err_msg = "The QuakeML file: '%s' is not valid" % self.event_url
self.logger.debug(e)
self.logger.error(err_msg)
raise ValueError
# events.columns
# Index([u'eventId', u'time', u'latitude', u'longitude', u'depth', u'author',
# u'cCatalog', u'contributor', u'contributorId', u'magType', u'magnitude',
# u'magAuthor', u'eventLocationName'],
# dtype='object')
#
dataframes = []
for event in event_catalog:
origin = event.preferred_origin()
magnitude = event.preferred_magnitude()
df = pd.DataFrame({'eventId': re.sub('.*eventid=','',event.resource_id.id),
'time': origin.time,
'latitude': origin.latitude,
'longitude': origin.longitude,
'depth': origin.depth/1000, # QuakeML convention is meters, convert to kilometers
'magType': magnitude.magnitude_type,
'magnitude': magnitude.mag,
'eventLocationName': event.event_descriptions[0].text},
index=[0])
dataframes.append(df)
# Concatenate into the events dataframe
events = pd.concat(dataframes, ignore_index=True)
if _starttime:
events = events[events['time'] >= _starttime]
if _endtime:
events = events[events['time'] <= _endtime]
if minmag:
events = events[events['magnitude'] >= minmag]
if maxmag:
events = events[events['magnitude'] <= maxmag]
if magtype:
events = events[events['magType'].str.match(magtype, as_indexer=True)]
if mindepth:
events = events[events['depth'] >= mindepth]
if maxdepth:
events = events[events['depth'] <= maxdepth]
events.index=np.arange(1,len(events)+1)
else:
# Read from FDSN web services
try:
events = irisseismic.getEvent(self.event_url,
starttime=_starttime,
endtime=_endtime,
minmag=minmag,
maxmag=maxmag,
magtype=magtype,
mindepth=mindepth,
maxdepth=maxdepth)
except Exception as e:
err_msg = "The event_url: '%s' returns an error" % (self.event_url)
self.logger.debug(str(e).strip('\n'))
self.logger.error(err_msg)
raise
if events.shape[0] == 0:
return None # TODO: raise an exception
else:
return events
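# Usage sketch (hedged; the magnitude threshold is illustrative):
#
#   events = concierge.get_event(minmag=6.0)
#
# returns a pandas DataFrame with columns such as eventId, time, latitude, longitude,
# depth, magType, magnitude and eventLocationName, or None when nothing matched.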
if __name__ == '__main__':
import doctest
doctest.testmod(exclude_empty=True)
| lgpl-3.0 |
plotly/plotly.py | packages/python/plotly/plotly/tests/test_optional/test_matplotlylib/test_data.py | 2 | 2718 | from __future__ import absolute_import
import pytest
from plotly import optional_imports
from plotly.tests.test_optional.optional_utils import run_fig
from plotly.tests.test_optional.test_matplotlylib.data.data import *
matplotlylib = optional_imports.get_module("plotly.matplotlylib")
if matplotlylib:
import matplotlib.pyplot as plt
@pytest.mark.matplotlib
def test_line_data():
fig, ax = plt.subplots()
ax.plot(D["x1"], D["y1"])
renderer = run_fig(fig)
for xi, xf, yi, yf in zip(
renderer.plotly_fig["data"][0]["x"],
D["x1"],
renderer.plotly_fig["data"][0]["y"],
D["y1"],
):
assert xi == xf, (
str(renderer.plotly_fig["data"][0]["x"]) + " is not " + str(D["x1"])
)
assert yi == yf, (
str(renderer.plotly_fig["data"][0]["y"]) + " is not " + str(D["y1"])
)
@pytest.mark.matplotlib
def test_lines_data():
fig, ax = plt.subplots()
ax.plot(D["x1"], D["y1"])
ax.plot(D["x2"], D["y2"])
renderer = run_fig(fig)
for xi, xf, yi, yf in zip(
renderer.plotly_fig["data"][0]["x"],
D["x1"],
renderer.plotly_fig["data"][0]["y"],
D["y1"],
):
assert xi == xf, (
str(renderer.plotly_fig["data"][0]["x"]) + " is not " + str(D["x1"])
)
assert yi == yf, (
str(renderer.plotly_fig["data"][0]["y"]) + " is not " + str(D["y1"])
)
for xi, xf, yi, yf in zip(
renderer.plotly_fig["data"][1]["x"],
D["x2"],
renderer.plotly_fig["data"][1]["y"],
D["y2"],
):
assert xi == xf, (
str(renderer.plotly_fig["data"][1]["x"]) + " is not " + str(D["x2"])
)
assert yi == yf, (
str(renderer.plotly_fig["data"][0]["y"]) + " is not " + str(D["y2"])
)
@pytest.mark.matplotlib
def test_bar_data():
fig, ax = plt.subplots()
ax.bar(D["x1"], D["y1"])
renderer = run_fig(fig)
for yi, yf in zip(renderer.plotly_fig["data"][0]["y"], D["y1"]):
assert yi == yf, (
str(renderer.plotly_fig["data"][0]["y"]) + " is not " + str(D["y1"])
)
@pytest.mark.matplotlib
def test_bars_data():
fig, ax = plt.subplots()
ax.bar(D["x1"], D["y1"], color="r")
ax.barh(D["x2"], D["y2"], color="b")
renderer = run_fig(fig)
for yi, yf in zip(renderer.plotly_fig["data"][0]["y"], D["y1"]):
assert yi == yf, (
str(renderer.plotly_fig["data"][0]["y"]) + " is not " + str(D["y1"])
)
for xi, yf in zip(renderer.plotly_fig["data"][1]["x"], D["y2"]):
assert xi == yf, (
str(renderer.plotly_fig["data"][1]["x"]) + " is not " + str(D["y2"])
)
| mit |
sauloal/cnidaria | scripts/venv/lib/python2.7/site-packages/pandas/util/clipboard.py | 16 | 6355 | # Pyperclip v1.3
# A cross-platform clipboard module for Python. (only handles plain text for now)
# By Al Sweigart [email protected]
# Usage:
# import pyperclip
# pyperclip.copy('The text to be copied to the clipboard.')
# spam = pyperclip.paste()
# On Mac, this module makes use of the pbcopy and pbpaste commands, which should come with the os.
# On Linux, this module makes use of the xclip command, which should come with the os. Otherwise run "sudo apt-get install xclip"
# Copyright (c) 2010, Albert Sweigart
# All rights reserved.
#
# BSD-style license:
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the pyperclip nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY Albert Sweigart "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Albert Sweigart BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Change Log:
# 1.2 Use the platform module to help determine OS.
# 1.3 Changed ctypes.windll.user32.OpenClipboard(None) to ctypes.windll.user32.OpenClipboard(0), after some people ran into some TypeError
import platform, os
class NoClipboardProgramError(OSError):
pass
def winGetClipboard():
ctypes.windll.user32.OpenClipboard(0)
pcontents = ctypes.windll.user32.GetClipboardData(1) # 1 is CF_TEXT
data = ctypes.c_char_p(pcontents).value
#ctypes.windll.kernel32.GlobalUnlock(pcontents)
ctypes.windll.user32.CloseClipboard()
return data
def winSetClipboard(text):
GMEM_DDESHARE = 0x2000
ctypes.windll.user32.OpenClipboard(0)
ctypes.windll.user32.EmptyClipboard()
try:
# works on Python 2 (bytes() only takes one argument)
hCd = ctypes.windll.kernel32.GlobalAlloc(GMEM_DDESHARE, len(bytes(text))+1)
except TypeError:
# works on Python 3 (bytes() requires an encoding)
hCd = ctypes.windll.kernel32.GlobalAlloc(GMEM_DDESHARE, len(bytes(text, 'ascii'))+1)
pchData = ctypes.windll.kernel32.GlobalLock(hCd)
try:
# works on Python 2 (bytes() only takes one argument)
ctypes.cdll.msvcrt.strcpy(ctypes.c_char_p(pchData), bytes(text))
except TypeError:
# works on Python 3 (bytes() requires an encoding)
ctypes.cdll.msvcrt.strcpy(ctypes.c_char_p(pchData), bytes(text, 'ascii'))
ctypes.windll.kernel32.GlobalUnlock(hCd)
ctypes.windll.user32.SetClipboardData(1,hCd)
ctypes.windll.user32.CloseClipboard()
def macSetClipboard(text):
outf = os.popen('pbcopy', 'w')
outf.write(text)
outf.close()
def macGetClipboard():
outf = os.popen('pbpaste', 'r')
content = outf.read()
outf.close()
return content
def gtkGetClipboard():
return gtk.Clipboard().wait_for_text()
def gtkSetClipboard(text):
cb = gtk.Clipboard()
cb.set_text(text)
cb.store()
def qtGetClipboard():
return str(cb.text())
def qtSetClipboard(text):
cb.setText(text)
def xclipSetClipboard(text):
outf = os.popen('xclip -selection c', 'w')
outf.write(text)
outf.close()
def xclipGetClipboard():
outf = os.popen('xclip -selection c -o', 'r')
content = outf.read()
outf.close()
return content
def xselSetClipboard(text):
outf = os.popen('xsel -i', 'w')
outf.write(text)
outf.close()
def xselGetClipboard():
outf = os.popen('xsel -o', 'r')
content = outf.read()
outf.close()
return content
if os.name == 'nt' or platform.system() == 'Windows':
import ctypes
getcb = winGetClipboard
setcb = winSetClipboard
elif os.name == 'mac' or platform.system() == 'Darwin':
getcb = macGetClipboard
setcb = macSetClipboard
elif os.name == 'posix' or platform.system() == 'Linux':
xclipExists = os.system('which xclip > /dev/null') == 0
if xclipExists:
getcb = xclipGetClipboard
setcb = xclipSetClipboard
else:
xselExists = os.system('which xsel > /dev/null') == 0
if xselExists:
getcb = xselGetClipboard
setcb = xselSetClipboard
else:
try:
import gtk
except ImportError:
try:
import PyQt4 as qt4
import PyQt4.QtCore
import PyQt4.QtGui
except ImportError:
try:
import PySide as qt4
import PySide.QtCore
import PySide.QtGui
except ImportError:
raise NoClipboardProgramError('Pyperclip requires the'
' gtk, PyQt4, or PySide'
' module installed, or '
'either the xclip or '
'xsel command.')
app = qt4.QtGui.QApplication([])
cb = qt4.QtGui.QApplication.clipboard()
getcb = qtGetClipboard
setcb = qtSetClipboard
else:
getcb = gtkGetClipboard
setcb = gtkSetClipboard
copy = setcb
paste = getcb
## pandas aliases
clipboard_get = paste
clipboard_set = copy
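# Usage sketch (editor's illustration, not part of the original module): the two
# pandas-facing aliases above simply delegate to whichever backend was selected
# for this platform. The import path below is assumed from this file's location.
#
#   from pandas.util import clipboard
#   clipboard.clipboard_set('some text')   # same as clipboard.copy('some text')
#   text = clipboard.clipboard_get()       # same as clipboard.paste()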
| mit |
timsnyder/bokeh | examples/custom/parallel_plot/parallel_plot.py | 1 | 4336 | import numpy as np
import pandas as pd
from bokeh.plotting import figure
from bokeh.layouts import column
from bokeh.models import (Range1d, ColumnDataSource, Div, LinearAxis,
LinearColorMapper, MultiLine,
FixedTicker, BasicTickFormatter, FuncTickFormatter)
from parallel_selection_tool import ParallelSelectionTool
from parallel_reset import ParallelResetTool
def parallel_plot(df, color=None, palette=None):
"""From a dataframe create a parallel coordinate plot
"""
npts = df.shape[0]
ndims = len(df.columns)
if color is None:
color = np.ones(npts)
if palette is None:
palette = ['#ff0000']
cmap = LinearColorMapper(high=color.min(),
low=color.max(),
palette=palette)
data_source = ColumnDataSource(dict(
xs=np.arange(ndims)[None, :].repeat(npts, axis=0).tolist(),
ys=np.array((df-df.min())/(df.max()-df.min())).tolist(),
color=color))
p = figure(x_range=(-1, ndims),
y_range=(0, 1),
width=1000,
tools="pan, box_zoom")
# Create x axis ticks from columns contained in dataframe
fixed_x_ticks = FixedTicker(
ticks=np.arange(ndims), minor_ticks=[])
formatter_x_ticks = FuncTickFormatter(
code="return columns[index]", args={"columns": df.columns})
p.xaxis.ticker = fixed_x_ticks
p.xaxis.formatter = formatter_x_ticks
p.yaxis.visible = False
p.y_range.start = 0
p.y_range.end = 1
p.y_range.bounds = (-0.1, 1.1) # add a little padding around y axis
p.xgrid.visible = False
p.ygrid.visible = False
# Create extra y axis for each dataframe column
tickformatter = BasicTickFormatter(precision=1)
for index, col in enumerate(df.columns):
start = df[col].min()
end = df[col].max()
bound_min = start + abs(end-start) * (p.y_range.bounds[0] - p.y_range.start)
bound_max = end + abs(end-start) * (p.y_range.bounds[1] - p.y_range.end)
p.extra_y_ranges.update(
{col: Range1d(start=bound_min, end=bound_max, bounds=(bound_min, bound_max))})
fixedticks = FixedTicker(
ticks=np.linspace(start, end, 8), minor_ticks=[])
p.add_layout(LinearAxis(fixed_location=index, y_range_name=col,
ticker=fixedticks, formatter=tickformatter), 'right')
# create the data renderer ( MultiLine )
# specify selected and non selected style
non_selected_line_style = dict(line_color='grey', line_width=0.1, line_alpha=0.5)
selected_line_style = dict(line_color={'field': 'color', 'transform': cmap}, line_width=1)
parallel_renderer = p.multi_line(
xs="xs", ys="ys", source=data_source, **non_selected_line_style)
# Specify selection style
selected_lines = MultiLine(**selected_line_style)
# Specify non selection style
nonselected_lines = MultiLine(**non_selected_line_style)
parallel_renderer.selection_glyph = selected_lines
parallel_renderer.nonselection_glyph = nonselected_lines
p.y_range.start = p.y_range.bounds[0]
p.y_range.end = p.y_range.bounds[1]
rect_source = ColumnDataSource({
'x': [], 'y': [], 'width': [], 'height': []
})
# add rectangle selections
selection_renderer = p.rect(x='x', y='y', width='width', height='height',
source=rect_source,
fill_alpha=0.7, fill_color='#009933')
selection_tool = ParallelSelectionTool(
renderer_select=selection_renderer, renderer_data=parallel_renderer,
box_width=10)
# custom resets (reset only axes not selections)
reset_axes = ParallelResetTool()
# add tools and activate selection ones
p.add_tools(selection_tool, reset_axes)
p.toolbar.active_drag = selection_tool
return p
if __name__ == '__main__':
from bokeh.palettes import Viridis256
from bokeh.io import show
df = pd.read_csv("https://raw.githubusercontent.com/bcdunbar/datasets/master/parcoords_data.csv")
p = parallel_plot(df=df, color=df[df.columns[0]], palette=Viridis256)
div = Div(text="Select up and down column grid lines to define filters. Double click a filter to reset it.")
show(column(div, p))
| bsd-3-clause |
Nikea/pyXPCS | pyxpcs/correlator_online_new_mp.py | 1 | 21931 | #!/usr/bin/env python
# Licensed as BSD by Yuriy Chushkin of the ESRF on 2014-08-06
################################################################################
# Copyright (c) 2014, the European Synchrotron Radiation Facility #
# All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions are met: #
# #
# * Redistributions of source code must retain the above copyright notice, #
# this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright notice, #
# this list of conditions and the following disclaimer in the documentation #
# and/or other materials provided with the distribution. #
# #
# * Neither the name of the European Synchrotron Radiation Facility nor the #
# names of its contributors may be used to endorse or promote products #
# derived from this software without specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" #
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE #
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE #
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE #
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR #
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF #
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS #
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN #
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) #
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
################################################################################
#multiprocessing
import matplotlib.pylab as p
import sys, os.path, os, time,EdfFile,ytrc,threading
from numpy.ma import mask_or
from get_edf import file_name
from read_input import *
from some_modules_new import flatfield,rad_average, dropletize, loadedf, saveedf, headeredf
from commands import getoutput
from makemask import make_mask
from numpy import *
from multiprocessing import Process, Queue
from mp_corr import mp_corr
from scipy.optimize import leastsq
from IO import read_image,find_lagt
##################################################################
def dread(f,n,tot_darks,flat_field,static_corrected):
global ccd_img, I_avg, I_avg2, I_avgs, ttdata, tread_cum, tdrop_cum, tI_avg,monitor,totsaxs,wtotmask
ccd_img,monitor,tread,tdrop,totsaxs=read_image(f,input_info,flat_field,wtotmask,tot_darks,'none',static_corrected,totsaxs)
tread_more=time.time()
if q_2tcf!='none':
ttdata[n,:]=ccd_img[index_in_q[q_2tcf-1]]
if input_info['dropletize'].lower()== 'yes':
for k in range(nq):
I_avgs[n,k]=sum(ccd_img[index_in_q[k]])
else:
for k in range(nq):
I_avgs[n,k]=mean(ccd_img[index_in_q[k]])
I_avg[0,n]=I_avgs[n,0]
I_avg2[0,n]=I_avgs[n,nq/2]
tI_avg[0,n]= n*dt
tread_cum+=time.time()-tread_more+tread
return
def trc(matr):
meanmatr=mean(matr,axis=1)
tmp,lenmatr=shape(matr)
meanmatr.shape=1,tmp
trcm=dot(matr,matr.T)/lenmatr/dot(meanmatr.T,meanmatr)
return trcm
def errfunc(pa,xdata,ydata):return (pa[0]+pa[1]*xdata-ydata)/sqrt(ydata*1e-8)
def errfuncc(pc,xdata,ydata):return (pc[0]+pc[1]*xdata-ydata)/sqrt(ydata*1e-8)
def vartrc(ttc):
pc0=[1.0,0.1]
n,tmp=shape(ttc)
vtmp=[]
for it in xrange(1,n-1):
###for aging###
#ydata=diag(ttc,it)
#xdata=arange(1,len(ydata)+1)
#p1,success=leastsq(errfuncc,pc0,args=(xdata,ydata))
#vtmp.append(var(ydata/(p1[0]+p1[1]*xdata)))
###for aging###
vtmp.append(var(diag(ttc,it)))
return vtmp
def recurf(ll):
global l,y,v
y[ll+1].append((y[ll][0]+y[ll][1])*0.5)
y[ll]=[]
v[ll+1].append(vartrc(y[ll+1][-1]))
if l[ll+1]==1:
recurf(ll+1)
else:
l[ll+1]+=1
l[ll]=0
return
#function plotinf
def ttplot(corfp,srp,slp,n,I1,I2):
global tplot_cum,dt,firstfile
tplot=time.time()
rchplot=int(ceil(log(n/chn)/log(2))+1)
normplot=zeros((1,rcr),dtype=float32)
for ir in xrange(rchplot):
if ir==0:
normplot[0,:chn]=1./arange(n-2,n-chn-2,-1)
else:
normplot[0,chn2*(ir+1.):chn2*(ir+2.)]=1./arange((n-1)/(2**ir)-chn2-1,(n-1)/(2**ir)-chn-1,-1)
indt=int(chn+chn2*log(n/chn)/log(2))-2
cc1=corfp[0,:indt]/(slp[0,:indt]*srp[0,:indt])/normplot[0,:indt]
cc2=corfp[-1,:indt]/(slp[-1,:indt]*srp[-1,:indt])/normplot[0,:indt]
t_axis=lag[0,:indt]
t_axis2=tI_avg[0,:n]
t_axis2b=tI_avg[0,:n]/dt+firstfile
lm1.set_data(t_axis,cc1)
lm2.set_data(t_axis2,I1)
lm1b.set_data(t_axis,cc2)
lm2b.set_data(t_axis2b,I2)
ax1.set_xlim(min(t_axis),max(t_axis))
ax1.set_ylim(min(cc1),max(cc1))
ax1b.set_ylim(min(cc2),max(cc2))
ax2.set_xlim(min(t_axis2),max(t_axis2))
ax2b.set_xlim(min(t_axis2b),max(t_axis2b))
ax2.set_ylim(min(I1),max(I1))
ax2b.set_ylim(min(I2),max(I2))
p.draw()
tplot_cum+=time.time()-tplot
return
########################READING INPUT FILE##################################
def correlator_online_mp(fileinput='input.txt',dark_file='default',mask_file='default',plot='yes'):
global nq, n, chn,chn2,rcr,index_in_q, lag, dt, norm, nc, I_avg, I_avg2, lm1, lm2, lm1b, lm2b, ax1,ax1b, ax2, ax2b, nq, detector, ccd_img, flat_field, tot_darks, totmask, ttdata,tcalc_cum, tplot_cum, tread_cum, tI_avg,static_corrected, firstfile, tolerance, I_avgs, xnq, Mythread,l,y,v, input_info, wtotmask,totsaxs,tI_avg,q_2tcf
time1=time.time()
p.rc('image',origin = 'lower')
p.rc('image',interpolation = 'nearest')
p.close()
print 'multiprocessor'
print 'reading input...'
input_info=get_input(fileinput)
##processing input file#####
dir= input_info['dir']
dir_dark= input_info['dark dir']
if dir_dark=='none':
dir_dark=dir
file_prefix=input_info['file_prefix']
ext = input_info['file_suffix']
# New version has capabilities of reading also .gz files
if ext == '.edf.gz':
dataread=EdfFile.EdfGzipFile
else:
dataread=EdfFile.EdfFile
firstfile=int(input_info['n_first_image'])
lastfile=int(input_info['n_last_image'])+1
firstdark=input_info['n_first_dark']
if firstdark.lower() != 'none':
firstdark=int(input_info['n_first_dark'])
lastdark=int(input_info['n_last_dark'])+1
geometry=input_info['geometry'].lower()
tolerance=float32(float(input_info['tolerance']))
avgt = input_info['lag time'].lower()
if avgt=='auto':
lagt=[]
lagt1=0
for k in xrange(firstfile+40,firstfile+100):
filename=file_name(dir+file_prefix,ext,k)
while os.path.exists(filename) is False:
sys.stdout.write(50*'\x08')
                sys.stdout.write('file '+filename+' still not ready')
sys.stdout.flush()
#rint 'file ' ,filename, 'still not ready'
time.sleep(10)
f=dataread(filename)
params=f.GetHeader(0)
if input_info['detector']=='medipix':
lagt2=float32(float(params['time_of_frame']))
lagt.append(lagt2-lagt1)
lagt1=lagt2
# if (input_info['detector']=='princeton' or input_info['detector']=='andor'):
else:
counters=params['counter_mne'].split(' ')
lagt_ind=counters.index('ccdtavg')
values=params['counter_pos'].split(' ')
lagt.append(float32(float(values[lagt_ind])))
del lagt[0]
dt=average(array(lagt,dtype=float32))
print 'lag time =', dt
else:
dt=float32(float(input_info['lag time']))
q_2tcf=(input_info['q for TRC']).lower()
if q_2tcf!='none':
q_2tcf=int(q_2tcf)
out_dir=get_dir(input_info['output directory'])
out_prefix=get_prefix(input_info['output filename prefix'])
out_tot=out_dir+out_prefix
##end processing input file#####
firstname=dir+file_name(file_prefix,ext,firstfile)
f=dataread(firstname)
ccd_info=f.GetStaticHeader(0)
ncol=int(ccd_info['Dim_1'])
nrows=int(ccd_info['Dim_2'])
static=out_tot+'static.edf'
static=EdfFile.EdfFile(static)
static_data=asfarray(static.GetData(0),dtype=float32)
if input_info['n_first_dark'].lower()=='none':
print 'not using darks'
tot_darks=0*static_data
else:
print 'using darks'
if dark_file=='default':
dark_file=out_tot+'dark.edf'
print 'using dark file:', dark_file
dark=EdfFile.EdfFile(dark_file)
tot_darks=asfarray(dark.GetData(0),dtype=float32)
toplot=static_data+.001 #to avoid zeros in plotting logarithm###
print '...done'
print '...reading q mask'
if mask_file=='default':
mask_file=out_tot+'mask.edf'
print 'using mask file:', mask_file
tot=EdfFile.EdfFile(mask_file)
totmask=float32(tot.GetData(0)+tot.GetData(1))
wtotmask=where(totmask==0)
p.ion()
fileq=out_tot+'qmask.edf'
file=EdfFile.EdfFile(fileq)
q=file.GetData(0)
maxval=int(amax(q)+2)
detector=input_info['detector']
flatfield_file=input_info['flatfield file']
if detector=='medipix':
flat_field=flatfield(detector,flatfield_file)
else:
flat_field=1.0
print '...done'
if geometry=='saxs':
print '...correcting static for baseline'
xbeam=int(input_info['x direct beam'])
ybeam=int(input_info['y direct beam'])
static_data=rad_average(static_data,totmask,xbeam,ybeam)
qaxis_list=[]
npix_per_q=[]
oneq=[]
index_in_q=[]
firstq=float32(float(input_info['first q']))
deltaq=float32(float(input_info['delta q']))
stepq=float32(float(input_info['step q']))
qvalue=firstq+deltaq/2
static_corrected=ones(shape(static_data),dtype=float32)
q*=abs(totmask-1)
total_pixels=0
for i in range(2,maxval,2):
indices=where(q==i)
index_in_q.append(indices)#gives the indices of pixels that are not masked at this q
if geometry=='saxs':
static_corrected[indices]=mean(static_data[indices])/static_data[indices]
npixel=len(static_data[indices])
npix_per_q.append(npixel)
oneq.append(ones((1,npixel)))
qaxis_list.append(qvalue)
qvalue+=deltaq+stepq
total_pixels+=npixel
print '...done'
nq=len(npix_per_q)
xnq=xrange(nq)
ncores=1
ncores=min(ncores,nq)
tmp_pix=0
q_sec=[]
if nq==1:
q_sec.append(0)
elif ncores>=nq:
q_sec=range(1,nq)
else:
for ii in xnq:
if tmp_pix<total_pixels/(ncores):
tmp_pix+=npix_per_q[ii]
if ii== nq-1:
q_sec.append(ii)
else:
q_sec.append(ii)
tmp_pix=0+npix_per_q[ii]
ncores=len(q_sec)
tmpdat=loadtxt(out_tot+'1Dstatic.dat')
qaxis=tmpdat[:,0]
I_q=tmpdat[:,1]
del tmpdat
##FINISHED INITIALIZING PART OF THE CODE######
##START MAIN PART FOR CORRELATION#####
chn=16.
chn2=chn/2
nfile=lastfile-firstfile
rch=int(ceil(log(nfile/chn)/log(2))+1)
###2time
if q_2tcf!='none':
ttdata=zeros((nfile,npix_per_q[q_2tcf-1]),dtype=float32)
###2time
rcr=chn+chn2*ceil(log(nfile/chn)/log(2))
lag=zeros((1,rcr),dtype=float32)
data_shape=p.shape(toplot)
smatr=zeros(data_shape,dtype=float32)
matr=zeros(data_shape,dtype=float32)
norm=zeros((1,rcr),dtype=float32)
for ir in xrange(rch):
if ir==0:
lag[0,:chn]=dt*arange(1,chn+1,1)
norm[0,:chn]=1./arange(nfile-2,nfile-chn-2,-1)
else:
lag[0,chn2*(ir+1):chn2*(ir+2)]=(dt*2**ir)*arange(1+chn2,chn+1)
norm[0,chn2*(ir+1):chn2*(ir+2)]=1./arange((nfile-1)/(2**ir)-chn2-1,(nfile-1)/(2**ir)-chn-1,-1)
#END of declaring and initializing variables####
#READING FILES
filenames=[]
for k in xrange(firstfile,lastfile):
filenames.append(file_name(file_prefix,ext,k))
n=0
if plot!='no':
ax1=p.axes([0.11, 0.08, 0.75, 0.57])
ax1.set_xlabel('t [sec]')
ax1.set_ylabel('g^2(q,t)')
ax1b=p.twinx(ax1)
ax1b.yaxis.tick_right()
ax2=p.axes([0.11, 0.73, 0.75, 0.19])
ax2.xaxis.tick_bottom()
ax2.set_xlabel('t [sec]')
ax2.set_ylabel('I(q,t) [a.u.]')
ax2b=p.gcf().add_axes(ax2.get_position(),frameon=False)
ax2b.xaxis.tick_top()
ax2b.yaxis.tick_right()
ax2b.xaxis.set_label_position('top')
ax2b.set_xlabel('Image no.')
label1='q= %2.1e 1/Ang' % qaxis_list[0]
label2='q= %2.1e 1/Ang' % qaxis_list[nq/2]
lm1,=ax1.semilogx((1,),(1,),'ro-',label=label1)
lm1b,=ax1b.semilogx((1,),(1,),'bo-',label=label2)
ax1.legend(loc='lower left')
ax1b.legend(loc=(0.02,0.1))
lm2,=ax2.plot((1,),(1,),'r-')
lm2b,=ax2b.plot((1,),(1,),'b-')
p.setp(ax1.get_yticklabels(), color='r')
p.setp(ax1b.get_yticklabels(), color='b')
p.setp(ax2.get_yticklabels(), color='r')
p.setp(ax2b.get_yticklabels(), color='b')
tplot_cum=0
tread_cum=0
tcalc_cum=0
tqueue_cum=0
I_avg=zeros((1,nfile),float32)
I_avg2=zeros((1,nfile),float32)
I_avgs=zeros((nfile,nq),float32)
tI_avg=zeros((1,nfile),float32)
mon=zeros((1,nfile),int16)
detector=input_info['detector'].lower()
Mythread=threading.Thread
checkfile=os.path.exists
n=0
totsaxs=0*static_data
goodsize=os.path.getsize(dir+filenames[n])
nnfile=nfile-1
#if plot!='no':
# tmpf=lambda x : True
# thplot=Process(target=tmpf,args=([0]))
# thplot.start()
######################multiprocessing#######################################################
qur=[]
qure=[]
pcorr=[]
for i in xrange(ncores):
qur.append(Queue())
qure.append(Queue())
#qur.append(Queue())
quplot=Queue()
for i in xrange(ncores):
if i==0:
q_beg=0
else:
q_beg=q_sec[i-1]
q_end=q_sec[i]
if i==ncores-1:
q_end=nq
pcorr.append(Process(target=mp_corr, args=(i,nfile,chn,plot,npix_per_q[q_beg:q_end],index_in_q[q_beg:q_end],qur[i],qure[i],quplot)))
for i in xrange(ncores):
pcorr[i].start()
n=0
nc=0
nnfile=nfile-1
if input_info['normalize'].lower()!= 'none':
normalize=input_info['normalize']
print "normalizing to ", input_info['normalize']
else:
print "not normalizing"
while n<nnfile:
tread=time.time()
nc=n+1
file=filenames[n]
tmf=dir+file
wait=0
t0=time.time()
stop=0
while checkfile(tmf) is False:
p.draw()
sys.stdout.write(50*'\x08')
sys.stdout.write('waiting for file'+ file+'...')
sys.stdout.flush()
t1=time.time()
wait+=t1-t0
time.sleep(dt)
t0=t1
if wait>10*dt:
print nfile
ans=raw_input('\n will this file ever arrive? (y/N)')
if ans.lower()=='y':
print '\n keep waiting...\n'
time.sleep(3*dt)
wait=0
else:
stop=1
nfile=n+1
break
if stop==1:
break
if ext=='.edf':
filesize=os.path.getsize(tmf)
while filesize!=goodsize:
sys.stdout.write(50*'\x08')
                sys.stdout.write('file '+ file+' still not ready...')
sys.stdout.flush()
time.sleep(dt)
filesize=os.path.getsize(tmf)
f=dataread(tmf)
dread(f,n,tot_darks,flat_field,static_corrected)
mon[0,n]=monitor
#for plot. TO be faster, I only updated plot each chn files.
jj=0
tmp_put=[]
tqueue=time.time()
for i in xnq:
if i <q_sec[jj]:
tmp_put.append(ccd_img[index_in_q[i]])
elif i==nq-1:
tmp_put.append(ccd_img[index_in_q[i]])
qur[jj].put(tmp_put)
else:
qur[jj].put(tmp_put)
tmp_put=[]
tmp_put.append(ccd_img[index_in_q[i]])
jj+=1
tqueue_cum+=time.time()-tqueue
if nc%chn==0:
pct=100.0*n/nfile
sys.stdout.write(50*'\x08')
sys.stdout.write('read '+str(int(pct))+'% of files'+32*' ')
sys.stdout.flush()
if plot!='no':
#thplot.join()
xx=quplot.get()
ttplot(xx[0],xx[1],xx[2],n+1,I_avg[0,:n+1],I_avg2[0,:n+1])
#thplot=Process(target=ttplot,args=([xx[0],xx[1],xx[2],n+1,I_avg[0,:n+1],I_avg2[0,:n+1]]))
#thplot.start()
#thplot.join()
n+=1
#if plot!='no':
#thplot.join()
sys.stdout.write(50*'\x08')
sys.stdout.flush()
print "read 100% of files"
###############################################################################################
from_proc=[]
for i in xrange(ncores):
from_proc.append(qure[i].get())
pcorr[i].join()
qure[i].close
#############################################################################################
#END OF MAIN LOOP
#calculate 2 times correlation function
print "saving results..."
if stop==1:
tI_avg=tI_avg[:,:nfile]
mon=mon[:,:nfile]
I_avgs=I_avgs[:nfile,:]
        rch=int(ceil(log(nfile/chn)/log(2))+1)
        for ir in xrange(rch):
            if ir==0:
                norm[0,:chn]=1./arange(nfile-2,nfile-chn-2,-1)
            else:
                norm[0,chn2*(ir+1):chn2*(ir+2)]=1./arange((nfile-1)/(2**ir)-chn2-1,(nfile-1)/(2**ir)-chn-1,-1)
#calculate correlation functions
corf=from_proc[0][0]
sl=from_proc[0][1]
sr=from_proc[0][2]
tcalc_cum=from_proc[0][3]
for i in xrange(1,ncores):
corf=concatenate((corf,from_proc[i][0]),axis=0)
sl=concatenate((sl,from_proc[i][1]),axis=0)
sr=concatenate((sr,from_proc[i][2]),axis=0)
tcalc_cum=max(tcalc_cum,from_proc[i][3])
indt=int(chn+chn2*log(nfile/chn)/log(2))-2
cc=zeros((indt,nq+1),float32)
q_title='#q values:'
trace_title='#file_no. , time, monitor, q values:'
for cindex in xnq:
q_title=q_title+' '+str(qaxis_list[cindex])
trace_title=trace_title+' '+str(qaxis_list[cindex])
cc[:,cindex+1]=corf[cindex,:indt]/(sl[cindex,:indt]*sr[cindex,:indt])/\
norm[0,:indt]
cc[:,0]=lag[0,:indt]
q_title=q_title+'\n'
trace_title=trace_title+'\n'
del indt
f=open(out_tot+'cf.dat','w')
f.write(q_title)
savetxt(f, cc)
f.close()
del cc
f=open(out_tot+'trace.dat','w')
f.write(trace_title)
traces=zeros((nfile,nq+3),float32)
traces[:,0]=tI_avg/dt+firstfile
traces[:,1]=tI_avg
traces[:,2]=mon
traces[:,3:]=I_avgs
savetxt(f,traces)
f.close()
del traces
static=out_tot+'static.edf'
static=EdfFile.EdfFile(static)
totsaxs=totsaxs/n-tot_darks
totsaxs[totsaxs<=0]=0
static.WriteImage({},totsaxs,0)
del static
print 'correlation functions are saved to ', out_tot+'cf.dat'
print 'traces are saved to ', out_tot+'trace.dat'
if plot!='no':
p.hold(True)
p.close()
if q_2tcf!='none':
print "calculating time resolved cf and chi4..."
if nfile>6000: #this is for 4 GB RAM PC
nfile=6000
n=6000
lind2=npix_per_q[q_2tcf-1]/16
l=arange(5)*0
y=[]
v=[]
for i in range(5):
y.append([])
v.append([])
ib=0
for i in xrange(16):
sys.stdout.write(50*'\x08')
sys.stdout.write('done '+str(int(i/16.*100))+'% of data'+32*' ')
sys.stdout.flush()
ie=ib+lind2
y[0].append(trc(ttdata[:n-1,ib:ie]))
v[0].append(vartrc(y[0][-1]))
if l[0]==1:
recurf(0)
else:
l[0]+=1
ib+=lind2
vm=[]
for i in range(4,-1,-1):
vm.append(mean(v[i],0))
vm=array(vm)
del ttdata
del v
sys.stdout.write(50*'\x08')
sys.stdout.flush()
file_2times=out_tot+'2times_q_'+str(q_2tcf)+'.edf'
ytrc.write(file_2times,y[4][0])
print 'Time resolved CF is saved to '+ out_tot+'2times_q_'+str(q_2tcf)+'.edf'
N=array([[1],[2],[4],[8],[16]])/float(npix_per_q[q_2tcf-1])
data=concatenate((N,vm),1).T
#print 'number of pixels ',lind[ttcf_par]
#print 'q value=', qv[ttcf_par]
p0=[0.0,1.0]
it=range(len(data[1:,0]))
p1=zeros((len(data[1:,0]),len(p0)+1))
p1[:,0]=(asfarray(it)+1.0)*dt
xdata=data[0,:]
for i in it:
ydata=data[i+1,:]
p1[i,1:],success=leastsq(errfunc,p0,args=(xdata,ydata))
outfile=out_tot+'fitchi4_q_'+str(q_2tcf)+'.dat'
f=open(outfile,'w')
f.write("#time chi4 error q value:"+str(qaxis_list[q_2tcf-1])+"\n")
savetxt(f,p1)
f.close()
print 'file is saved to '+outfile
print "saving results..."
time2=time.time()
print 'elapsed time', time2-time1
print 'elapsed time for plotting', tplot_cum
print 'elapsed time for reading', tread_cum
print 'elapsed time for correlating', tcalc_cum
print 'elapsed time for queueing', tqueue_cum
print 'used ncores=', ncores
| bsd-3-clause |
landlab/drivers | component_drivers/cellular_automata/rock_weathering.py | 3 | 5558 | #!/usr/env/python
"""
rock_weathering.py
CellLab-CTS model that simulates the weathering of rock to saprolite around
a network of fractures.
Created (and translated from earlier code by) by Greg Tucker, Jul 2015
"""
from __future__ import print_function
import time
import numpy as np
from landlab import RasterModelGrid
from landlab.components.cellular_automata.celllab_cts import Transition, CAPlotter
from landlab.components.cellular_automata.raster_cts import RasterCTS
from landlab.components.fracture_grid.fracture_grid import make_frac_grid
import matplotlib
from landlab.io.netcdf import write_netcdf
def setup_transition_list():
"""
Creates and returns a list of Transition() objects to represent the
grain-by-grain transformation of bedrock to saprolite.
Returns
-------
xn_list : list of Transition objects
List of objects that encode information about the link-state transitions.
Notes
-----
Weathering here is treated very simply: a bedrock particle adjacent to a
saprolite particle has a specified probability (rate) of weathering to
saprolite; in other words, a rock-saprolite pair can turn into a
saprolite-saprolite pair.
The states and transitions are as follows:
    Pair state      Transition to       Process     Rate (cells/s)
    ==========      =============       =======     ==============
    0 (0-0)         (none)              -           -
    1 (0-1)         3 (1-1)             weathering  1.0
    2 (1-0)         3 (1-1)             weathering  1.0
    3 (1-1)         (none)              -           -
"""
# Create an empty transition list
xn_list = []
# Append two transitions to the list.
# Note that the arguments to the Transition() object constructor are:
# - Tuple representing starting pair state
# (left/bottom cell, right/top cell, orientation)
# - Tuple representing new pair state
# (left/bottom cell, right/top cell, orientation)
# - Transition rate (cells per time step, in this case 1 sec)
# - Name for transition
xn_list.append( Transition((0,1,0), (1,1,0), 1., 'weathering') )
xn_list.append( Transition((1,0,0), (1,1,0), 1., 'weathering') )
return xn_list
def main():
# INITIALIZE
# User-defined parameters
nr = 200 # number of rows in grid
nc = 200 # number of columns in grid
plot_interval = 0.05 # time interval for plotting (unscaled)
run_duration = 5.0 # duration of run (unscaled)
report_interval = 10.0 # report interval, in real-time seconds
frac_spacing = 10 # average fracture spacing, nodes
outfilename = 'wx' # name for netCDF files
# Remember the clock time, and calculate when we next want to report
# progress.
current_real_time = time.time()
next_report = current_real_time + report_interval
# Counter for output files
time_slice = 0
# Create grid
mg = RasterModelGrid(nr, nc, 1.0)
# Make the boundaries be walls
mg.set_closed_boundaries_at_grid_edges(True, True, True, True)
# Set up the states and pair transitions.
ns_dict = { 0 : 'rock', 1 : 'saprolite' }
xn_list = setup_transition_list()
# Create the node-state array and attach it to the grid.
# (Note use of numpy's uint8 data type. This saves memory AND allows us
# to write output to a netCDF3 file; netCDF3 does not handle the default
# 64-bit integer type)
node_state_grid = mg.add_zeros('node', 'node_state_map', dtype=np.uint8)
node_state_grid[:] = make_frac_grid(frac_spacing, model_grid=mg)
# Create the CA model
ca = RasterCTS(mg, ns_dict, xn_list, node_state_grid)
# Set up the color map
rock_color = (0.8, 0.8, 0.8)
sap_color = (0.4, 0.2, 0)
clist = [rock_color, sap_color]
my_cmap = matplotlib.colors.ListedColormap(clist)
# Create a CAPlotter object for handling screen display
ca_plotter = CAPlotter(ca, cmap=my_cmap)
# Plot the initial grid
ca_plotter.update_plot()
# Output the initial grid to file
write_netcdf((outfilename+str(time_slice)+'.nc'), mg,
#format='NETCDF3_64BIT',
names='node_state_map')
# RUN
current_time = 0.0
while current_time < run_duration:
# Once in a while, print out simulation and real time to let the user
# know that the sim is running ok
current_real_time = time.time()
if current_real_time >= next_report:
print('Current sim time', current_time, '(',
100 * current_time/run_duration, '%)')
next_report = current_real_time + report_interval
# Run the model forward in time until the next output step
ca.run(current_time+plot_interval, ca.node_state,
plot_each_transition=False)
current_time += plot_interval
# Plot the current grid
ca_plotter.update_plot()
# Output the current grid to a netCDF file
time_slice += 1
write_netcdf((outfilename+str(time_slice)+'.nc'), mg,
#format='NETCDF3_64BIT',
names='node_state_map')
# FINALIZE
# Plot
ca_plotter.finalize()
# If user runs this file, activate the main() function
if __name__ == "__main__":
main()
| mit |
springer-math/Mathematics-of-Epidemics-on-Networks | docs/examples/fig9p5.py | 1 | 2607 | import EoN
import networkx as nx
import matplotlib.pyplot as plt
import random
import scipy
print("for figure 9.5, we have not coded up the equations to calculate size as a function of tau (fig a), so this just gives simulations. It does calculate the predicted size as a function of R_0^p. (fig b)")
r'''
Rather than doing the dynamic simulations, this uses the directed percolation approach
described in chapter 6.
'''
N = 100000
gamma = 1./5.5
tau = 0.55
iterations = 1
rho = 0.001
kave=15
def rec_time_fxn_gamma(u, alpha, beta):
return scipy.random.gamma(alpha,beta)
def rec_time_fxn_fixed(u):
return 1
def rec_time_fxn_exp(u):
return random.expovariate(1)
def trans_time_fxn(u, v, tau):
if tau >0:
return random.expovariate(tau)
else:
return float('Inf')
def R0first(tau):
return (kave-1) * (1- 4/(2+tau)**2)
def R0second(tau):
return (kave-1) * (1- 1/scipy.sqrt(1+2*tau))
def R0third(tau):
return (kave-1)*tau/(tau+1)
def R0fourth(tau):
return (kave-1)*(1-scipy.exp(-tau))
G = nx.configuration_model([kave]*N)
taus = scipy.linspace(0,0.35,21)
def do_calcs_and_plot(G, trans_time_fxn, rec_time_fxn, trans_time_args, rec_time_args, R0fxn, symbol):
As = []
for tau in taus:
P, A = EoN.estimate_nonMarkov_SIR_prob_size_with_timing(G,trans_time_fxn=trans_time_fxn,
rec_time_fxn = rec_time_fxn,
trans_time_args = (tau,),
rec_time_args=rec_time_args)
As.append(A)
plt.figure(1)
plt.plot(taus, As, symbol)
plt.figure(2)
plt.plot( R0fxn(taus), As, symbol)
print("first distribution")
do_calcs_and_plot(G, trans_time_fxn, rec_time_fxn_gamma, (tau,), (2,0.5), R0first, 'o')
print("second distribution")
do_calcs_and_plot(G, trans_time_fxn, rec_time_fxn_gamma, (tau,), (0.5,2), R0second, 's')
print("fourth distribution")
do_calcs_and_plot(G, trans_time_fxn, rec_time_fxn_exp, (tau,), (), R0third, 'd')
print("fifth distribution")
do_calcs_and_plot(G, trans_time_fxn, rec_time_fxn_fixed, (tau,), (), R0fourth, 'x')
plt.figure(1)
plt.xlabel(r'Transmission rate $\tau$')
plt.ylabel('Final Size')
plt.savefig('fig9p5a.png')
R0s = scipy.linspace(0,3,301)
ps = R0s/(kave-1)
Apred = [EoN.Attack_rate_discrete({kave:1}, p) for p in ps]
plt.figure(2)
plt.plot(R0s, Apred, '-', color = 'k')
plt.axis(xmax = 3)
plt.xlabel('Pairwise Reproductive Ratio $R_0^p$')
plt.ylabel('Final Size')
plt.savefig('fig9p5b.png')
| mit |
momijiame/rakuza | rakuza/exchanges/bitflyer.py | 1 | 7893 | # -*- coding: utf-8 -*-
import time
import json
import hashlib
import hmac
from datetime import datetime
from urllib import parse
from oslo_config import cfg
import requests
import pandas as pd
from ..autologging import patch as monkey_patch
from ..exceptions import InvalidHTTPStatusCodeError
from .base import ExchangeBase
from .base import TickerMixin, TickerData
from .base import BoardMixin, BoardData
from .base import BalanceMixin, BalanceData
from .base import Side
from .base import OrderMixin, OrderData, OrderType
from .base import ContractMixin, ContractData
cfg_group = cfg.OptGroup('bitflyer')
cfg_opts = [
cfg.StrOpt(
'api_key',
help='bitFlyer Lightning API key',
),
cfg.StrOpt(
'api_secret',
help='bitFlyer Lightning API secret',
),
]
CONF = cfg.CONF
CONF.register_group(cfg_group)
CONF.register_opts(cfg_opts, group=cfg_group)
class BitflyerOrderData(OrderData):
def fetch_contracts(self, order_id: str) -> list:
"""約定の情報を取得するメソッド"""
return _fetch_contracts(order_id)
class BitflyerExchange(ExchangeBase,
TickerMixin,
BoardMixin,
BalanceMixin,
OrderMixin,
ContractMixin):
"""bitFlyer (https://lightning.bitflyer.com/)
    API specification: https://lightning.bitflyer.com/docs
"""
@property
def name(self):
return 'bitflyer'
# Public APIs
def ticker(self) -> TickerData:
"""ティッカー情報を取得する"""
r = requests.get('https://api.bitflyer.com/v1/ticker')
if r.status_code != 200:
raise InvalidHTTPStatusCodeError(200, r.status_code)
body = r.json()
bid = body['best_bid']
ask = body['best_ask']
return TickerData(bid, ask)
def board(self) -> BoardData:
"""板情報を取得する"""
r = requests.get('https://api.bitflyer.com/v1/board')
if r.status_code != 200:
raise InvalidHTTPStatusCodeError(200, r.status_code)
body = r.json()
bids = body['bids']
asks = body['asks']
bids_df = pd.DataFrame.from_dict(bids)
asks_df = pd.DataFrame.from_dict(asks)
bids_nparray = bids_df.to_numpy()
asks_nparray = asks_df.to_numpy()
return BoardData(bids_nparray, asks_nparray)
# Private APIs
def balance(self) -> BalanceData:
path = '/v1/me/getbalance'
body = private_request(path)
mappings = {
entry.get('currency_code'): entry.get('amount')
for entry in body
}
jpy = mappings.get('JPY')
btc = mappings.get('BTC')
return BalanceData(jpy, btc)
def order_buy_market(self, size: float) -> OrderData:
"""成行での買い注文"""
order_id = _request_order('BUY', size)
return BitflyerOrderData(order_id, Side.BUY, OrderType.MARKET, size)
def order_sell_market(self, size: float) -> OrderData:
"""成行での売り注文"""
order_id = _request_order('SELL', size)
return BitflyerOrderData(order_id, Side.SELL, OrderType.MARKET, size)
def order_buy_limit(self, size: float, price: float) -> OrderData:
"""指値での買い注文"""
order_id = _request_order('BUY', size, price)
return BitflyerOrderData(order_id, Side.BUY, OrderType.LIMIT, size, price)
def order_sell_limit(self, size: float, price: float) -> OrderData:
"""指値での売り注文"""
order_id = _request_order('SELL', size, price)
return BitflyerOrderData(order_id, Side.SELL, OrderType.LIMIT, size, price)
def contracts(self, order_id: str = None) -> ContractData:
"""約定を取得する"""
return _fetch_contracts(order_id)
def _fetch_contracts(order_id: str = None) -> ContractData:
"""約定を取得する関数"""
path = '/v1/me/getexecutions'
query_params = {}
if order_id is not None:
query_params.update({
'child_order_acceptance_id': order_id,
})
    # Append a query string when filter conditions are given
if len(query_params) > 0:
query_string = parse.urlencode(query_params)
path += f'?{query_string}'
contracts = private_request(path)
return [_extract_contract_data(contract) for contract in contracts]
def _extract_contract_data(contract_dict):
"""個別の約定情報が入った辞書から情報を取り出す"""
contract_id = contract_dict['id']
order_id = contract_dict['child_order_acceptance_id']
contract_side = Side.from_string(contract_dict['side'])
contract_price = contract_dict['price']
contract_size = contract_dict['size']
raw_timestamp = contract_dict['exec_date']
exec_timestamp = datetime.strptime(raw_timestamp, '%Y-%m-%dT%H:%M:%S.%f')
contract_data = ContractData(contract_id=contract_id,
order_id=order_id,
exec_timestamp=exec_timestamp,
contract_side=contract_side,
contract_price=contract_price,
contract_size=contract_size)
return contract_data
def _request_order(side: str, size: float, price: float = None):
"""注文する関数"""
req_path = '/v1/me/sendchildorder'
req_body = {
        # Product to order
        'product_code': 'BTC_JPY',
        # Order type
        'child_order_type': 'MARKET',
        # buy: 'BUY', sell: 'SELL'
        'side': str(side).upper(),
        # Quantity
        'size': size,
        # Time until the order expires (default: 43200 = 30 days)
        'minute_to_expire': 1,
        # Execution condition (details: https://lightning.bitflyer.com/docs/specialorder)
        'time_in_force': 'FOK',  # FOK = Fill or Kill: cancel unless the full quantity executes immediately
}
    # If a price is given, place a limit order instead
if price is not None:
req_body.update({
'child_order_type': 'LIMIT',
'price': price,
})
resp_body = private_request(req_path, method='POST', body=req_body)
return str(resp_body['child_order_acceptance_id'])
def private_request(path: str, method: str = 'GET', body: dict = None):
"""Private API を呼び出す関数"""
base_url = 'https://api.bitflyer.com'
req_url = base_url + path
json_body = json.dumps(body) if body is not None else ''
headers = _private_api_headers(method, path, json_body or '')
if body is not None:
headers['Content-Type'] = 'application/json'
request_method = getattr(requests, method.lower())
r = request_method(req_url, headers=headers, data=json_body)
if r.status_code != 200:
raise InvalidHTTPStatusCodeError(200, r.status_code)
return r.json()
def _private_api_headers(method: str, path: str, body: str):
"""Private API の呼び出しに必要な HTTP ヘッダを計算する関数"""
api_key = CONF.bitflyer.api_key
api_secret = CONF.bitflyer.api_secret
assert isinstance(api_key, str), 'Please set \'api_key\' of [bitflyer]'
assert isinstance(api_secret, str), 'Please set \'api_secret\' of [bitflyer]'
timestamp = str(int(time.time()))
message = timestamp + method + path + body
encoded_message = message.encode('ASCII')
encoded_api_secret = api_secret.encode('ASCII')
h = hmac.new(encoded_api_secret, encoded_message, hashlib.sha256)
sign = h.hexdigest()
headers = {
'ACCESS-KEY': api_key,
'ACCESS-TIMESTAMP': timestamp,
'ACCESS-SIGN': sign,
}
return headers
# Automatically log calls to the public members of this module
monkey_patch(__name__)
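# -----------------------------------------------------------------------------
# Usage sketch (editor's illustration, not part of the original module). It
# assumes BitflyerExchange() needs no constructor arguments (see the base class).
# Public endpoints work without credentials; private calls additionally require
# the [bitflyer] api_key / api_secret options registered above, set e.g. via an
# oslo.config file or CONF.set_override():
#
#   exchange = BitflyerExchange()
#   ticker = exchange.ticker()                     # public: best bid / best ask
#   print(ticker.bid, ticker.ask)
#
#   CONF.set_override('api_key', '<key>', group='bitflyer')
#   CONF.set_override('api_secret', '<secret>', group='bitflyer')
#   balance = exchange.balance()                   # private: JPY / BTC balances
# -----------------------------------------------------------------------------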
| gpl-3.0 |
michigraber/scikit-learn | examples/linear_model/plot_logistic.py | 312 | 1426 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logit function
=========================================================
Show in the plot is how the logistic regression would, in this
synthetic dataset, classify values as either 0 or 1,
i.e. class one or two, using the logit-curve.
"""
print(__doc__)
# Code source: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# this is our test set, it's just a straight line with some
# Gaussian noise
xmin, xmax = -5, 5
n_samples = 100
np.random.seed(0)
X = np.random.normal(size=n_samples)
y = (X > 0).astype(np.float)
X[X > 0] *= 4
X += .3 * np.random.normal(size=n_samples)
X = X[:, np.newaxis]
# run the classifier
clf = linear_model.LogisticRegression(C=1e5)
clf.fit(X, y)
# and plot the result
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.scatter(X.ravel(), y, color='black', zorder=20)
X_test = np.linspace(-5, 10, 300)
def model(x):
return 1 / (1 + np.exp(-x))
loss = model(X_test * clf.coef_ + clf.intercept_).ravel()
plt.plot(X_test, loss, color='blue', linewidth=3)
ols = linear_model.LinearRegression()
ols.fit(X, y)
plt.plot(X_test, ols.coef_ * X_test + ols.intercept_, linewidth=1)
plt.axhline(.5, color='.5')
plt.ylabel('y')
plt.xlabel('X')
plt.xticks(())
plt.yticks(())
plt.ylim(-.25, 1.25)
plt.xlim(-4, 10)
plt.show()
| bsd-3-clause |
ky822/scikit-learn | examples/svm/plot_svm_kernels.py | 329 | 1971 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM-Kernels
=========================================================
Three different types of SVM-Kernels are displayed below.
The polynomial and RBF are especially useful when the
data-points are not linearly separable.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# Our dataset and targets
X = np.c_[(.4, -.7),
(-1.5, -1),
(-1.4, -.9),
(-1.3, -1.2),
(-1.1, -.2),
(-1.2, -.4),
(-.5, 1.2),
(-1.5, 2.1),
(1, 1),
# --
(1.3, .8),
(1.2, .5),
(.2, -2),
(.5, -2.4),
(.2, -2.3),
(0, -2.7),
(1.3, 2.1)].T
Y = [0] * 8 + [1] * 8
# figure number
fignum = 1
# fit the model
for kernel in ('linear', 'poly', 'rbf'):
clf = svm.SVC(kernel=kernel, gamma=2)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
facecolors='none', zorder=10)
plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired)
plt.axis('tight')
x_min = -3
x_max = 3
y_min = -3
y_max = 3
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.figure(fignum, figsize=(4, 3))
plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
levels=[-.5, 0, .5])
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
fignum = fignum + 1
plt.show()
| bsd-3-clause |
TomAugspurger/pandas | pandas/tests/scalar/interval/test_ops.py | 4 | 2336 | """Tests for Interval-Interval operations, such as overlaps, contains, etc."""
import pytest
from pandas import Interval, Timedelta, Timestamp
@pytest.fixture(
params=[
(Timedelta("0 days"), Timedelta("1 day")),
(Timestamp("2018-01-01"), Timedelta("1 day")),
(0, 1),
],
ids=lambda x: type(x[0]).__name__,
)
def start_shift(request):
"""
Fixture for generating intervals of types from a start value and a shift
value that can be added to start to generate an endpoint
"""
return request.param
class TestOverlaps:
def test_overlaps_self(self, start_shift, closed):
start, shift = start_shift
interval = Interval(start, start + shift, closed)
assert interval.overlaps(interval)
def test_overlaps_nested(self, start_shift, closed, other_closed):
start, shift = start_shift
interval1 = Interval(start, start + 3 * shift, other_closed)
interval2 = Interval(start + shift, start + 2 * shift, closed)
# nested intervals should always overlap
assert interval1.overlaps(interval2)
def test_overlaps_disjoint(self, start_shift, closed, other_closed):
start, shift = start_shift
interval1 = Interval(start, start + shift, other_closed)
interval2 = Interval(start + 2 * shift, start + 3 * shift, closed)
# disjoint intervals should never overlap
assert not interval1.overlaps(interval2)
def test_overlaps_endpoint(self, start_shift, closed, other_closed):
start, shift = start_shift
interval1 = Interval(start, start + shift, other_closed)
interval2 = Interval(start + shift, start + 2 * shift, closed)
# overlap if shared endpoint is closed for both (overlap at a point)
result = interval1.overlaps(interval2)
expected = interval1.closed_right and interval2.closed_left
assert result == expected
@pytest.mark.parametrize(
"other",
[10, True, "foo", Timedelta("1 day"), Timestamp("2018-01-01")],
ids=lambda x: type(x).__name__,
)
def test_overlaps_invalid_type(self, other):
interval = Interval(0, 1)
msg = f"`other` must be an Interval, got {type(other).__name__}"
with pytest.raises(TypeError, match=msg):
interval.overlaps(other)
| bsd-3-clause |
Vkomini/KU-Leuven | Computer Vision/Assignments/Assign1/smoothing1.py | 1 | 2402 | '''
Gaussian smoothing with Python.
'''
import cv2
import numpy as np
import math
import os
from scipy import signal
import matplotlib.pyplot as plt
def gaussian_filter(sigma, filter_length):
'''
Given a sigma, return a 1-D Gaussian filter.
@param sigma: float, defining the width of the filter
@param filter_length: optional, the length of the filter, has to be odd
@return A 1-D numpy array of odd length,
containing the symmetric, discrete approximation of a Gaussian with sigma
Summation of the array-values must be equal to one.
'''
if filter_length == None:
# determine the length of the filter
filter_length = math.ceil (sigma * 5)
# make the length odd
        filter_length = 2 * (int (filter_length) // 2) + 1
# make sure sigma is a float
sigma = float (sigma)
# create the filter
result1 = np.zeros ((int (filter_length)))
filtercenter = int (np.round (filter_length) / 2)
for j in range (int (filter_length)):
result1[j] = np.exp (-1 * ((j - filtercenter) ** 2) / (sigma ** 2) / 2)
    result1 /= np.sum (result1)  # normalise so the filter values sum to one
return result1
def test_gaussian_filter():
'''
Test the Gaussian filter on a known input.
'''
sigma = math.sqrt (1.0 / 2 / math.log (2))
f = gaussian_filter (sigma, filter_length=3)
correct_f = np.array ([0.25, 0.5, 0.25])
error = np.abs (f - correct_f)
if np.sum (error) < 0.001:
print ("Congratulations, the filter works!")
else:
print ("Still some work to do..")
def gaussian_smooth1(image, sigma):
# initialize the matrix
result = np.zeros_like (image)
# get the filtercv2.waitKey(0)
kernel1D = gaussian_filter (sigma, 5)
size = image.shape
# smooth every color-channe
for c in range (size[2]):
for j in range (size[1]):
result[:, j, c] = np.convolve (image[:, j, c], kernel1D, 'same')
for j in range (size[0]):
result[j, :, c] = np.convolve (image[j, :, c], kernel1D, 'same')
return result
# this part of the code is only executed if the file is run stand-alone
if __name__ == '__main__':
# test the gaussian filter
test_gaussian_filter ()
img = cv2.imread ('image.jpg')
smoothed_img = gaussian_smooth1 (img, 0.9)
# Showing the results
plt.subplot (121)
plt.imshow (img, cmap='gray')
plt.title ('Original Image'), plt.xticks ([]), plt.yticks ([])
plt.subplot (122)
plt.imshow (smoothed_img, cmap='gray')
plt.title ('Smoothed Image'), plt.xticks ([]), plt.yticks ([])
plt.show ()
| apache-2.0 |
ovilab/atomify-lammps | libs/lammps/python/examples/matplotlib_plot.py | 22 | 2270 | #!/usr/bin/env python -i
# preceding line should have path for Python on your machine
# matplotlib_plot.py
# Purpose: plot Temp of running LAMMPS simulation via matplotlib
# Syntax: plot.py in.lammps Nfreq Nsteps compute-ID
# in.lammps = LAMMPS input script
# Nfreq = plot data point every this many steps
# Nsteps = run for this many steps
# compute-ID = ID of compute that calculates temperature
# (or any other scalar quantity)
from __future__ import print_function
import sys
sys.path.append("./pizza")
import matplotlib
matplotlib.use('tkagg')
import matplotlib.pyplot as plt
# parse command line
argv = sys.argv
if len(argv) != 5:
print("Syntax: plot.py in.lammps Nfreq Nsteps compute-ID")
sys.exit()
infile = sys.argv[1]
nfreq = int(sys.argv[2])
nsteps = int(sys.argv[3])
compute = sys.argv[4]
me = 0
# uncomment if running in parallel via Pypar
#import pypar
#me = pypar.rank()
#nprocs = pypar.size()
from lammps import lammps
lmp = lammps()
# run infile all at once
# assumed to have no run command in it
lmp.file(infile)
lmp.command("thermo %d" % nfreq)
# initial 0-step run to generate initial 1-point plot
lmp.command("run 0 pre yes post no")
value = lmp.extract_compute(compute,0,0)
ntimestep = 0
xaxis = [ntimestep]
yaxis = [value]
# create matplotlib plot
# just proc 0 handles plotting
if me == 0:
fig = plt.figure()
line, = plt.plot(xaxis, yaxis)
plt.xlim([0, nsteps])
plt.title(compute)
plt.xlabel("Timestep")
plt.ylabel("Temperature")
plt.show(block=False)
# run nfreq steps at a time w/out pre/post, query compute, refresh plot
import time
while ntimestep < nsteps:
lmp.command("run %d pre no post no" % nfreq)
ntimestep += nfreq
value = lmp.extract_compute(compute,0,0)
xaxis.append(ntimestep)
yaxis.append(value)
if me == 0:
line.set_xdata(xaxis)
line.set_ydata(yaxis)
ax = plt.gca()
ax.relim()
ax.autoscale_view(True, True, True)
fig.canvas.draw()
lmp.command("run 0 pre no post yes")
# uncomment if running in parallel via Pypar
#print("Proc %d out of %d procs has" % (me,nprocs), lmp)
#pypar.finalize()
if sys.version_info[0] == 3:
input("Press Enter to exit...")
else:
raw_input("Press Enter to exit...")
| gpl-3.0 |
teese/pytoxr | pytoxr/parse.py | 1 | 31478 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
pytoxr contains tools for the analysis of data from ToxR experiments
Copyright (C) 2016 Mark George Teese
This software is licensed under the permissive MIT License...
"""
import glob
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import re
import seaborn as sns
import sys
def parse_softmax(txt_path, samples_path):
"""Parse the text file output from Sofmax Pro, using the standard A600 endpoint & A405 kinetic template.
This method uses the initial velocities calculated by Softmax Pro.
Parameters
----------
txt_path : str
Path to txt data file.
samples_path : str
Path to an excel file with all of the sample names.
An example of this text file should be in the "examples" subfolder.
Dataframes
----------
df = raw softmax pro output
dfd = dataframe for data.
index = Vm#, columns = Vi1 Vi2 Vi3 MU1 MU2, etc
dfnu = dataframe for normalised, unique data
index = sample_name, columns = as above for dfd
Returns
-------
dfd : pd.DataFrame
Dataframe for data.
"""
exp_name = os.path.basename(txt_path)[:-4][0:60]
out_dir = os.path.join(os.path.dirname(txt_path), exp_name)
# path to output heatmap of raw OD600 values
out_OD600_heatmap = os.path.join(out_dir, "{}_OD600_heatmap.png".format(exp_name))
# path to excel output file
# for compatibility purposes, currently only excel 2003 is recommended
excel_format = ".xls"
out_parsed_excel = os.path.join(out_dir, "{}_parsed{}".format(exp_name, excel_format))
# only the non-normalised data is saved as csv
out_parsed_csv = os.path.join(out_dir, "{}_parsed.csv".format(exp_name))
# create output directory
if not os.path.exists(out_dir):
os.makedirs(out_dir)
"""
FORMAT OF THE OD600 section of the SOFTMAX PRO FILE
~End
Plate: OD600 1,3 PlateFormat Endpoint Absorbance Raw TRUE 1 1 600 1 12 96 1 8 None
    Temperature(°C) 1 2 3 4 5 6 7 8 9 10 11 12
24,10 0,1094 0,1322 0,121 0,117 0,1214 0,1239 0,1128 0,1219 0,1191 0,1152 0,1172 0,1164
0,153 0,1564 0,1582 0,1518 0,1636 0,1528 0,1448 0,1651 0,1371 0,1484 0,1491 0,1509
0,1194 0,1218 0,1266 0,12 0,1171 0,1252 0,1155 0,1227 0,123 0,1204 0,1221 0,1159
0,1217 0,1237 0,1239 0,119 0,1217 0,1245 0,1168 0,1241 0,1207 0,1168 0,1203 0,1203
0,1152 0,119 0,1402 0,1184 0,1443 0,1219 0,1193 0,1254 0,1206 0,1173 0,1167 0,1165
0,1253 0,1313 0,1435 0,1232 0,1261 0,1298 0,1239 0,1315 0,1133 0,1193 0,1157 0,1178
0,1136 0,1143 0,1359 0,1172 0,1373 0,1275 0,1159 0,1281 0,1224 0,1195 0,1168 0,1143
0,1078 0,1206 0,1243 0,1139 0,1199 0,1229 0,1172 0,121 0,1206 0,0379 0,0382 0,0407
"""
# go through each line, searching for the various components
OD600_regex_search_string = "Plate:\sOD600"
# regex_search_string = "Group:\s+Results Kinetic\s+1"
OD600_plate_is_found = False
OD600_plate_line = 0
first_line_with_OD600_data_int = None
first_line_with_OD600_data = None
"""
CHECK IF "OD600" or "Cell Density (OD600)" is used in this template.
Plate: Cell Density (OD600) 1,3 PlateFormat Endpoint Absorbance Raw TRUE 1 1 600 1 12 96 1 8 None
    Temperature(°C) 1 2 3 4 5 6 7 8 9 10 11 12
28,00 0,109467 0,119067 0,119167 0,112067 0,140267 0,126567 0,043267 0,052867 0,053967 0,052567 0,052567 0,052767
"""
OD600_regex_search_string = None
with open(txt_path, "r") as f:
for line in f:
if "Cell Density (OD600)" in line:
OD600_regex_search_string = "Plate:\sCell\sDensity\s\(OD600\)"
break
if "Plate:\tOD600" in line:
OD600_regex_search_string = "Plate:\sOD600"
break
if OD600_regex_search_string is None:
raise ValueError("Line with OD600 not found in text template. Check text output file.")
with open(txt_path, "r") as f:
for n, line in enumerate(f):
match = re.match(OD600_regex_search_string, line)
if match:
OD600_plate_is_found = True
OD600_plate_line = n
first_line_with_OD600_data_int = OD600_plate_line + 2
if n == first_line_with_OD600_data_int:
first_line_with_OD600_data = line
total_n_lines = n
if not OD600_plate_is_found:
raise ValueError("Line with OD600 not found in text template. Check text output file.")
table_start = first_line_with_OD600_data_int
table_end = first_line_with_OD600_data_int + 8
skipfooter = total_n_lines - table_end + 1
if "," in first_line_with_OD600_data:
decimal = ","
elif "." in first_line_with_OD600_data:
decimal = "."
df = pd.read_table(txt_path, skiprows=table_start, sep='\s+', decimal=decimal, header=None, skipfooter=skipfooter, engine="python")
"""
OD600 data now looks something like this.
The temperature in row 0 puts everything out of order.
0 1 2 3 4 5 6 7 8 9 10 11 12
0 24.1000 0.1094 0.1322 0.1210 0.1170 0.1214 0.1239 0.1128 0.1219 0.1191 0.1152 0.1172 0.116
1 0.1530 0.1564 0.1582 0.1518 0.1636 0.1528 0.1448 0.1651 0.1371 0.1484 0.1491 0.1509 Na
2 0.1194 0.1218 0.1266 0.1200 0.1171 0.1252 0.1155 0.1227 0.1230 0.1204 0.1221 0.1159 Na
3 0.1217 0.1237 0.1239 0.1190 0.1217 0.1245 0.1168 0.1241 0.1207 0.1168 0.1203 0.1203 Na
4 0.1152 0.1190 0.1402 0.1184 0.1443 0.1219 0.1193 0.1254 0.1206 0.1173 0.1167 0.1165 Na
5 0.1253 0.1313 0.1435 0.1232 0.1261 0.1298 0.1239 0.1315 0.1133 0.1193 0.1157 0.1178 Na
6 0.1136 0.1143 0.1359 0.1172 0.1373 0.1275 0.1159 0.1281 0.1224
7 0.1078 0.1206 0.1243 0.1139 0.1199 0.1229 0.1172 0.1210 0.1206 0.1195 0.1168 0.1143 Na
"""
# fix up alignment in dataframe, rename columns and index
df.loc[0, :] = list(df.loc[0, 1:]) + [0]
df = df.loc[:, 0:11]
df.index = list("ABCDEFGH")
df.columns = range(1, 13)
"""
OD600 data is ready for heatmap
1 2 3 4 5 6 7 8 9 \
A 0.1094 0.1322 0.1210 0.1170 0.1214 0.1239 0.1128 0.1219 0.1191
B 0.1530 0.1564 0.1582 0.1518 0.1636 0.1528 0.1448 0.1651 0.1371
C 0.1194 0.1218 0.1266 0.1200 0.1171 0.1252 0.1155 0.1227 0.1230
D 0.1217 0.1237 0.1239 0.1190 0.1217 0.1245 0.1168 0.1241 0.1207
E 0.1152 0.1190 0.1402 0.1184 0.1443 0.1219 0.1193 0.1254 0.1206
F 0.1253 0.1313 0.1435 0.1232 0.1261 0.1298 0.1239 0.1315 0.1133
G 0.1136 0.1143 0.1359 0.1172 0.1373 0.1275 0.1159 0.1281 0.1224
H 0.1078 0.1206 0.1243 0.1139 0.1199 0.1229 0.1172 0.1210 0.1206
"""
tum_blue4_as_python_color = np.array([0, 82, 147]) / 255
cmap = sns.light_palette(tum_blue4_as_python_color, as_cmap=True)
plt.close("all")
fig, ax = plt.subplots()
ax = sns.heatmap(df, ax=ax, cmap=cmap, vmin=0.075)
ax.get_figure()
# rotate the y-ticklabels
ax.set_yticklabels(ax.get_yticklabels(), rotation=0)
# set the x-axis labels on the top
ax.xaxis.tick_top()
fig.savefig(out_OD600_heatmap, dpi=240 )
"""
OD600 heatmap is finished.
Now grab the kinetic data, which is presented in a table at the bottom of the text file.
This is the FORMAT OF THE INPUT SOFTMAX PRO FILE, at region of processed kinetic data
    ..... <----------- raw kinetic data is above. We currently use the SoftMax Pro calculated initial velocity
~End
Group: Results Kinetic 1 <----------- group_results_line. Use regex to find the line number associated with this string.
Sample Wells Sample# V/max Vmax/OD600 Mean SD
Vm01 A1 1 43,605 1231,785 1379,232 132,621 <----------- first_line_with_data. Use this to determine if . or , is the decimal.
A2 59,253 1488,774
A3 50,733 1417,135
Vm02 B1 2 26,652 870,965 801,480 75,053
B2 39,200 811,590
B3 36,816 721,885
...
~End The last line with ~End shows the location of the end of the file. This is variable. Best to use dropna() to drop empty rows at the end of the dataframe.
"""
# go through each line, searching for the various components
regex_search_string = "Group:\s+Results Kinetic\s+1"
group_results_are_found = False
group_results_line = 0
first_line_with_data_int = None
first_line_with_data = None
start_of_table = None
with open(txt_path, "r") as f:
for n, line in enumerate(f):
match = re.match(regex_search_string, line)
if match:
group_results_are_found = True
group_results_line = n
first_line_with_data_int = group_results_line + 2
start_of_table = group_results_line+1
if n == start_of_table:
start_of_table_data = line
if n == first_line_with_data_int:
first_line_with_data = line
if not group_results_are_found:
raise ValueError("Softmax template is not recognised. Check input file names, and that 'Group: Results Kinetic' is located in the text file.")
# check if the data is saved using a German or English decimal point
if "," in first_line_with_data:
dec = ","
elif "." in first_line_with_data:
dec = "."
else:
raise TypeError("VersaMax exported text file seems to have an unknown decimal format. Try re-exporting data.")
skiprows = group_results_line
# DEPRECATED. DID NOT WORK.
# if "Sample" in start_of_table_data:
# skiprows = group_results_line
# elif "Group" in start_of_table_data:
# skiprows = group_results_line + 1
# else:
# raise ValueError("skiprows does not work. check txt file format.")
# open csv as a pandas dataframe, starting from "Sample Wells Sample# V/max Vmax/OD600 Mean" ...etc
df = pd.read_csv(txt_path, sep='\t', skiprows=skiprows, decimal=dec)
if 'Sample' not in df.columns:
"""
        Some pandas versions apply skiprows differently, so we need
        to check that the header row is correctly aligned.
Group: Results Kinetic 1
Sample Wells Sample# V/max Vmax/OD600 Mean SD
Vm01 A1 1 55,264 790,996 776,806 48,225
"""
if "Group:" in df.columns:
skiprows = group_results_line + 1
df = pd.read_csv(txt_path, sep='\t', skiprows=skiprows, decimal=dec)
else:
raise ValueError("Pandas has tried to make a dataframe from OD600 values, but can't seem to locate the header.\n"
"Check text file for output.\nCurrent dataframe columns : {}.".format(df.columns))
# drop the last footer rows, which may vary in length
df.dropna(subset=["V/max"], inplace=True)
# fill the names downwards, so that they can be used for creating pivot tables later
df.fillna(method="pad", inplace=True)
# search for datapoints that could not be fitted
if "NoFit" in df["V/max"].tolist():
df_no_fit = df.loc[df["V/max"] == "NoFit"].copy()
df_no_fit["Vm_Well"] = df_no_fit["Sample"] + "_well_" + df_no_fit["Wells"]
sample_names_with_no_fit = df_no_fit["Sample"].unique()
samples_with_no_fit = df_no_fit["Vm_Well"].dropna().unique()
sys.stdout.write("'NoFit' found in data, indicating measurement was impossible, probably due to very high enzyme concentrations.\n"
"Samples affected: {}".format(samples_with_no_fit))
df.replace("NoFit", np.nan, inplace=True)
df["V/max"] = df["V/max"].astype(float)
df["Vmax/OD600"] = df["Vmax/OD600"].astype(float)
#df["V/max"] = df["V/max"].replace("NoFit", np.nan).astype(float)
#df["Vmax/OD600"] = df["Vmax/OD600"].replace("NoFit", np.nan).astype(float)
        # drop any rows with no fit
#df = df.loc[df["V/max"].notnull()]
else:
sample_names_with_no_fit = []
# calculate OD from V and V/OD (not currently supplied by the template)
df["OD"] = df["V/max"] / df["Vmax/OD600"]
"""
df should look like this. Basically, it looks like the SoftMax pro output using Jan Kirrbach's template.
Sample Wells Sample# V/max Vmax/OD600 Mean SD OD
0 Vm01 A1 1.0 43.605 1231.785 1379.232 132.621 0.035400
1 NaN A2 NaN 59.253 1488.774 NaN NaN 0.039800
2 NaN A3 NaN 50.733 1417.135 NaN NaN 0.035800
3 Vm02 B1 2.0 26.652 870.965 801.480 75.053 0.030601
4 NaN B2 NaN 39.200 811.590 NaN NaN 0.048300
"""
Vi_replicates = ["Vi1","Vi2","Vi3"] * int(df.shape[0]/3)
MU_replicates = ["MU1","MU2","MU3"] * int(df.shape[0]/3)
OD_replicates = ["OD1","OD2","OD3"] * int(df.shape[0]/3)
df["Vi_rep"] = Vi_replicates
df["MU_rep"] = MU_replicates
df["OD_rep"] = OD_replicates
dfv = df.pivot_table(values="V/max", index = "Sample", columns="Vi_rep")
dfd = df.pivot_table(values="Vmax/OD600", index = "Sample", columns="MU_rep")
dfo = df.pivot_table(values="OD", index = "Sample", columns="OD_rep")
dfd = pd.concat([dfv, dfd, dfo], axis=1)
"""
dfd now contains the values for each replicate, one after another
Vi1 Vi2 Vi3 MU1 MU2 MU3 OD1 OD2 OD3
Sample
Vm01 43.605 59.253 50.733 1231.785 1488.774 1417.135 0.035400 0.0398 0.0358
Vm02 26.652 39.200 36.816 870.965 811.590 721.885 0.030601 0.0483 0.0510
Vm03 36.813 66.878 56.552 1191.375 1706.078 1516.140 0.030900 0.0392 0.0373
Vm04 42.418 46.726 45.454 1258.694 1115.172 1014.602 0.033700 0.0419 0.0448
Vm05 70.692 101.895 88.205 2380.192 3078.387 2183.284 0.029700 0.0331 0.0404
Vi is the initial velocity.
MU is the miller units
OD is the OD600 (cell density)
now the mean and standard deviation is simply recalculated for the Vi, MU and OD replicates
"""
dfd["Vi_mean"] = dfd.loc[:, "Vi1":"Vi3"].mean(axis=1)
dfd["Vi_std"] = dfd.loc[:, "Vi1":"Vi3"].std(axis=1)
dfd["MU_mean"] = dfd.loc[:, "MU1":"MU3"].mean(axis=1)
dfd["MU_std"] = dfd.loc[:, "MU1":"MU3"].std(axis=1)
dfd["OD_mean"] = dfd.loc[:, "OD1":"OD3"].mean(axis=1)
dfd["OD_std"] = dfd.loc[:, "OD1":"OD3"].std(axis=1)
# open excel file with sample names
dfs = pd.read_excel(samples_path, index_col=0)
dfs["frame"] = dfs.frame.dropna().astype(int).astype(str)
dfs.fillna("", inplace=True)
for i in dfs.index:
dfs.loc[i, "sample_python"] = dfs.loc[i, "uniprot":"mut"].dropna().astype(str).str.cat()
# try:
# dfs.loc[i, "sample_python"] = dfs.loc[i, "uniprot":"mut"].dropna().astype(str).str.cat()
# except:
# print(i)
# print(dfs.loc[i, "uniprot":"mut"])
# print(dfs.loc[i, "uniprot":"mut"].dropna())
# dfs.loc[i, "sample_python"] = dfs.loc[i, "uniprot":"mut"].dropna().astype(str).str.cat()
if dfs["sample"].tolist() != dfs.sample_python.tolist():
raise ValueError("sample column in excel does not exactly match data in preceding columns. Try repeating fill-down.")
sample_name_dict = dfs.set_index("Vm#")["sample"].to_dict()
order_dict = dfs.set_index("Vm#")["order"].to_dict()
include_data_dict = dfs.set_index("Vm#")["include_data"].to_dict()
"""
dfs should now look like this
Vm# uniprot - frame _ shortname __ mut sample notes include_data sample_python
# #
1 Vm01 P02724 - 0 _ GpA _ wt P02724-0_GpA_wt GpA 1 True P02724-0_GpA_wt
2 Vm02 ΔTM ΔTM ΔTM 2 True ΔTM
3 Vm03 Q6ZRP7 - 2 _ QSOX2 _ L4A Q6ZRP7-2_QSOX2_L4A QSOX2_L4A 3 True Q6ZRP7-2_QSOX2_L4A
4 Vm04 Q6ZRP7 - 2 _ QSOX2 _ S8A Q6ZRP7-2_QSOX2_S8A QSOX2_S8A 4 True Q6ZRP7-2_QSOX2_S8A
5 Vm05 Q6ZRP7 - 2 _ QSOX2 _ S8Q Q6ZRP7-2_QSOX2_S8Q QSOX2_S8Q 5 True Q6ZRP7-2_QSOX2_S8Q
"""
dfd["Vm"] = dfd.index
dfd["sample"] = dfd["Vm"].replace(sample_name_dict)
dfd["order"] = dfd["Vm"].replace(order_dict)
dfd["include_data"] = dfd["Vm"].replace(include_data_dict)
# note that there has been an error in some samples
dfd.loc[sample_names_with_no_fit, "error"] = "NoFit"
dfd["error"] = dfd["error"].fillna("none")
# change the column order
col_order = ['sample', 'order','include_data', 'error', 'OD1', 'OD2', 'OD3','OD_mean', 'OD_std','Vi1', 'Vi2', 'Vi3', 'Vi_mean', 'Vi_std', 'MU1', 'MU2', 'MU3', 'MU_mean', 'MU_std', 'Vm']
dfd = dfd.reindex(columns=col_order)
dfd.sort_values("order", inplace=True)
dfn = pd.DataFrame()
if True in dfs.standard.tolist():
standard_subset = dfs.loc[dfs.standard == True]
standard_sample_name_list = standard_subset.sample_python.unique()
if len(standard_sample_name_list) != 1:
raise ValueError("Multiple standards are selected. Double-check excel file with sample names.")
standard_sample_name = standard_sample_name_list[0]
standard_Vm_numbers = standard_subset.index.tolist()
standard_values_ser_or_df = dfd.loc[dfd["sample"] == standard_sample_name]
if isinstance(standard_values_ser_or_df, pd.DataFrame):
standard_mean_values_ser = standard_values_ser_or_df.mean(axis=0)
elif isinstance(standard_values_ser_or_df, pd.Series):
standard_mean_values_ser = standard_values_ser_or_df
else:
raise TypeError("Oops! 'standard_values_ser_or_df' is neither a dataframe nor a series")
for i in range(dfd.shape[0]):
row_series = dfd.iloc[i, :]
Vm = dfd.index[i]
dfn[Vm] = row_series / standard_mean_values_ser
dfn = dfn.T
""" Norm dataframe has problem in that the Std is 1 for the standard. (norm to standard, not std/mean_of_standard)
This is fixed as below.
OD2 OD3 OD_mean OD_std Vi1 Vi2 \
Vm01 1 1 1 1 1 1
Vm17 1.09716 0.692331 0.966009 3.70093 3.08974 2.61241
Vm25 0.19164 1.3053 0.566228 8.62627 0.360652 -0.164633
Vm02 1.30523 1.44421 1.41189 0.0739331 0.045963 0.0785936
"""
datatypes = ["OD", "Vi", "MU"]
for datatype in datatypes:
std_column = dfd["{}_std".format(datatype)]
std_normed = std_column / standard_mean_values_ser["{}_mean".format(datatype)]
# replace the column in dfn
dfn["{}_std".format(datatype)] = std_normed
"""std is corrected, and should now look like this (a value other than 1 for the standard!)
Vi3 Vi_mean Vi_std Vm error include_data order sample
Vm01 1 1 0.117211 NaN NaN 1 1 NaN
Vm17 2.56545 2.75133 0.369209 NaN NaN 1 2 NaN
Vm25 0.201911 0.114587 0.273156 NaN NaN 0 3 NaN
"""
for transferred_col in ["sample", "order", "Vm", "include_data"]:
dfn[transferred_col] = dfd[transferred_col]
dfn = dfn.reindex(columns=col_order)
dfn.sort_values("order", inplace=True)
"""
dfn still has the Vm numbers as the index, which allows duplicates (e.g. two wells of QSOX2_wt in this experiment)
sample order OD1 OD2 OD3 OD_mean
Vm01 P02724-0_GpA_wt 1 0.801497 0.853056 0.767382 0.8074
Vm17 AZ2 2 0.880439 0.935936 0.531282 0.779956
Vm25 P02724-0_GpA_G83A 3 0.185782 0.16348 1.00166 0.457173
Vm02 dTM 4 1.20262 1.11343 1.10826 1.13996
Vm09 Q6ZRP7-2_QSOX2_wt 5 1.05084 0.940833 0.935106 0.973782
Vm24 Q6ZRP7-2_QSOX2_wt 6 0.949156 1.05917 1.06489 1.02622
Vm10 Q6ZRP7-2_QSOX2_C1S 7 1.42985 1.56866 1.648 1.55175
Vm18 Q6ZRP7-2_QSOX2_V2A 8 1.40206 1.50719 1.54405 1.48644
Vm26 Q6ZRP7-2_QSOX2_V3A 9 1.11713 0.116117 1.39081 0.869624
Vm03 Q6ZRP7-2_QSOX2_L4A 10 0.931424 0.765291 0.812374 0.834106
"""
unique_samples = dfn["sample"].unique()
dfn.set_index("sample", inplace=True)
# exclude datapoints according to manual annotation in the "include_data" column
dfn_include_data = dfn.loc[dfn.include_data]
if dfn_include_data.empty:
# all data has been labelled for exclusion here
sys.stdout.write("\n'{}' skipped, all data labeled as FALSE for 'include_data' in samples excel file.".format(exp_name))
# skip this protein
return dfd
dfnu = pd.DataFrame()
for s in unique_samples:
if s not in dfn_include_data.index:
# skip this unique sample, as it was manually excluded via the "include_data" column
continue
row_df_or_ser = dfn_include_data.loc[s, :]
if isinstance(row_df_or_ser, pd.Series):
dfnu.loc[:, s] = row_df_or_ser
elif isinstance(row_df_or_ser, pd.DataFrame):
# take the order of the first duplicate row as the correct one
order = row_df_or_ser.iloc[0, :]["order"]
# drop the text rows, so the dtype is changed to float
# otherwise the mean of two values for the standard (which should give 1.0) returns np.nan
# reason for strange behaviour in pandas is unknown
row_df_or_ser = row_df_or_ser.drop(["Vm", "order"], axis=1)
mean_of_duplicate_samples = row_df_or_ser.mean(axis=0)
# add the single row to the unique dataframe
dfnu.loc[:, s] = mean_of_duplicate_samples
dfnu.loc["order", s] = order
dfnu = dfnu.T
dropped_cols = ["include_data", "error"]
dfnu.drop(dropped_cols, axis=1, inplace=True)
ttest_standard_true_selection = dfs[dfs.ttest_standard == True]
ttest_standard_samplename = ttest_standard_true_selection["sample"].unique()[0]
if ttest_standard_samplename == standard_sample_name:
sys.stdout.write("\n--------------------------------------------------------------------"
"Sorry, script is currently not designed to handle situations where"
"the TTEST standard is the same as the norm standard. TTEST results should be ignored. "
"Talk to Mark.------------------------------------------------------------------------\n")
ttest_standard_array = dfs.loc[dfs.ttest_standard == True].sample_python.unique()
if len(ttest_standard_array) == 1:
ttest_standard = ttest_standard_array[0]
if ttest_standard in dfnu.index:
dfnu.loc[ttest_standard, "ttest_standard"] = True
elif len(ttest_standard_array) > 1:
raise ValueError("seems to be two different ttest standards selected")
elif len(ttest_standard_array) == 0:
print("no ttest_standard selected")
"""
dfnu is now completely normalised to your standard (e.g. QSOX2).
This includes all OD measurements, initial velocity, and Miller Units.
The index is now unique. The values of duplicates show the mean for that experiment.
The values of duplicate standards (e.g. Q6ZRP7-2_QSOX2_wt) are now 1.0.
order OD1 OD2 OD3 OD_mean OD_ Vi1 Vi2 Vi3 Vi_mean Vi_std
P02724-0_GpA_wt 1 0.928313 0.898545 0.556443 0.748 0.0632076 0.594462 0.683314 0.632995 0.637554 0.669004
AZ2 2 0.709755 0.673359 0.401063 0.556855 0 0.563122 0.530232 0.466218 0.516645 0
P02724-0_GpA_G83A 3 5.40776 5.13045 3.05578 4.24278 0 0.778933 0.733438 0.644892 0.714644 0
dTM 4 3.29763 2.30533 0.788788 1.86296 2.09499 0.0715175 0.0668064 0.0597716 0.0656486 0.00467747
Q6ZRP7-2_QSOX2_wt 5 1 1 1 1 1 1 1 1 1 1
"""
dfd.index.name = "Vm"
dfd.to_csv(out_parsed_csv)
with pd.ExcelWriter(out_parsed_excel) as writer:
dfd.to_excel(writer, sheet_name="original")
dfn.to_excel(writer, sheet_name="normalised")
dfnu.to_excel(writer, sheet_name="norm_unique")
writer.close()
sys.stdout.write("\n'{}' parsed successfully".format(exp_name))
if skiprows == group_results_line + 1:
sys.stdout.write(" (Note: You may have an older version of Pandas or python. A skiprows error has been averted.)")
sys.stdout.flush()
return dfd
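# --- Added usage sketch (not part of the original module) ---
# Minimal illustration of calling parse_softmax directly for a single experiment.
# Both file paths are hypothetical placeholders; only the call signature and the
# returned dataframe columns follow the function above.
def _example_parse_single_experiment():
    """Sketch: parse one SoftMax Pro txt export with its matching sample-name excel file."""
    txt_path = r"D:\ToxR_data\2016_05_01_exp1.txt"      # hypothetical SoftMax Pro export
    samples_path = r"D:\ToxR_data\2016_05_01_exp1.xls"  # hypothetical excel file with sample names
    dfd = parse_softmax(txt_path, samples_path)
    # dfd is indexed by Vm number and holds the replicate, mean and std columns
    print(dfd[["sample", "MU_mean", "MU_std"]].head())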
def parse_all_data_files_in_folder(target_dir, reparse_existing=False):
"""Parse all ToxR SoftMax Pro txt files in a given directory.
Parameters
----------
target_dir : str
Directory with .pda, .txt and .xls input files
"""
txt_file_list = glob.glob(os.path.join(target_dir, "*.txt"))
counter = 0
for txt_file in txt_file_list:
pda_file_path = txt_file[:-4] + ".pda"
samples_file_path = txt_file[:-4] + ".xls"
exp_name = os.path.basename(txt_file)[:-4][0:60]
out_dir = os.path.join(os.path.dirname(txt_file), exp_name)
excel_format = ".xls"
out_parsed_excel = os.path.join(out_dir, "{}_parsed{}".format(exp_name, excel_format))
if os.path.isfile(pda_file_path) and os.path.isfile(samples_file_path):
if not reparse_existing:
if os.path.isfile(out_parsed_excel):
# skip analysis of this file
sys.stdout.write("\n{} skipped, parsed output file already exists.".format(exp_name))
sys.stdout.flush()
continue
dfd = parse_softmax(txt_file, samples_file_path)
counter += 1
sys.stdout.write("\n\nparse_all_data_files_in_folder is finished.\n{} file(s) parsed in total.\n-----------------------------------------------------------------\n".format(counter))
sys.stdout.flush()
def contains_kw(string):
control_keywords = ["GpA", "dTM", "AZ2"]
for kw in control_keywords:
if kw in string:
return True
return False
def extract_mut_names_from_collected_data(collected_xls, scan_mut_xlsx, stat_method="mean"):
"""Parses the collected.xls file with ToxR data so that it can be used as input
for scanning mutagenesis barchart creation.
Parameters
----------
collected_xls : str
Path to input collected.xls
scan_mut_xlsx : str
Path to output scan_mut.xlsx
"""
df_coll = pd.read_excel(collected_xls, sheetname="Vi_describe")
""" df_coll looks like this
count mean std min 25% 50% 75% max sem order
P02724-0_GpA_wt 7 1.000000 0.000000 1.000000 1.000000 1.000000 1.000000 1.000000 0.000000 1
AZ2 7 0.761144 0.061944 0.631301 0.760551 0.775837 0.786084 0.827599 0.023413 2
P02724-0_GpA_G83A 7 0.376627 0.204756 0.088955 0.252525 0.295542 0.566675 0.613493 0.077390 3
dTM 7 0.036521 0.021639 0.005306 0.028094 0.036204 0.040399 0.077153 0.008179 4
Q6ZRP7-2_QSOX2_wt 7 0.906791 0.410194 0.528483 0.606863 0.811692 1.051753 1.690128 0.155039 5
Q6ZRP7-2_QSOX2_C1S 5 0.880809 0.143449 0.773950 0.791562 0.836428 0.874484 1.127623 0.064153 7
"""
# identify common controls and remove them from the sample list
df_coll["name"] = df_coll.index
df_coll["contains_kw"] = df_coll["name"].apply(contains_kw).tolist()
df_coll = df_coll.loc[~df_coll["contains_kw"]].copy()
# extract orig_aa, final_aa, aa_position etc from the full sample name (e.g. Q6ZRP7-2_QSOX2_C1S)
df_coll["mut"] = df_coll["name"].str.split("_").str[-1]
df_coll["orig_aa"] = df_coll["mut"].str[0]
df_coll["target_aa"] = df_coll["mut"].str[-1]
df_coll["aa_position_unit"] = df_coll["mut"].str[1:-1]
# assume that any name that contains "wt" is the wildtype.
wt_name = df_coll.loc[df_coll["name"].str.contains("wt")].index
    # if more than one name contains "wt", raise an error
    assert len(wt_name) == 1
    # IMPORTANT. Get the value for the wildtype. Used to normalise all values to wt later.
    wt_value = float(df_coll.loc[wt_name, stat_method])
# rename some values in the the wildtype row
df_coll.loc[wt_name, "orig_aa"] = "WT"
df_coll.loc[wt_name, "target_aa"] = "B"
df_coll.loc[wt_name, "aa_position_unit"] = "0"
"""df_coll now looks something like this:
count mean std min 25% 50% 75% max sem order name contains_kw mut orig_aa target_aa aa_position_unit
Q6ZRP7-2_QSOX2_wt 7 0.906791 0.410194 0.528483 0.606863 0.811692 1.051753 1.690128 0.155039 5 Q6ZRP7-2_QSOX2_wt False wt WT B 0
Q6ZRP7-2_QSOX2_C1S 5 0.880809 0.143449 0.773950 0.791562 0.836428 0.874484 1.127623 0.064153 7 Q6ZRP7-2_QSOX2_C1S False C1S C S 1
Q6ZRP7-2_QSOX2_V2A 5 0.737907 0.259606 0.553112 0.555503 0.569161 0.883270 1.128490 0.116099 8 Q6ZRP7-2_QSOX2_V2A False V2A V A 2
"""
# take only the desired columns
df = df_coll[["count", "mean", "median", "std", "sem", "orig_aa", "target_aa", "aa_position_unit", "ttest_p_value"]].copy()
# normalise the mean, median, standard-deviation and SEM to the wildtype value
for col in ["mean", "median", "std", "sem"]:
df[col] = df[col] / wt_value
# rename columns according to a dictionary
rename_dict = {"std": "SD", "sem": "SE", "count": "Rep", stat_method: "perc_WT", "aa_position_unit": "amino acid position",
"orig_aa": "original amino acid", "target_aa": "final amino acid", "ttest_p_value" : "p_value"}
df.rename(columns=rename_dict, inplace=True)
# some graphs need a p-value
if "p_value" not in df.columns:
df["p_value"] = 1
df.index = range(df_coll.shape[0])
df.to_excel(scan_mut_xlsx)
print("extract_mut_names_from_collected_data is finished") | mit |
fabioticconi/scikit-learn | examples/svm/plot_svm_anova.py | 85 | 2024 | """
=================================================
SVM-Anova: SVM with univariate feature selection
=================================================
This example shows how to perform univariate feature selection before running an
SVC (support vector classifier) to improve the classification scores.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets, feature_selection
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import Pipeline
###############################################################################
# Import some data to play with
digits = datasets.load_digits()
y = digits.target
# Throw away data, to be in the curse-of-dimensionality setting
y = y[:200]
X = digits.data[:200]
n_samples = len(y)
X = X.reshape((n_samples, -1))
# add 200 non-informative features
X = np.hstack((X, 2 * np.random.random((n_samples, 200))))
###############################################################################
# Create a feature-selection transform and an instance of SVM that we
# combine together to have a full-blown estimator
transform = feature_selection.SelectPercentile(feature_selection.f_classif)
clf = Pipeline([('anova', transform), ('svc', svm.SVC(C=1.0))])
###############################################################################
# Plot the cross-validation score as a function of percentile of features
score_means = list()
score_stds = list()
percentiles = (1, 3, 6, 10, 15, 20, 30, 40, 60, 80, 100)
for percentile in percentiles:
clf.set_params(anova__percentile=percentile)
# Compute cross-validation score using 1 CPU
this_scores = cross_val_score(clf, X, y, n_jobs=1)
score_means.append(this_scores.mean())
score_stds.append(this_scores.std())
plt.errorbar(percentiles, score_means, np.array(score_stds))
plt.title(
'Performance of the SVM-Anova varying the percentile of features selected')
plt.xlabel('Percentile')
plt.ylabel('Prediction rate')
plt.axis('tight')
plt.show()
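# --- Added follow-up sketch (not part of the original example) ---
# Report which percentile of features gave the best mean cross-validation score,
# reusing the score_means and percentiles computed in the loop above.
best_idx = int(np.argmax(score_means))
print("Best percentile of features: %d (mean CV score %.3f)"
      % (percentiles[best_idx], score_means[best_idx]))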
| bsd-3-clause |
cainiaocome/scikit-learn | sklearn/manifold/tests/test_locally_linear.py | 232 | 4761 | from itertools import product
from nose.tools import assert_true
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from scipy import linalg
from sklearn import neighbors, manifold
from sklearn.manifold.locally_linear import barycenter_kneighbors_graph
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import ignore_warnings
eigen_solvers = ['dense', 'arpack']
#----------------------------------------------------------------------
# Test utility routines
def test_barycenter_kneighbors_graph():
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
A = barycenter_kneighbors_graph(X, 1)
assert_array_almost_equal(
A.toarray(),
[[0., 1., 0.],
[1., 0., 0.],
[0., 1., 0.]])
A = barycenter_kneighbors_graph(X, 2)
# check that columns sum to one
assert_array_almost_equal(np.sum(A.toarray(), 1), np.ones(3))
pred = np.dot(A.toarray(), X)
assert_less(linalg.norm(pred - X) / X.shape[0], 1)
#----------------------------------------------------------------------
# Test LLE by computing the reconstruction error on some manifolds.
def test_lle_simple_grid():
# note: ARPACK is numerically unstable, so this test will fail for
# some random seeds. We choose 2 because the tests pass.
rng = np.random.RandomState(2)
tol = 0.1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(5), repeat=2)))
X = X + 1e-10 * rng.uniform(size=X.shape)
n_components = 2
clf = manifold.LocallyLinearEmbedding(n_neighbors=5,
n_components=n_components,
random_state=rng)
tol = 0.1
N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
reconstruction_error = linalg.norm(np.dot(N, X) - X, 'fro')
assert_less(reconstruction_error, tol)
for solver in eigen_solvers:
clf.set_params(eigen_solver=solver)
clf.fit(X)
assert_true(clf.embedding_.shape[1] == n_components)
reconstruction_error = linalg.norm(
np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
assert_less(reconstruction_error, tol)
assert_almost_equal(clf.reconstruction_error_,
reconstruction_error, decimal=1)
# re-embed a noisy version of X using the transform method
noise = rng.randn(*X.shape) / 100
X_reembedded = clf.transform(X + noise)
assert_less(linalg.norm(X_reembedded - clf.embedding_), tol)
def test_lle_manifold():
rng = np.random.RandomState(0)
# similar test on a slightly more complex manifold
X = np.array(list(product(np.arange(18), repeat=2)))
X = np.c_[X, X[:, 0] ** 2 / 18]
X = X + 1e-10 * rng.uniform(size=X.shape)
n_components = 2
for method in ["standard", "hessian", "modified", "ltsa"]:
clf = manifold.LocallyLinearEmbedding(n_neighbors=6,
n_components=n_components,
method=method, random_state=0)
tol = 1.5 if method == "standard" else 3
N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
reconstruction_error = linalg.norm(np.dot(N, X) - X)
assert_less(reconstruction_error, tol)
for solver in eigen_solvers:
clf.set_params(eigen_solver=solver)
clf.fit(X)
assert_true(clf.embedding_.shape[1] == n_components)
reconstruction_error = linalg.norm(
np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
details = ("solver: %s, method: %s" % (solver, method))
assert_less(reconstruction_error, tol, msg=details)
assert_less(np.abs(clf.reconstruction_error_ -
reconstruction_error),
tol * reconstruction_error, msg=details)
def test_pipeline():
# check that LocallyLinearEmbedding works fine as a Pipeline
# only checks that no error is raised.
# TODO check that it actually does something useful
from sklearn import pipeline, datasets
X, y = datasets.make_blobs(random_state=0)
clf = pipeline.Pipeline(
[('filter', manifold.LocallyLinearEmbedding(random_state=0)),
('clf', neighbors.KNeighborsClassifier())])
clf.fit(X, y)
assert_less(.9, clf.score(X, y))
# Test the error raised when the weight matrix is singular
def test_singular_matrix():
from nose.tools import assert_raises
M = np.ones((10, 3))
f = ignore_warnings
assert_raises(ValueError, f(manifold.locally_linear_embedding),
M, 2, 1, method='standard', eigen_solver='arpack')
| bsd-3-clause |
low-sky/simscript | postproc/pipeline_orion.py | 1 | 1278 | import subprocess
import sys
import postproc_yt_orion as pp
import os
import shutil
import matplotlib
matplotlib.use('Agg')
targetdir = sys.argv[1]
# timestep = float(sys.argv[2])
data_file = sys.argv[2]
face = float(sys.argv[3])
level = float(sys.argv[4])
ppdir = os.getenv('PPDIR')
datadir = os.getenv("DATADIR")
outdir = os.getenv('PPOUTDIR')
radmcdir = os.getenv("RADMCDIR")
# ppdir = '/home/e/eros/eros/code/simscript/postproc/'
# datadir = '/home/e/eros/eros/scratch/test/'
# outdir = '/home/e/eros/eros/scratch/test/'
# radmcdir = '/home/e/eros/eros/project/'
D = pp.FileSetup(targetdir, data_file,
face=face, level=level,
ppdir=ppdir)
# pp.ProblemSetup(D['FileName'], face=face, dust_temp=D['GasTemp'])
os.chdir(D['TempDir'])
print(D)
command = radmcdir + 'radmc3d image npix ' + \
str(int(D['GridSize'])) + \
' iline 1 widthkms 10 linenlam 500 loadlambda fluxcons inclline linelist nostar writepop doppcatch sizepc 10 norefine'
print(command)
result = subprocess.call(command, shell=True)
print(result)
print(D['FileName'])
save_name = os.path.join(outdir, D['FileName'] + '_radmc.fits')
pp.MakeFits(fitsfile=save_name, dpc=260.0, toK=True)
shutil.move(save_name, outdir)
os.chdir(outdir)
# shutil.rmtree(D['TempDir'])
| gpl-2.0 |
tum-camp/survival-support-vector-machine | survival/svm/minlip.py | 1 | 12742 | import numpy
from scipy import sparse
from sklearn.base import BaseEstimator
from sklearn.metrics.pairwise import pairwise_kernels
from ..base import SurvivalAnalysisMixin
from ..util import check_arrays_survival
from ._minlip import create_difference_matrix
__all__ = ['MinlipSurvivalAnalysis', 'HingeLossSurvivalSVM']
class MinlipSurvivalAnalysis(BaseEstimator, SurvivalAnalysisMixin):
"""Survival model related to survival SVM, using a minimal Lipschitz smoothness strategy
instead of a maximal margin strategy.
.. math::
\\min_{\\mathbf{w}}\\quad
\\frac{1}{2} \\lVert \\mathbf{w} \\rVert_2^2
+ \\gamma \\sum_{i = 1}^n \\xi_i \\\\
\\text{subject to}\\quad
\\mathbf{w}^\\top \\mathbf{x}_i - \\mathbf{w}^\\top \\mathbf{x}_j \\geq y_i - y_j - \\xi_i,\\quad
\\forall (i, j) \\in \\mathcal{P}_\\text{1-NN}, \\\\
\\xi_i \\geq 0,\\quad \\forall i = 1,\\dots,n.
\\mathcal{P}_\\text{1-NN} = \\{ (i, j) \\mid y_i > y_j \\land \\delta_j = 1
\\land \\nexists k : y_i > y_k > y_j \\land \\delta_k = 1 \\}_{i,j=1}^n.
Parameters
----------
solver : "cvxpy" | "cvxopt", optional (default: cvxpy)
Which quadratic program solver to use.
alpha : float, positive (default: 1)
Weight of penalizing the hinge loss in the objective function.
kernel : "linear" | "poly" | "rbf" | "sigmoid" | "cosine" | "precomputed"
Kernel.
Default: "linear"
gamma : float, optional
Kernel coefficient for rbf and poly kernels. Default: ``1/n_features``.
Ignored by other kernels.
degree : int (default=3)
Degree for poly kernels. Ignored by other kernels.
coef0 : float, optional
Independent term in poly and sigmoid kernels.
Ignored by other kernels.
kernel_params : mapping of string to any, optional
        Parameters (keyword arguments) and values for kernel passed as callable object.
pairs : "all" | "nearest" | "next", optional (default: "nearest")
Which constraints to use in the optimization problem.
- all: Use all comparable pairs. Scales quadratic in number of samples
(cf. :class:`survival.svm.HingeLossSurvivalSVM`).
- nearest: Only considers comparable pairs :math:`(i, j)` where :math:`j` is the
uncensored sample with highest survival time smaller than :math:`y_i`.
Scales linear in number of samples.
- next: Only compare against direct nearest neighbor according to observed time,
disregarding its censoring status. Scales linear in number of samples.
verbose : bool (default: False)
Enable verbose output of solver
timeit : False or int
If non-zero value is provided the time it takes for optimization is measured.
The given number of repetitions are performed. Results can be accessed from the
``timings_`` attribute.
Attributes
----------
`X_fit_` :
Training data.
`coef_` :
Coefficients of the features in the decision function.
References
----------
.. [1] Van Belle, V., Pelckmans, K., Suykens, J. A. K., and Van Huffel, S.
Learning transformation models for ranking and survival analysis.
The Journal of Machine Learning Research, 12, 819-862. 2011
"""
def __init__(self, solver="cvxpy",
alpha=1.0, kernel="linear", gamma=None, degree=3, coef0=1, kernel_params=None,
pairs="nearest", verbose=False, timeit=None):
self.solver = solver
self.alpha = alpha
self.kernel = kernel
self.gamma = gamma
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
self.pairs = pairs
self.verbose = verbose
self.timeit = timeit
@property
def _pairwise(self):
# tell sklearn.cross_validation._safe_split function that we expect kernel matrix
return self.kernel == "precomputed"
def _get_kernel(self, X, Y=None):
if callable(self.kernel):
params = self.kernel_params or {}
else:
params = {"gamma": self.gamma,
"degree": self.degree,
"coef0": self.coef0}
return pairwise_kernels(X, Y, metric=self.kernel,
filter_params=True, **params)
def _fit(self, x, event, time):
D = create_difference_matrix(event.astype(numpy.uint8), time, kind=self.pairs)
K = self._get_kernel(x)
if self.solver == "cvxpy":
fit_func = self._fit_cvxpy
elif self.solver == "cvxopt":
fit_func = self._fit_cvxopt
else:
raise ValueError("unknown solver: {}".format(self.solver))
if self.timeit is not None:
import timeit
def _inner():
return fit_func(K, D, time)
timer = timeit.Timer(_inner)
self.timings_ = timer.repeat(self.timeit, number=1)
coef, sv = fit_func(K, D, time)
if sv is None:
self.coef_ = coef * D
else:
self.coef_ = coef[:, sv] * D[sv, :]
self.X_fit_ = x
def _fit_cvxpy(self, K, D, time):
import cvxpy
n_pairs = D.shape[0]
a = cvxpy.Variable(n_pairs)
P = D.dot(D.dot(K).T).T
q = D.dot(time)
obj = cvxpy.Minimize(0.5 * cvxpy.quad_form(a, P) - a.T * q)
assert obj.is_dcp()
alpha = cvxpy.Parameter(sign="positive", value=self.alpha)
constraints = [a >= 0., -alpha <= D.T * a, D.T * a <= alpha]
prob = cvxpy.Problem(obj, constraints)
prob.solve(verbose=self.verbose)
return a.value.T.A, None
def _fit_cvxopt(self, K, D, time):
import cvxopt
n_samples = K.shape[0]
P = D.dot(D.dot(K).T).T
q = -D.dot(time)
high = numpy.repeat(self.alpha, n_samples * 2)
n_pairs = D.shape[0]
G = sparse.vstack((D.T, -D.T, -sparse.eye(n_pairs)))
h = numpy.concatenate((high, numpy.zeros(n_pairs)))
Gsp = cvxopt.matrix(G.toarray())
# Gsp = cvxopt.spmatrix(G.data, G.row, G.col, G.shape)
cvxopt.solvers.options["show_progress"] = int(self.verbose)
sol = cvxopt.solvers.qp(cvxopt.matrix(P), cvxopt.matrix(q), Gsp, cvxopt.matrix(h))
return numpy.array(sol['x']).T, None
def fit(self, X, y):
"""Build a MINLIP survival model from training data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Data matrix.
y : structured array, shape = [n_samples]
A structured array containing the binary event indicator
as first field, and time of event or time of censoring as
second field.
Returns
-------
self
"""
X, event, time = check_arrays_survival(X, y)
self._fit(X, event, time)
return self
def predict(self, X):
"""Predict risk score of experiencing an event.
Higher scores indicate shorter survival (high risk),
lower scores longer survival (low risk).
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape = [n_samples]
Predicted risk.
"""
K = self._get_kernel(X, self.X_fit_)
pred = -numpy.dot(self.coef_, K.T)
return pred.ravel()
class HingeLossSurvivalSVM(MinlipSurvivalAnalysis):
"""Naive implementation of kernel survival support vector machine.
A new set of samples is created by building the difference between any two feature
vectors in the original data, thus this version requires :math:`O(\\text{n_samples}^4)` space and
    :math:`O(\\text{n_samples}^6 \\cdot \\text{n_features})` time.
See :class:`survival.svm.NaiveSurvivalSVM` for the linear naive survival SVM based on liblinear.
.. math::
\\min_{\\mathbf{w}}\\quad
\\frac{1}{2} \\lVert \\mathbf{w} \\rVert_2^2
+ \\gamma \\sum_{i = 1}^n \\xi_i \\\\
\\text{subject to}\\quad
\\mathbf{w}^\\top \\phi(\\mathbf{x})_i - \\mathbf{w}^\\top \\phi(\\mathbf{x})_j \\geq 1 - \\xi_{ij},\\quad
\\forall (i, j) \\in \\mathcal{P}, \\\\
\\xi_i \\geq 0,\\quad \\forall (i, j) \\in \\mathcal{P}.
\\mathcal{P} = \\{ (i, j) \\mid y_i > y_j \\land \\delta_j = 1 \\}_{i,j=1,\\dots,n}.
Parameters
----------
solver : "cvxpy" | "cvxopt", optional (default: cvxpy)
Which quadratic program solver to use.
alpha : float, positive
Weight of penalizing the hinge loss in the objective function (default: 1)
kernel : "linear" | "poly" | "rbf" | "sigmoid" | "cosine" | "precomputed"
Kernel.
Default: "linear"
gamma : float, optional
Kernel coefficient for rbf and poly kernels. Default: ``1/n_features``.
Ignored by other kernels.
degree : int (default=3)
Degree for poly kernels. Ignored by other kernels.
coef0 : float, optional
Independent term in poly and sigmoid kernels.
Ignored by other kernels.
kernel_params : mapping of string to any, optional
        Parameters (keyword arguments) and values for kernel passed as callable object.
pairs : "all" | "nearest" | "next", optional (default: "all")
Which constraints to use in the optimization problem.
- all: Use all comparable pairs. Scales quadratic in number of samples.
- nearest: Only considers comparable pairs :math:`(i, j)` where :math:`j` is the
uncensored sample with highest survival time smaller than :math:`y_i`.
Scales linear in number of samples (cf. :class:`survival.svm.MinlipSurvivalSVM`).
- next: Only compare against direct nearest neighbor according to observed time,
disregarding its censoring status. Scales linear in number of samples.
verbose : bool (default: False)
Enable verbose output of solver.
timeit : False or int
If non-zero value is provided the time it takes for optimization is measured.
The given number of repetitions are performed. Results can be accessed from the
``timings_`` attribute.
Attributes
----------
`X_fit_` :
Training data.
`coef_` :
Coefficients of the features in the decision function.
References
----------
.. [1] Van Belle, V., Pelckmans, K., Suykens, J. A., & Van Huffel, S.
Support Vector Machines for Survival Analysis. In Proc. of the 3rd Int. Conf.
on Computational Intelligence in Medicine and Healthcare (CIMED). 1-8. 2007
.. [2] Evers, L., Messow, C.M.,
"Sparse kernel methods for high-dimensional survival data",
Bioinformatics 24(14), 1632-8, 2008.
.. [3] Van Belle, V., Pelckmans, K., Suykens, J.A., Van Huffel, S.,
"Survival SVM: a practical scalable algorithm",
In: Proc. of 16th European Symposium on Artificial Neural Networks,
89-94, 2008.
"""
def __init__(self, solver="cvxpy",
alpha=1.0, kernel="linear", gamma=None, degree=3, coef0=1, kernel_params=None,
pairs="all", verbose=False, timeit=None):
super().__init__(solver=solver, alpha=alpha, kernel=kernel, gamma=gamma, degree=degree, coef0=coef0,
kernel_params=kernel_params, pairs=pairs, verbose=verbose, timeit=timeit)
def _fit_cvxpy(self, K, D, time):
import cvxpy
n_pairs = D.shape[0]
a = cvxpy.Variable(n_pairs)
alpha = cvxpy.Parameter(sign="positive", value=self.alpha)
P = D.dot(D.dot(K).T).T
obj = cvxpy.Minimize(0.5 * cvxpy.quad_form(a, P) - cvxpy.sum_entries(a))
constraints = [a >= 0., a <= alpha]
prob = cvxpy.Problem(obj, constraints)
prob.solve(verbose=self.verbose)
coef = a.value.T.A
sv = numpy.flatnonzero(coef > 1e-5)
return coef, sv
def _fit_cvxopt(self, K, D, time):
import cvxopt
n_pairs = D.shape[0]
P = D.dot(D.dot(K).T).T
q = -numpy.ones(n_pairs)
G = numpy.vstack((-numpy.eye(n_pairs), numpy.eye(n_pairs)))
h = numpy.concatenate((numpy.zeros(n_pairs), numpy.repeat(self.alpha, n_pairs)))
cvxopt.solvers.options["show_progress"] = int(self.verbose)
sol = cvxopt.solvers.qp(cvxopt.matrix(P), cvxopt.matrix(q), cvxopt.matrix(G), cvxopt.matrix(h))
coef = numpy.array(sol['x']).T
sv = numpy.flatnonzero(coef > 1e-5)
return coef, sv
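# --- Added usage sketch (not part of the original module) ---
# Hedged illustration of fitting the MINLIP model above on a tiny synthetic dataset.
# The structured-array layout for ``y`` (event indicator first, observed time second)
# follows the ``fit`` docstring; the field names and data values are assumptions.
def _example_minlip_fit():
    rng = numpy.random.RandomState(0)
    x = rng.randn(30, 4)
    y = numpy.empty(30, dtype=[("event", bool), ("time", float)])
    y["event"] = rng.rand(30) > 0.4              # roughly 60% uncensored samples
    y["time"] = rng.exponential(10.0, size=30)   # observed event / censoring times
    model = MinlipSurvivalAnalysis(solver="cvxpy", kernel="linear", alpha=1.0)
    model.fit(x, y)
    return model.predict(x[:5])                  # higher score = higher predicted risk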
| gpl-3.0 |
pystockhub/book | ch18/day03/Kiwoom.py | 2 | 8383 | import sys
from PyQt5.QtWidgets import *
from PyQt5.QAxContainer import *
from PyQt5.QtCore import *
import time
import pandas as pd
import sqlite3
TR_REQ_TIME_INTERVAL = 0.2
class Kiwoom(QAxWidget):
def __init__(self):
super().__init__()
self._create_kiwoom_instance()
self._set_signal_slots()
def _create_kiwoom_instance(self):
self.setControl("KHOPENAPI.KHOpenAPICtrl.1")
def _set_signal_slots(self):
self.OnEventConnect.connect(self._event_connect)
self.OnReceiveTrData.connect(self._receive_tr_data)
self.OnReceiveChejanData.connect(self._receive_chejan_data)
def comm_connect(self):
self.dynamicCall("CommConnect()")
self.login_event_loop = QEventLoop()
self.login_event_loop.exec_()
def _event_connect(self, err_code):
if err_code == 0:
print("connected")
else:
print("disconnected")
self.login_event_loop.exit()
def get_code_list_by_market(self, market):
code_list = self.dynamicCall("GetCodeListByMarket(QString)", market)
code_list = code_list.split(';')
return code_list[:-1]
def get_master_code_name(self, code):
code_name = self.dynamicCall("GetMasterCodeName(QString)", code)
return code_name
def get_connect_state(self):
ret = self.dynamicCall("GetConnectState()")
return ret
def get_login_info(self, tag):
ret = self.dynamicCall("GetLoginInfo(QString)", tag)
return ret
def set_input_value(self, id, value):
self.dynamicCall("SetInputValue(QString, QString)", id, value)
def comm_rq_data(self, rqname, trcode, next, screen_no):
self.dynamicCall("CommRqData(QString, QString, int, QString)", rqname, trcode, next, screen_no)
self.tr_event_loop = QEventLoop()
self.tr_event_loop.exec_()
def _comm_get_data(self, code, real_type, field_name, index, item_name):
ret = self.dynamicCall("CommGetData(QString, QString, QString, int, QString)", code,
real_type, field_name, index, item_name)
return ret.strip()
def _get_repeat_cnt(self, trcode, rqname):
ret = self.dynamicCall("GetRepeatCnt(QString, QString)", trcode, rqname)
return ret
def send_order(self, rqname, screen_no, acc_no, order_type, code, quantity, price, hoga, order_no):
self.dynamicCall("SendOrder(QString, QString, QString, int, QString, int, int, QString, QString)",
[rqname, screen_no, acc_no, order_type, code, quantity, price, hoga, order_no])
def get_chejan_data(self, fid):
ret = self.dynamicCall("GetChejanData(int)", fid)
return ret
def get_server_gubun(self):
ret = self.dynamicCall("KOA_Functions(QString, QString)", "GetServerGubun", "")
return ret
def _receive_chejan_data(self, gubun, item_cnt, fid_list):
print(gubun)
print(self.get_chejan_data(9203))
print(self.get_chejan_data(302))
print(self.get_chejan_data(900))
print(self.get_chejan_data(901))
def _receive_tr_data(self, screen_no, rqname, trcode, record_name, next, unused1, unused2, unused3, unused4):
if next == '2':
self.remained_data = True
else:
self.remained_data = False
if rqname == "opt10081_req":
self._opt10081(rqname, trcode)
elif rqname == "opw00001_req":
self._opw00001(rqname, trcode)
elif rqname == "opw00018_req":
self._opw00018(rqname, trcode)
try:
self.tr_event_loop.exit()
except AttributeError:
pass
@staticmethod
def change_format(data):
strip_data = data.lstrip('-0')
if strip_data == '' or strip_data == '.00':
strip_data = '0'
try:
format_data = format(int(strip_data), ',d')
except:
format_data = format(float(strip_data))
if data.startswith('-'):
format_data = '-' + format_data
return format_data
@staticmethod
def change_format2(data):
strip_data = data.lstrip('-0')
if strip_data == '':
strip_data = '0'
if strip_data.startswith('.'):
strip_data = '0' + strip_data
if data.startswith('-'):
strip_data = '-' + strip_data
return strip_data
def _opw00001(self, rqname, trcode):
d2_deposit = self._comm_get_data(trcode, "", rqname, 0, "d+2추정예수금")
self.d2_deposit = Kiwoom.change_format(d2_deposit)
def _opt10081(self, rqname, trcode):
data_cnt = self._get_repeat_cnt(trcode, rqname)
for i in range(data_cnt):
date = self._comm_get_data(trcode, "", rqname, i, "일자")
open = self._comm_get_data(trcode, "", rqname, i, "시가")
high = self._comm_get_data(trcode, "", rqname, i, "고가")
low = self._comm_get_data(trcode, "", rqname, i, "저가")
close = self._comm_get_data(trcode, "", rqname, i, "현재가")
volume = self._comm_get_data(trcode, "", rqname, i, "거래량")
self.ohlcv['date'].append(date)
self.ohlcv['open'].append(int(open))
self.ohlcv['high'].append(int(high))
self.ohlcv['low'].append(int(low))
self.ohlcv['close'].append(int(close))
self.ohlcv['volume'].append(int(volume))
def reset_opw00018_output(self):
self.opw00018_output = {'single': [], 'multi': []}
def _opw00018(self, rqname, trcode):
# single data
total_purchase_price = self._comm_get_data(trcode, "", rqname, 0, "총매입금액")
total_eval_price = self._comm_get_data(trcode, "", rqname, 0, "총평가금액")
total_eval_profit_loss_price = self._comm_get_data(trcode, "", rqname, 0, "총평가손익금액")
total_earning_rate = self._comm_get_data(trcode, "", rqname, 0, "총수익률(%)")
estimated_deposit = self._comm_get_data(trcode, "", rqname, 0, "추정예탁자산")
self.opw00018_output['single'].append(Kiwoom.change_format(total_purchase_price))
self.opw00018_output['single'].append(Kiwoom.change_format(total_eval_price))
self.opw00018_output['single'].append(Kiwoom.change_format(total_eval_profit_loss_price))
total_earning_rate = Kiwoom.change_format(total_earning_rate)
if self.get_server_gubun():
total_earning_rate = float(total_earning_rate) / 100
total_earning_rate = str(total_earning_rate)
self.opw00018_output['single'].append(total_earning_rate)
self.opw00018_output['single'].append(Kiwoom.change_format(estimated_deposit))
# multi data
rows = self._get_repeat_cnt(trcode, rqname)
for i in range(rows):
name = self._comm_get_data(trcode, "", rqname, i, "종목명")
quantity = self._comm_get_data(trcode, "", rqname, i, "보유수량")
purchase_price = self._comm_get_data(trcode, "", rqname, i, "매입가")
current_price = self._comm_get_data(trcode, "", rqname, i, "현재가")
eval_profit_loss_price = self._comm_get_data(trcode, "", rqname, i, "평가손익")
earning_rate = self._comm_get_data(trcode, "", rqname, i, "수익률(%)")
quantity = Kiwoom.change_format(quantity)
purchase_price = Kiwoom.change_format(purchase_price)
current_price = Kiwoom.change_format(current_price)
eval_profit_loss_price = Kiwoom.change_format(eval_profit_loss_price)
earning_rate = Kiwoom.change_format2(earning_rate)
self.opw00018_output['multi'].append([name, quantity, purchase_price, current_price, eval_profit_loss_price,
earning_rate])
if __name__ == "__main__":
app = QApplication(sys.argv)
kiwoom = Kiwoom()
kiwoom.comm_connect()
kiwoom.reset_opw00018_output()
account_number = kiwoom.get_login_info("ACCNO")
account_number = account_number.split(';')[0]
kiwoom.set_input_value("계좌번호", account_number)
kiwoom.comm_rq_data("opw00018_req", "opw00018", 0, "2000")
print(kiwoom.opw00018_output['single'])
print(kiwoom.opw00018_output['multi'])
| mit |
njwilson23/scipy | scipy/special/add_newdocs.py | 24 | 70839 | # Docstrings for generated ufuncs
#
# The syntax is designed to look like the function add_newdoc is being
# called from numpy.lib, but in this file add_newdoc puts the
# docstrings in a dictionary. This dictionary is used in
# generate_ufuncs.py to generate the docstrings for the ufuncs in
# scipy.special at the C level when the ufuncs are created at compile
# time.
from __future__ import division, print_function, absolute_import
docdict = {}
def get(name):
return docdict.get(name)
def add_newdoc(place, name, doc):
docdict['.'.join((place, name))] = doc
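# --- Added illustration (not part of the original module) ---
# Hedged sketch of how the registry above can be queried once the add_newdoc calls
# below have run: keys are "<module>.<name>" strings and get() returns the raw docstring.
def _example_docdict_lookup():
    """Sketch: look up one of the registered ufunc docstrings."""
    return get("scipy.special.airy")  # the docstring registered below, or None if absent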
add_newdoc("scipy.special", "sph_harm",
r"""
sph_harm(m, n, theta, phi)
Compute spherical harmonics.
.. math:: Y^m_n(\theta,\phi) = \sqrt{\frac{2n+1}{4\pi}\frac{(n-m)!}{(n+m)!}} e^{i m \theta} P^m_n(\cos(\phi))
Parameters
----------
m : int
``|m| <= n``; the order of the harmonic.
n : int
where `n` >= 0; the degree of the harmonic. This is often called
``l`` (lower case L) in descriptions of spherical harmonics.
theta : float
[0, 2*pi]; the azimuthal (longitudinal) coordinate.
phi : float
[0, pi]; the polar (colatitudinal) coordinate.
Returns
-------
y_mn : complex float
The harmonic :math:`Y^m_n` sampled at `theta` and `phi`
Notes
-----
There are different conventions for the meaning of input arguments
`theta` and `phi`. We take `theta` to be the azimuthal angle and
`phi` to be the polar angle. It is common to see the opposite
convention - that is `theta` as the polar angle and `phi` as the
azimuthal angle.
References
----------
.. [1] Digital Library of Mathematical Functions, 14.30. http://dlmf.nist.gov/14.30
""")
add_newdoc("scipy.special", "_ellip_harm",
"""
Internal function, use `ellip_harm` instead.
""")
add_newdoc("scipy.special", "_ellip_norm",
"""
Internal function, use `ellip_norm` instead.
""")
add_newdoc("scipy.special", "_lambertw",
"""
Internal function, use `lambertw` instead.
""")
add_newdoc("scipy.special", "airy",
"""
airy(z)
Airy functions and their derivatives.
Parameters
----------
z : float or complex
Argument.
Returns
-------
Ai, Aip, Bi, Bip
Airy functions Ai and Bi, and their derivatives Aip and Bip
Notes
-----
The Airy functions Ai and Bi are two independent solutions of y''(x) = x y.
""")
add_newdoc("scipy.special", "airye",
"""
airye(z)
Exponentially scaled Airy functions and their derivatives.
Scaling::
eAi = Ai * exp(2.0/3.0*z*sqrt(z))
eAip = Aip * exp(2.0/3.0*z*sqrt(z))
eBi = Bi * exp(-abs((2.0/3.0*z*sqrt(z)).real))
eBip = Bip * exp(-abs((2.0/3.0*z*sqrt(z)).real))
Parameters
----------
z : float or complex
Argument.
Returns
-------
eAi, eAip, eBi, eBip
Airy functions Ai and Bi, and their derivatives Aip and Bip
""")
add_newdoc("scipy.special", "bdtr",
"""
bdtr(k, n, p)
Binomial distribution cumulative distribution function.
Sum of the terms 0 through k of the Binomial probability density.
::
y = sum(nCj p**j (1-p)**(n-j),j=0..k)
Parameters
----------
k, n : int
Terms to include
p : float
Probability
Returns
-------
y : float
Sum of terms
""")
add_newdoc("scipy.special", "bdtrc",
"""
bdtrc(k, n, p)
Binomial distribution survival function.
Sum of the terms k+1 through n of the Binomial probability density
::
y = sum(nCj p**j (1-p)**(n-j), j=k+1..n)
Parameters
----------
k, n : int
Terms to include
p : float
Probability
Returns
-------
y : float
Sum of terms
""")
add_newdoc("scipy.special", "bdtri",
"""
bdtri(k, n, y)
Inverse function to bdtr vs. p
Finds probability `p` such that for the cumulative binomial
probability ``bdtr(k, n, p) == y``.
""")
add_newdoc("scipy.special", "bdtrik",
"""
bdtrik(y, n, p)
Inverse function to bdtr vs k
""")
add_newdoc("scipy.special", "bdtrin",
"""
bdtrin(k, y, p)
Inverse function to bdtr vs n
""")
add_newdoc("scipy.special", "binom",
"""
binom(n, k)
Binomial coefficient
""")
add_newdoc("scipy.special", "btdtria",
"""
btdtria(p, b, x)
Inverse of btdtr vs a
""")
add_newdoc("scipy.special", "btdtrib",
"""
btdtria(a, p, x)
Inverse of btdtr vs b
""")
add_newdoc("scipy.special", "bei",
"""
bei(x)
Kelvin function bei
""")
add_newdoc("scipy.special", "beip",
"""
beip(x)
Derivative of the Kelvin function bei
""")
add_newdoc("scipy.special", "ber",
"""
ber(x)
Kelvin function ber.
""")
add_newdoc("scipy.special", "berp",
"""
berp(x)
Derivative of the Kelvin function ber
""")
add_newdoc("scipy.special", "besselpoly",
r"""
besselpoly(a, lmb, nu)
Weighted integral of a Bessel function.
.. math::
\int_0^1 x^\lambda J_\nu(2 a x) \, dx
where :math:`J_\nu` is a Bessel function and :math:`\lambda=lmb`,
:math:`\nu=nu`.
""")
add_newdoc("scipy.special", "beta",
"""
beta(a, b)
Beta function.
::
beta(a,b) = gamma(a) * gamma(b) / gamma(a+b)
""")
add_newdoc("scipy.special", "betainc",
"""
betainc(a, b, x)
Incomplete beta integral.
Compute the incomplete beta integral of the arguments, evaluated
from zero to x::
gamma(a+b) / (gamma(a)*gamma(b)) * integral(t**(a-1) (1-t)**(b-1), t=0..x).
Notes
-----
The incomplete beta is also sometimes defined without the terms
in gamma, in which case the above definition is the so-called regularized
incomplete beta. Under this definition, you can get the incomplete beta by
multiplying the result of the scipy function by beta(a, b).
""")
add_newdoc("scipy.special", "betaincinv",
"""
betaincinv(a, b, y)
Inverse function to beta integral.
Compute x such that betainc(a,b,x) = y.
""")
add_newdoc("scipy.special", "betaln",
"""
betaln(a, b)
Natural logarithm of absolute value of beta function.
    Computes ``ln(abs(beta(a, b)))``.
""")
add_newdoc("scipy.special", "boxcox",
"""
boxcox(x, lmbda)
Compute the Box-Cox transformation.
The Box-Cox transformation is::
y = (x**lmbda - 1) / lmbda if lmbda != 0
log(x) if lmbda == 0
Returns `nan` if ``x < 0``.
Returns `-inf` if ``x == 0`` and ``lmbda < 0``.
Parameters
----------
x : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
y : array
Transformed data.
Notes
-----
.. versionadded:: 0.14.0
Examples
--------
>>> from scipy.special import boxcox
>>> boxcox([1, 4, 10], 2.5)
array([ 0. , 12.4 , 126.09110641])
>>> boxcox(2, [0, 1, 2])
array([ 0.69314718, 1. , 1.5 ])
""")
add_newdoc("scipy.special", "boxcox1p",
"""
boxcox1p(x, lmbda)
Compute the Box-Cox transformation of 1 + `x`.
The Box-Cox transformation computed by `boxcox1p` is::
y = ((1+x)**lmbda - 1) / lmbda if lmbda != 0
log(1+x) if lmbda == 0
Returns `nan` if ``x < -1``.
Returns `-inf` if ``x == -1`` and ``lmbda < 0``.
Parameters
----------
x : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
y : array
Transformed data.
Notes
-----
.. versionadded:: 0.14.0
Examples
--------
>>> from scipy.special import boxcox1p
>>> boxcox1p(1e-4, [0, 0.5, 1])
array([ 9.99950003e-05, 9.99975001e-05, 1.00000000e-04])
>>> boxcox1p([0.01, 0.1], 0.25)
array([ 0.00996272, 0.09645476])
""")
add_newdoc("scipy.special", "inv_boxcox",
"""
inv_boxcox(y, lmbda)
Compute the inverse of the Box-Cox transformation.
Find ``x`` such that::
y = (x**lmbda - 1) / lmbda if lmbda != 0
log(x) if lmbda == 0
Parameters
----------
y : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
x : array
Transformed data.
Notes
-----
.. versionadded:: 0.16.0
Examples
--------
>>> from scipy.special import boxcox, inv_boxcox
>>> y = boxcox([1, 4, 10], 2.5)
>>> inv_boxcox(y, 2.5)
array([1., 4., 10.])
""")
add_newdoc("scipy.special", "inv_boxcox1p",
"""
inv_boxcox1p(y, lmbda)
Compute the inverse of the Box-Cox transformation.
Find ``x`` such that::
y = ((1+x)**lmbda - 1) / lmbda if lmbda != 0
log(1+x) if lmbda == 0
Parameters
----------
y : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
x : array
Transformed data.
Notes
-----
.. versionadded:: 0.16.0
Examples
--------
>>> from scipy.special import boxcox1p, inv_boxcox1p
>>> y = boxcox1p([1, 4, 10], 2.5)
>>> inv_boxcox1p(y, 2.5)
array([1., 4., 10.])
""")
add_newdoc("scipy.special", "btdtr",
"""
btdtr(a,b,x)
Cumulative beta distribution.
Returns the area from zero to x under the beta density function::
        gamma(a+b)/(gamma(a)*gamma(b)) * integral(t**(a-1) (1-t)**(b-1), t=0..x)
See Also
--------
betainc
""")
add_newdoc("scipy.special", "btdtri",
"""
btdtri(a,b,p)
p-th quantile of the beta distribution.
This is effectively the inverse of btdtr returning the value of x for which
``btdtr(a,b,x) = p``
See Also
--------
betaincinv
""")
add_newdoc("scipy.special", "cbrt",
"""
cbrt(x)
Cube root of x
""")
add_newdoc("scipy.special", "chdtr",
"""
chdtr(v, x)
Chi square cumulative distribution function
Returns the area under the left hand tail (from 0 to x) of the Chi
square probability density function with v degrees of freedom::
1/(2**(v/2) * gamma(v/2)) * integral(t**(v/2-1) * exp(-t/2), t=0..x)
""")
add_newdoc("scipy.special", "chdtrc",
"""
chdtrc(v,x)
Chi square survival function
Returns the area under the right hand tail (from x to
infinity) of the Chi square probability density function with v
degrees of freedom::
1/(2**(v/2) * gamma(v/2)) * integral(t**(v/2-1) * exp(-t/2), t=x..inf)
""")
add_newdoc("scipy.special", "chdtri",
"""
chdtri(v,p)
Inverse to chdtrc
Returns the argument x such that ``chdtrc(v,x) == p``.
""")
add_newdoc("scipy.special", "chdtriv",
"""
chdtri(p, x)
Inverse to chdtr vs v
Returns the argument v such that ``chdtr(v, x) == p``.
""")
add_newdoc("scipy.special", "chndtr",
"""
chndtr(x, df, nc)
Non-central chi square cumulative distribution function
""")
add_newdoc("scipy.special", "chndtrix",
"""
chndtrix(p, df, nc)
Inverse to chndtr vs x
""")
add_newdoc("scipy.special", "chndtridf",
"""
chndtridf(x, p, nc)
Inverse to chndtr vs df
""")
add_newdoc("scipy.special", "chndtrinc",
"""
chndtrinc(x, df, p)
Inverse to chndtr vs nc
""")
add_newdoc("scipy.special", "cosdg",
"""
cosdg(x)
Cosine of the angle x given in degrees.
""")
add_newdoc("scipy.special", "cosm1",
"""
cosm1(x)
cos(x) - 1 for use when x is near zero.
""")
add_newdoc("scipy.special", "cotdg",
"""
cotdg(x)
Cotangent of the angle x given in degrees.
""")
add_newdoc("scipy.special", "dawsn",
"""
dawsn(x)
Dawson's integral.
Computes::
exp(-x**2) * integral(exp(t**2),t=0..x).
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
""")
add_newdoc("scipy.special", "ellipe",
"""
ellipe(m)
Complete elliptic integral of the second kind
This function is defined as
.. math:: E(m) = \\int_0^{\\pi/2} [1 - m \\sin(t)^2]^{1/2} dt
Parameters
----------
m : array_like
Defines the parameter of the elliptic integral.
Returns
-------
E : ndarray
Value of the elliptic integral.
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind, near m = 1
ellipk : Complete elliptic integral of the first kind
ellipkinc : Incomplete elliptic integral of the first kind
ellipeinc : Incomplete elliptic integral of the second kind
""")
add_newdoc("scipy.special", "ellipeinc",
"""
ellipeinc(phi, m)
Incomplete elliptic integral of the second kind
This function is defined as
.. math:: E(\\phi, m) = \\int_0^{\\phi} [1 - m \\sin(t)^2]^{1/2} dt
Parameters
----------
phi : array_like
amplitude of the elliptic integral.
m : array_like
parameter of the elliptic integral.
Returns
-------
E : ndarray
Value of the elliptic integral.
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind, near m = 1
ellipk : Complete elliptic integral of the first kind
ellipkinc : Incomplete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
""")
add_newdoc("scipy.special", "ellipj",
"""
ellipj(u, m)
Jacobian elliptic functions
Calculates the Jacobian elliptic functions of parameter m between
0 and 1, and real u.
Parameters
----------
m, u
Parameters
Returns
-------
sn, cn, dn, ph
The returned functions::
sn(u|m), cn(u|m), dn(u|m)
        The value ``ph`` is such that if ``u = ellipkinc(ph, m)``,
then ``sn(u|m) = sin(ph)`` and ``cn(u|m) = cos(ph)``.
""")
add_newdoc("scipy.special", "ellipkm1",
"""
ellipkm1(p)
Complete elliptic integral of the first kind around m = 1
This function is defined as
.. math:: K(p) = \\int_0^{\\pi/2} [1 - m \\sin(t)^2]^{-1/2} dt
where `m = 1 - p`.
Parameters
----------
p : array_like
Defines the parameter of the elliptic integral as m = 1 - p.
Returns
-------
K : ndarray
Value of the elliptic integral.
See Also
--------
ellipk : Complete elliptic integral of the first kind
ellipkinc : Incomplete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
ellipeinc : Incomplete elliptic integral of the second kind
""")
add_newdoc("scipy.special", "ellipkinc",
"""
ellipkinc(phi, m)
Incomplete elliptic integral of the first kind
This function is defined as
.. math:: K(\\phi, m) = \\int_0^{\\phi} [1 - m \\sin(t)^2]^{-1/2} dt
Parameters
----------
phi : array_like
amplitude of the elliptic integral
m : array_like
parameter of the elliptic integral
Returns
-------
K : ndarray
Value of the elliptic integral
Notes
-----
This function is also called ``F(phi, m)``.
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind, near m = 1
ellipk : Complete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
ellipeinc : Incomplete elliptic integral of the second kind
""")
add_newdoc("scipy.special", "entr",
r"""
entr(x)
Elementwise function for computing entropy.
.. math:: \text{entr}(x) = \begin{cases} - x \log(x) & x > 0 \\ 0 & x = 0 \\ -\infty & \text{otherwise} \end{cases}
Parameters
----------
x : ndarray
Input array.
Returns
-------
res : ndarray
The value of the elementwise entropy function at the given points x.
See Also
--------
kl_div, rel_entr
Notes
-----
This function is concave.
.. versionadded:: 0.14.0
""")
add_newdoc("scipy.special", "erf",
"""
erf(z)
Returns the error function of complex argument.
It is defined as ``2/sqrt(pi)*integral(exp(-t**2), t=0..z)``.
Parameters
----------
x : ndarray
Input array.
Returns
-------
res : ndarray
The values of the error function at the given points x.
See Also
--------
erfc, erfinv, erfcinv
Notes
-----
    The cumulative distribution function of the standard normal
    distribution is given by ``Phi(z) = 1/2[1 + erf(z/sqrt(2))]``.
References
----------
.. [1] http://en.wikipedia.org/wiki/Error_function
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover,
1972. http://www.math.sfu.ca/~cbm/aands/page_297.htm
.. [3] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
""")
add_newdoc("scipy.special", "erfc",
"""
erfc(x)
Complementary error function, 1 - erf(x).
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
""")
add_newdoc("scipy.special", "erfi",
"""
erfi(z)
Imaginary error function, -i erf(i z).
Notes
-----
.. versionadded:: 0.12.0
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
""")
add_newdoc("scipy.special", "erfcx",
"""
erfcx(x)
Scaled complementary error function, exp(x^2) erfc(x).
Notes
-----
.. versionadded:: 0.12.0
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
""")
add_newdoc("scipy.special", "eval_jacobi",
"""
eval_jacobi(n, alpha, beta, x, out=None)
Evaluate Jacobi polynomial at a point.
""")
add_newdoc("scipy.special", "eval_sh_jacobi",
"""
eval_sh_jacobi(n, p, q, x, out=None)
Evaluate shifted Jacobi polynomial at a point.
""")
add_newdoc("scipy.special", "eval_gegenbauer",
"""
eval_gegenbauer(n, alpha, x, out=None)
Evaluate Gegenbauer polynomial at a point.
""")
add_newdoc("scipy.special", "eval_chebyt",
"""
eval_chebyt(n, x, out=None)
Evaluate Chebyshev T polynomial at a point.
This routine is numerically stable for `x` in ``[-1, 1]`` at least
up to order ``10000``.
""")
add_newdoc("scipy.special", "eval_chebyu",
"""
eval_chebyu(n, x, out=None)
Evaluate Chebyshev U polynomial at a point.
""")
add_newdoc("scipy.special", "eval_chebys",
"""
eval_chebys(n, x, out=None)
Evaluate Chebyshev S polynomial at a point.
""")
add_newdoc("scipy.special", "eval_chebyc",
"""
eval_chebyc(n, x, out=None)
Evaluate Chebyshev C polynomial at a point.
""")
add_newdoc("scipy.special", "eval_sh_chebyt",
"""
eval_sh_chebyt(n, x, out=None)
Evaluate shifted Chebyshev T polynomial at a point.
""")
add_newdoc("scipy.special", "eval_sh_chebyu",
"""
eval_sh_chebyu(n, x, out=None)
Evaluate shifted Chebyshev U polynomial at a point.
""")
add_newdoc("scipy.special", "eval_legendre",
"""
eval_legendre(n, x, out=None)
Evaluate Legendre polynomial at a point.
""")
add_newdoc("scipy.special", "eval_sh_legendre",
"""
eval_sh_legendre(n, x, out=None)
Evaluate shifted Legendre polynomial at a point.
""")
add_newdoc("scipy.special", "eval_genlaguerre",
"""
eval_genlaguerre(n, alpha, x, out=None)
Evaluate generalized Laguerre polynomial at a point.
""")
add_newdoc("scipy.special", "eval_laguerre",
"""
eval_laguerre(n, x, out=None)
Evaluate Laguerre polynomial at a point.
""")
add_newdoc("scipy.special", "eval_hermite",
"""
eval_hermite(n, x, out=None)
Evaluate Hermite polynomial at a point.
""")
add_newdoc("scipy.special", "eval_hermitenorm",
"""
eval_hermitenorm(n, x, out=None)
Evaluate normalized Hermite polynomial at a point.
""")
add_newdoc("scipy.special", "exp1",
"""
exp1(z)
Exponential integral E_1 of complex argument z
::
integral(exp(-z*t)/t,t=1..inf).
""")
add_newdoc("scipy.special", "exp10",
"""
exp10(x)
10**x
""")
add_newdoc("scipy.special", "exp2",
"""
exp2(x)
2**x
""")
add_newdoc("scipy.special", "expi",
"""
expi(x)
Exponential integral Ei
Defined as::
integral(exp(t)/t,t=-inf..x)
See `expn` for a different exponential integral.
""")
add_newdoc('scipy.special', 'expit',
"""
expit(x)
Expit ufunc for ndarrays.
The expit function, also known as the logistic function, is defined as
expit(x) = 1/(1+exp(-x)). It is the inverse of the logit function.
Parameters
----------
x : ndarray
The ndarray to apply expit to element-wise.
Returns
-------
out : ndarray
An ndarray of the same shape as x. Its entries
are expit of the corresponding entry of x.
Notes
-----
As a ufunc expit takes a number of optional
keyword arguments. For more information
see `ufuncs <http://docs.scipy.org/doc/numpy/reference/ufuncs.html>`_
.. versionadded:: 0.10.0
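    Examples
    --------
    A brief sanity check of two well-known properties (the printed value is
    indicative; exact formatting can differ between NumPy versions):
    >>> from scipy.special import expit
    >>> expit(0)
    0.5
    >>> import numpy as np
    >>> np.allclose(expit(2.5) + expit(-2.5), 1.0)
    True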
""")
add_newdoc("scipy.special", "expm1",
"""
expm1(x)
exp(x) - 1 for use when x is near zero.
""")
add_newdoc("scipy.special", "expn",
"""
expn(n, x)
Exponential integral E_n
Returns the exponential integral for integer n and non-negative x and n::
integral(exp(-x*t) / t**n, t=1..inf).
""")
add_newdoc("scipy.special", "exprel",
r"""
exprel(x)
Relative error exponential, (exp(x)-1)/x, for use when x is near zero.
Parameters
----------
x : ndarray
Input array.
Returns
-------
res : ndarray
Output array.
See Also
--------
expm1
.. versionadded:: 0.17.0
""")
add_newdoc("scipy.special", "fdtr",
"""
fdtr(dfn, dfd, x)
F cumulative distribution function
Returns the area from zero to x under the F density function (also
    known as Snedecor's density or the variance ratio density). This
is the density of X = (unum/dfn)/(uden/dfd), where unum and uden
are random variables having Chi square distributions with dfn and
dfd degrees of freedom, respectively.
""")
add_newdoc("scipy.special", "fdtrc",
"""
fdtrc(dfn, dfd, x)
F survival function
Returns the complemented F distribution function.
""")
add_newdoc("scipy.special", "fdtri",
"""
fdtri(dfn, dfd, p)
Inverse to fdtr vs x
Finds the F density argument x such that ``fdtr(dfn, dfd, x) == p``.
""")
add_newdoc("scipy.special", "fdtridfd",
"""
fdtridfd(dfn, p, x)
Inverse to fdtr vs dfd
Finds the F density argument dfd such that ``fdtr(dfn,dfd,x) == p``.
""")
add_newdoc("scipy.special", "fdtridfn",
"""
fdtridfn(p, dfd, x)
Inverse to fdtr vs dfn
    Finds the F density argument dfn such that ``fdtr(dfn,dfd,x) == p``.
""")
add_newdoc("scipy.special", "fresnel",
"""
fresnel(z)
Fresnel sin and cos integrals
Defined as::
ssa = integral(sin(pi/2 * t**2),t=0..z)
csa = integral(cos(pi/2 * t**2),t=0..z)
Parameters
----------
z : float or complex array_like
Argument
Returns
-------
ssa, csa
Fresnel sin and cos integral values
""")
add_newdoc("scipy.special", "gamma",
"""
gamma(z)
Gamma function
The gamma function is often referred to as the generalized
factorial since ``z*gamma(z) = gamma(z+1)`` and ``gamma(n+1) =
n!`` for natural number *n*.
""")
add_newdoc("scipy.special", "gammainc",
"""
gammainc(a, x)
Incomplete gamma function
Defined as::
1 / gamma(a) * integral(exp(-t) * t**(a-1), t=0..x)
`a` must be positive and `x` must be >= 0.
""")
add_newdoc("scipy.special", "gammaincc",
"""
gammaincc(a,x)
Complemented incomplete gamma integral
Defined as::
1 / gamma(a) * integral(exp(-t) * t**(a-1), t=x..inf) = 1 - gammainc(a,x)
`a` must be positive and `x` must be >= 0.
""")
add_newdoc("scipy.special", "gammainccinv",
"""
gammainccinv(a,y)
Inverse to gammaincc
Returns `x` such that ``gammaincc(a,x) == y``.
""")
add_newdoc("scipy.special", "gammaincinv",
"""
gammaincinv(a, y)
Inverse to gammainc
Returns `x` such that ``gammainc(a, x) = y``.
""")
add_newdoc("scipy.special", "gammaln",
"""
gammaln(z)
Logarithm of absolute value of gamma function
Defined as::
ln(abs(gamma(z)))
See Also
--------
gammasgn
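    Examples
    --------
    An indicative consistency check against the factorial, using a
    comparison rather than raw floating-point output:
    >>> from scipy.special import gammaln
    >>> import numpy as np
    >>> np.allclose(np.exp(gammaln(5)), 24.0)
    True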
""")
add_newdoc("scipy.special", "gammasgn",
"""
gammasgn(x)
Sign of the gamma function.
See Also
--------
gammaln
""")
add_newdoc("scipy.special", "gdtr",
"""
gdtr(a,b,x)
Gamma distribution cumulative density function.
Returns the integral from zero to x of the gamma probability
density function::
a**b / gamma(b) * integral(t**(b-1) exp(-at),t=0..x).
The arguments a and b are used differently here than in other
definitions.
""")
add_newdoc("scipy.special", "gdtrc",
"""
gdtrc(a,b,x)
Gamma distribution survival function.
Integral from x to infinity of the gamma probability density
function.
See Also
--------
gdtr, gdtri
""")
add_newdoc("scipy.special", "gdtria",
"""
gdtria(p, b, x, out=None)
Inverse of gdtr vs a.
Returns the inverse with respect to the parameter `a` of ``p =
gdtr(a, b, x)``, the cumulative distribution function of the gamma
distribution.
Parameters
----------
p : array_like
Probability values.
b : array_like
`b` parameter values of `gdtr(a, b, x)`. `b` is the "shape" parameter
of the gamma distribution.
x : array_like
Nonnegative real values, from the domain of the gamma distribution.
out : ndarray, optional
If a fourth argument is given, it must be a numpy.ndarray whose size
matches the broadcast result of `a`, `b` and `x`. `out` is then the
array returned by the function.
Returns
-------
a : ndarray
Values of the `a` parameter such that `p = gdtr(a, b, x)`. `1/a`
is the "scale" parameter of the gamma distribution.
See Also
--------
gdtr : CDF of the gamma distribution.
gdtrib : Inverse with respect to `b` of `gdtr(a, b, x)`.
gdtrix : Inverse with respect to `x` of `gdtr(a, b, x)`.
Examples
--------
First evaluate `gdtr`.
>>> from scipy.special import gdtr, gdtria
>>> p = gdtr(1.2, 3.4, 5.6)
>>> print(p)
0.94378087442
Verify the inverse.
>>> gdtria(p, 3.4, 5.6)
1.2
""")
add_newdoc("scipy.special", "gdtrib",
"""
gdtrib(a, p, x, out=None)
Inverse of gdtr vs b.
Returns the inverse with respect to the parameter `b` of ``p =
gdtr(a, b, x)``, the cumulative distribution function of the gamma
distribution.
Parameters
----------
a : array_like
`a` parameter values of `gdtr(a, b, x)`. `1/a` is the "scale"
parameter of the gamma distribution.
p : array_like
Probability values.
x : array_like
Nonnegative real values, from the domain of the gamma distribution.
out : ndarray, optional
If a fourth argument is given, it must be a numpy.ndarray whose size
matches the broadcast result of `a`, `b` and `x`. `out` is then the
array returned by the function.
Returns
-------
b : ndarray
Values of the `b` parameter such that `p = gdtr(a, b, x)`. `b` is
the "shape" parameter of the gamma distribution.
See Also
--------
gdtr : CDF of the gamma distribution.
gdtria : Inverse with respect to `a` of `gdtr(a, b, x)`.
gdtrix : Inverse with respect to `x` of `gdtr(a, b, x)`.
Examples
--------
First evaluate `gdtr`.
>>> from scipy.special import gdtr, gdtrib
>>> p = gdtr(1.2, 3.4, 5.6)
>>> print(p)
0.94378087442
Verify the inverse.
>>> gdtrib(1.2, p, 5.6)
3.3999999999723882
""")
add_newdoc("scipy.special", "gdtrix",
"""
gdtrix(a, b, p, out=None)
Inverse of gdtr vs x.
Returns the inverse with respect to the parameter `x` of ``p =
gdtr(a, b, x)``, the cumulative distribution function of the gamma
distribution. This is also known as the p'th quantile of the
distribution.
Parameters
----------
a : array_like
`a` parameter values of `gdtr(a, b, x)`. `1/a` is the "scale"
parameter of the gamma distribution.
b : array_like
`b` parameter values of `gdtr(a, b, x)`. `b` is the "shape" parameter
of the gamma distribution.
p : array_like
Probability values.
out : ndarray, optional
If a fourth argument is given, it must be a numpy.ndarray whose size
matches the broadcast result of `a`, `b` and `x`. `out` is then the
array returned by the function.
Returns
-------
x : ndarray
Values of the `x` parameter such that `p = gdtr(a, b, x)`.
See Also
--------
gdtr : CDF of the gamma distribution.
gdtria : Inverse with respect to `a` of `gdtr(a, b, x)`.
gdtrib : Inverse with respect to `b` of `gdtr(a, b, x)`.
Examples
--------
First evaluate `gdtr`.
>>> from scipy.special import gdtr, gdtrix
>>> p = gdtr(1.2, 3.4, 5.6)
>>> print(p)
0.94378087442
Verify the inverse.
>>> gdtrix(1.2, 3.4, p)
5.5999999999999996
""")
add_newdoc("scipy.special", "hankel1",
"""
hankel1(v, z)
Hankel function of the first kind
Parameters
----------
v : float
Order
z : float or complex
Argument
""")
add_newdoc("scipy.special", "hankel1e",
"""
hankel1e(v, z)
Exponentially scaled Hankel function of the first kind
Defined as::
hankel1e(v,z) = hankel1(v,z) * exp(-1j * z)
Parameters
----------
v : float
Order
z : complex
Argument
""")
add_newdoc("scipy.special", "hankel2",
"""
hankel2(v, z)
Hankel function of the second kind
Parameters
----------
v : float
Order
z : complex
Argument
""")
add_newdoc("scipy.special", "hankel2e",
"""
hankel2e(v, z)
Exponentially scaled Hankel function of the second kind
Defined as::
        hankel2e(v,z) = hankel2(v,z) * exp(1j * z)
Parameters
----------
v : float
Order
z : complex
Argument
""")
add_newdoc("scipy.special", "huber",
r"""
huber(delta, r)
Huber loss function.
.. math:: \text{huber}(\delta, r) = \begin{cases} \infty & \delta < 0 \\ \frac{1}{2}r^2 & 0 \le \delta, | r | \le \delta \\ \delta ( |r| - \frac{1}{2}\delta ) & \text{otherwise} \end{cases}
Parameters
----------
delta : ndarray
Input array, indicating the quadratic vs. linear loss changepoint.
r : ndarray
Input array, possibly representing residuals.
Returns
-------
res : ndarray
The computed Huber loss function values.
Notes
-----
This function is convex in r.
.. versionadded:: 0.15.0
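    Examples
    --------
    An indicative check of the two branches (quadratic for ``|r| <= delta``,
    linear otherwise):
    >>> from scipy.special import huber
    >>> import numpy as np
    >>> np.allclose(huber(1.0, 0.5), 0.5 * 0.5**2)
    True
    >>> np.allclose(huber(1.0, 3.0), 3.0 - 0.5)
    True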
""")
add_newdoc("scipy.special", "hyp1f1",
"""
hyp1f1(a, b, x)
Confluent hypergeometric function 1F1(a, b; x)
""")
add_newdoc("scipy.special", "hyp1f2",
"""
hyp1f2(a, b, c, x)
Hypergeometric function 1F2 and error estimate
Returns
-------
y
Value of the function
err
Error estimate
""")
add_newdoc("scipy.special", "hyp2f0",
"""
hyp2f0(a, b, x, type)
Hypergeometric function 2F0 in y and an error estimate
The parameter `type` determines a convergence factor and can be
either 1 or 2.
Returns
-------
y
Value of the function
err
Error estimate
""")
add_newdoc("scipy.special", "hyp2f1",
"""
hyp2f1(a, b, c, z)
Gauss hypergeometric function 2F1(a, b; c; z).
""")
add_newdoc("scipy.special", "hyp3f0",
"""
hyp3f0(a, b, c, x)
Hypergeometric function 3F0 in y and an error estimate
Returns
-------
y
Value of the function
err
Error estimate
""")
add_newdoc("scipy.special", "hyperu",
"""
hyperu(a, b, x)
Confluent hypergeometric function U(a, b, x) of the second kind
""")
add_newdoc("scipy.special", "i0",
"""
i0(x)
Modified Bessel function of order 0
""")
add_newdoc("scipy.special", "i0e",
"""
i0e(x)
Exponentially scaled modified Bessel function of order 0.
Defined as::
i0e(x) = exp(-abs(x)) * i0(x).
""")
add_newdoc("scipy.special", "i1",
"""
i1(x)
Modified Bessel function of order 1
""")
add_newdoc("scipy.special", "i1e",
"""
i1e(x)
Exponentially scaled modified Bessel function of order 1.
Defined as::
i1e(x) = exp(-abs(x)) * i1(x)
""")
add_newdoc("scipy.special", "it2i0k0",
"""
it2i0k0(x)
Integrals related to modified Bessel functions of order 0
Returns
-------
ii0
``integral((i0(t)-1)/t, t=0..x)``
ik0
``int(k0(t)/t,t=x..inf)``
""")
add_newdoc("scipy.special", "it2j0y0",
"""
it2j0y0(x)
Integrals related to Bessel functions of order 0
Returns
-------
ij0
``integral((1-j0(t))/t, t=0..x)``
iy0
``integral(y0(t)/t, t=x..inf)``
""")
add_newdoc("scipy.special", "it2struve0",
"""
it2struve0(x)
Integral related to Struve function of order 0
Returns
-------
i
``integral(H0(t)/t, t=x..inf)``
""")
add_newdoc("scipy.special", "itairy",
"""
itairy(x)
Integrals of Airy functions
Calculates the integral of Airy functions from 0 to x
Returns
-------
Apt, Bpt
Integrals for positive arguments
Ant, Bnt
Integrals for negative arguments
""")
add_newdoc("scipy.special", "iti0k0",
"""
iti0k0(x)
Integrals of modified Bessel functions of order 0
Returns simple integrals from 0 to x of the zeroth order modified
Bessel functions i0 and k0.
Returns
-------
ii0, ik0
""")
add_newdoc("scipy.special", "itj0y0",
"""
itj0y0(x)
Integrals of Bessel functions of order 0
Returns simple integrals from 0 to x of the zeroth order Bessel
functions j0 and y0.
Returns
-------
ij0, iy0
""")
add_newdoc("scipy.special", "itmodstruve0",
"""
itmodstruve0(x)
Integral of the modified Struve function of order 0
Returns
-------
i
``integral(L0(t), t=0..x)``
""")
add_newdoc("scipy.special", "itstruve0",
"""
itstruve0(x)
Integral of the Struve function of order 0
Returns
-------
i
``integral(H0(t), t=0..x)``
""")
add_newdoc("scipy.special", "iv",
"""
iv(v,z)
Modified Bessel function of the first kind of real order
Parameters
----------
v
Order. If z is of real type and negative, v must be integer valued.
z
Argument.
""")
add_newdoc("scipy.special", "ive",
"""
ive(v,z)
Exponentially scaled modified Bessel function of the first kind
Defined as::
ive(v,z) = iv(v,z) * exp(-abs(z.real))
""")
add_newdoc("scipy.special", "j0",
"""
j0(x)
    Bessel function of the first kind of order 0
""")
add_newdoc("scipy.special", "j1",
"""
j1(x)
Bessel function of the first kind of order 1
""")
add_newdoc("scipy.special", "jn",
"""
jn(n, x)
Bessel function of the first kind of integer order n.
Notes
-----
`jn` is an alias of `jv`.
""")
add_newdoc("scipy.special", "jv",
"""
jv(v, z)
Bessel function of the first kind of real order v
""")
add_newdoc("scipy.special", "jve",
"""
jve(v, z)
Exponentially scaled Bessel function of order v
Defined as::
jve(v,z) = jv(v,z) * exp(-abs(z.imag))
""")
add_newdoc("scipy.special", "k0",
"""
k0(x)
Modified Bessel function K of order 0
Modified Bessel function of the second kind (sometimes called the
third kind) of order 0.
""")
add_newdoc("scipy.special", "k0e",
"""
k0e(x)
Exponentially scaled modified Bessel function K of order 0
Defined as::
k0e(x) = exp(x) * k0(x).
""")
add_newdoc("scipy.special", "k1",
"""
    k1(x)
    Modified Bessel function of the second kind of order 1
""")
add_newdoc("scipy.special", "k1e",
"""
k1e(x)
Exponentially scaled modified Bessel function K of order 1
Defined as::
k1e(x) = exp(x) * k1(x)
""")
add_newdoc("scipy.special", "kei",
"""
kei(x)
    Kelvin function kei
""")
add_newdoc("scipy.special", "keip",
"""
keip(x)
Derivative of the Kelvin function kei
""")
add_newdoc("scipy.special", "kelvin",
"""
kelvin(x)
Kelvin functions as complex numbers
Returns
-------
Be, Ke, Bep, Kep
The tuple (Be, Ke, Bep, Kep) contains complex numbers
representing the real and imaginary Kelvin functions and their
derivatives evaluated at x. For example, kelvin(x)[0].real =
ber x and kelvin(x)[0].imag = bei x with similar relationships
for ker and kei.
""")
add_newdoc("scipy.special", "ker",
"""
ker(x)
Kelvin function ker
""")
add_newdoc("scipy.special", "kerp",
"""
kerp(x)
Derivative of the Kelvin function ker
""")
add_newdoc("scipy.special", "kl_div",
r"""
kl_div(x, y)
Elementwise function for computing Kullback-Leibler divergence.
.. math:: \mathrm{kl\_div}(x, y) = \begin{cases} x \log(x / y) - x + y & x > 0, y > 0 \\ y & x = 0, y \ge 0 \\ \infty & \text{otherwise} \end{cases}
Parameters
----------
x : ndarray
First input array.
y : ndarray
Second input array.
Returns
-------
res : ndarray
Output array.
See Also
--------
entr, rel_entr
Notes
-----
This function is non-negative and is jointly convex in x and y.
.. versionadded:: 0.14.0
""")
add_newdoc("scipy.special", "kn",
"""
kn(n, x)
Modified Bessel function of the second kind of integer order n
These are also sometimes called functions of the third kind.
""")
add_newdoc("scipy.special", "kolmogi",
"""
kolmogi(p)
Inverse function to kolmogorov
Returns y such that ``kolmogorov(y) == p``.
""")
add_newdoc("scipy.special", "kolmogorov",
"""
kolmogorov(y)
Complementary cumulative distribution function of Kolmogorov distribution
Returns the complementary cumulative distribution function of
Kolmogorov's limiting distribution (Kn* for large n) of a
two-sided test for equality between an empirical and a theoretical
distribution. It is equal to the (limit as n->infinity of the)
probability that sqrt(n) * max absolute deviation > y.
""")
add_newdoc("scipy.special", "kv",
"""
kv(v,z)
Modified Bessel function of the second kind of real order v
Returns the modified Bessel function of the second kind (sometimes
called the third kind) for real order v at complex z.
""")
add_newdoc("scipy.special", "kve",
"""
kve(v,z)
Exponentially scaled modified Bessel function of the second kind.
Returns the exponentially scaled, modified Bessel function of the
second kind (sometimes called the third kind) for real order v at
complex z::
kve(v,z) = kv(v,z) * exp(z)
""")
add_newdoc("scipy.special", "log1p",
"""
log1p(x)
Calculates log(1+x) for use when x is near zero
""")
add_newdoc('scipy.special', 'logit',
"""
logit(x)
Logit ufunc for ndarrays.
The logit function is defined as logit(p) = log(p/(1-p)).
Note that logit(0) = -inf, logit(1) = inf, and logit(p)
for p<0 or p>1 yields nan.
Parameters
----------
x : ndarray
The ndarray to apply logit to element-wise.
Returns
-------
out : ndarray
An ndarray of the same shape as x. Its entries
are logit of the corresponding entry of x.
Notes
-----
As a ufunc logit takes a number of optional
keyword arguments. For more information
see `ufuncs <http://docs.scipy.org/doc/numpy/reference/ufuncs.html>`_
.. versionadded:: 0.10.0
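    Examples
    --------
    A quick check of the central value and of the inverse relationship with
    `expit` (indicative only):
    >>> from scipy.special import logit, expit
    >>> import numpy as np
    >>> logit(0.5)
    0.0
    >>> np.allclose(logit(expit(1.5)), 1.5)
    True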
""")
add_newdoc("scipy.special", "lpmv",
"""
lpmv(m, v, x)
    Associated Legendre function of integer order.
Parameters
----------
m : int
Order
v : real
Degree. Must be ``v>-m-1`` or ``v<m``
x : complex
Argument. Must be ``|x| <= 1``.
""")
add_newdoc("scipy.special", "mathieu_a",
"""
mathieu_a(m,q)
Characteristic value of even Mathieu functions
Returns the characteristic value for the even solution,
``ce_m(z,q)``, of Mathieu's equation.
""")
add_newdoc("scipy.special", "mathieu_b",
"""
mathieu_b(m,q)
Characteristic value of odd Mathieu functions
Returns the characteristic value for the odd solution,
``se_m(z,q)``, of Mathieu's equation.
""")
add_newdoc("scipy.special", "mathieu_cem",
"""
mathieu_cem(m,q,x)
Even Mathieu function and its derivative
Returns the even Mathieu function, ``ce_m(x,q)``, of order m and
parameter q evaluated at x (given in degrees). Also returns the
derivative with respect to x of ce_m(x,q)
Parameters
----------
m
Order of the function
q
Parameter of the function
x
Argument of the function, *given in degrees, not radians*
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_modcem1",
"""
mathieu_modcem1(m, q, x)
Even modified Mathieu function of the first kind and its derivative
Evaluates the even modified Mathieu function of the first kind,
``Mc1m(x,q)``, and its derivative at `x` for order m and parameter
`q`.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_modcem2",
"""
mathieu_modcem2(m, q, x)
Even modified Mathieu function of the second kind and its derivative
Evaluates the even modified Mathieu function of the second kind,
Mc2m(x,q), and its derivative at x (given in degrees) for order m
and parameter q.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_modsem1",
"""
mathieu_modsem1(m,q,x)
Odd modified Mathieu function of the first kind and its derivative
Evaluates the odd modified Mathieu function of the first kind,
Ms1m(x,q), and its derivative at x (given in degrees) for order m
and parameter q.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_modsem2",
"""
mathieu_modsem2(m, q, x)
Odd modified Mathieu function of the second kind and its derivative
Evaluates the odd modified Mathieu function of the second kind,
Ms2m(x,q), and its derivative at x (given in degrees) for order m
and parameter q.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_sem",
"""
mathieu_sem(m, q, x)
Odd Mathieu function and its derivative
Returns the odd Mathieu function, se_m(x,q), of order m and
parameter q evaluated at x (given in degrees). Also returns the
derivative with respect to x of se_m(x,q).
Parameters
----------
m
Order of the function
q
Parameter of the function
x
Argument of the function, *given in degrees, not radians*.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "modfresnelm",
"""
modfresnelm(x)
Modified Fresnel negative integrals
Returns
-------
fm
Integral ``F_-(x)``: ``integral(exp(-1j*t*t),t=x..inf)``
km
Integral ``K_-(x)``: ``1/sqrt(pi)*exp(1j*(x*x+pi/4))*fp``
""")
add_newdoc("scipy.special", "modfresnelp",
"""
modfresnelp(x)
Modified Fresnel positive integrals
Returns
-------
fp
Integral ``F_+(x)``: ``integral(exp(1j*t*t),t=x..inf)``
kp
Integral ``K_+(x)``: ``1/sqrt(pi)*exp(-1j*(x*x+pi/4))*fp``
""")
add_newdoc("scipy.special", "modstruve",
"""
modstruve(v, x)
Modified Struve function
    Returns the modified Struve function Lv(x) of order v at x; x must
    be positive unless v is an integer.
""")
add_newdoc("scipy.special", "nbdtr",
"""
nbdtr(k, n, p)
Negative binomial cumulative distribution function
Returns the sum of the terms 0 through k of the negative binomial
distribution::
sum((n+j-1)Cj p**n (1-p)**j,j=0..k).
In a sequence of Bernoulli trials this is the probability that k
or fewer failures precede the nth success.
""")
add_newdoc("scipy.special", "nbdtrc",
"""
nbdtrc(k,n,p)
Negative binomial survival function
Returns the sum of the terms k+1 to infinity of the negative
binomial distribution.
""")
add_newdoc("scipy.special", "nbdtri",
"""
nbdtri(k, n, y)
Inverse of nbdtr vs p
Finds the argument p such that ``nbdtr(k,n,p) = y``.
""")
add_newdoc("scipy.special", "nbdtrik",
"""
nbdtrik(y,n,p)
Inverse of nbdtr vs k
Finds the argument k such that ``nbdtr(k,n,p) = y``.
""")
add_newdoc("scipy.special", "nbdtrin",
"""
nbdtrin(k,y,p)
Inverse of nbdtr vs n
Finds the argument n such that ``nbdtr(k,n,p) = y``.
""")
add_newdoc("scipy.special", "ncfdtr",
"""
ncfdtr(dfn, dfd, nc, f)
Cumulative distribution function of the non-central F distribution.
Parameters
----------
dfn : array_like
Degrees of freedom of the numerator sum of squares. Range (0, inf).
dfd : array_like
Degrees of freedom of the denominator sum of squares. Range (0, inf).
nc : array_like
Noncentrality parameter. Should be in range (0, 1e4).
f : array_like
Quantiles, i.e. the upper limit of integration.
Returns
-------
cdf : float or ndarray
The calculated CDF. If all inputs are scalar, the return will be a
float. Otherwise it will be an array.
See Also
--------
    ncfdtri : Inverse CDF (iCDF) of the non-central F distribution.
    ncfdtridfd : Calculate dfd, given CDF and iCDF values.
    ncfdtridfn : Calculate dfn, given CDF and iCDF values.
    ncfdtrinc : Calculate noncentrality parameter, given CDF, iCDF, dfn, dfd.
Examples
--------
>>> from scipy import special
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
Plot the CDF of the non-central F distribution, for nc=0. Compare with the
F-distribution from scipy.stats:
>>> x = np.linspace(-1, 8, num=500)
>>> dfn = 3
>>> dfd = 2
>>> ncf_stats = stats.f.cdf(x, dfn, dfd)
>>> ncf_special = special.ncfdtr(dfn, dfd, 0, x)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, ncf_stats, 'b-', lw=3)
>>> ax.plot(x, ncf_special, 'r-')
>>> plt.show()
""")
add_newdoc("scipy.special", "ncfdtri",
"""
ncfdtri(p, dfn, dfd, nc)
Inverse cumulative distribution function of the non-central F distribution.
See `ncfdtr` for more details.
""")
add_newdoc("scipy.special", "ncfdtridfd",
"""
ncfdtridfd(p, f, dfn, nc)
Calculate degrees of freedom (denominator) for the noncentral F-distribution.
See `ncfdtr` for more details.
""")
add_newdoc("scipy.special", "ncfdtridfn",
"""
ncfdtridfn(p, f, dfd, nc)
Calculate degrees of freedom (numerator) for the noncentral F-distribution.
See `ncfdtr` for more details.
""")
add_newdoc("scipy.special", "ncfdtrinc",
"""
ncfdtrinc(p, f, dfn, dfd)
Calculate non-centrality parameter for non-central F distribution.
See `ncfdtr` for more details.
""")
add_newdoc("scipy.special", "nctdtr",
"""
nctdtr(df, nc, t)
Cumulative distribution function of the non-central t distribution.
Parameters
----------
df : array_like
Degrees of freedom of the distribution. Should be in range (0, inf).
nc : array_like
Noncentrality parameter. Should be in range (-1e6, 1e6).
t : array_like
Quantiles, i.e. the upper limit of integration.
Returns
-------
cdf : float or ndarray
The calculated CDF. If all inputs are scalar, the return will be a
float. Otherwise it will be an array.
See Also
--------
nctdtrit : Inverse CDF (iCDF) of the non-central t distribution.
nctdtridf : Calculate degrees of freedom, given CDF and iCDF values.
nctdtrinc : Calculate non-centrality parameter, given CDF iCDF values.
Examples
--------
>>> from scipy import special
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
Plot the CDF of the non-central t distribution, for nc=0. Compare with the
t-distribution from scipy.stats:
>>> x = np.linspace(-5, 5, num=500)
>>> df = 3
>>> nct_stats = stats.t.cdf(x, df)
>>> nct_special = special.nctdtr(df, 0, x)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, nct_stats, 'b-', lw=3)
>>> ax.plot(x, nct_special, 'r-')
>>> plt.show()
""")
add_newdoc("scipy.special", "nctdtridf",
"""
nctdtridf(p, nc, t)
Calculate degrees of freedom for non-central t distribution.
See `nctdtr` for more details.
Parameters
----------
p : array_like
CDF values, in range (0, 1].
nc : array_like
Noncentrality parameter. Should be in range (-1e6, 1e6).
t : array_like
Quantiles, i.e. the upper limit of integration.
""")
add_newdoc("scipy.special", "nctdtrinc",
"""
nctdtrinc(df, p, t)
Calculate non-centrality parameter for non-central t distribution.
See `nctdtr` for more details.
Parameters
----------
df : array_like
Degrees of freedom of the distribution. Should be in range (0, inf).
p : array_like
CDF values, in range (0, 1].
t : array_like
Quantiles, i.e. the upper limit of integration.
""")
add_newdoc("scipy.special", "nctdtrit",
"""
nctdtrit(df, nc, p)
Inverse cumulative distribution function of the non-central t distribution.
See `nctdtr` for more details.
Parameters
----------
df : array_like
Degrees of freedom of the distribution. Should be in range (0, inf).
nc : array_like
Noncentrality parameter. Should be in range (-1e6, 1e6).
p : array_like
CDF values, in range (0, 1].
""")
add_newdoc("scipy.special", "ndtr",
"""
ndtr(x)
Gaussian cumulative distribution function
Returns the area under the standard Gaussian probability
density function, integrated from minus infinity to x::
1/sqrt(2*pi) * integral(exp(-t**2 / 2),t=-inf..x)
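    Examples
    --------
    Two quick checks, the median and the symmetry of the distribution
    (the printed value is indicative):
    >>> from scipy.special import ndtr
    >>> ndtr(0)
    0.5
    >>> import numpy as np
    >>> np.allclose(ndtr(1.0) + ndtr(-1.0), 1.0)
    True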
""")
add_newdoc("scipy.special", "nrdtrimn",
"""
nrdtrimn(p, x, std)
Calculate mean of normal distribution given other params.
Parameters
----------
p : array_like
CDF values, in range (0, 1].
x : array_like
Quantiles, i.e. the upper limit of integration.
std : array_like
Standard deviation.
Returns
-------
mn : float or ndarray
The mean of the normal distribution.
See Also
--------
    nrdtrisd, ndtr
""")
add_newdoc("scipy.special", "nrdtrisd",
"""
nrdtrisd(p, x, mn)
Calculate standard deviation of normal distribution given other params.
Parameters
----------
p : array_like
CDF values, in range (0, 1].
x : array_like
Quantiles, i.e. the upper limit of integration.
mn : float or ndarray
The mean of the normal distribution.
Returns
-------
std : array_like
Standard deviation.
See Also
--------
    nrdtrimn, ndtr
""")
add_newdoc("scipy.special", "log_ndtr",
"""
log_ndtr(x)
Logarithm of Gaussian cumulative distribution function
Returns the log of the area under the standard Gaussian probability
density function, integrated from minus infinity to x::
log(1/sqrt(2*pi) * integral(exp(-t**2 / 2), t=-inf..x))
""")
add_newdoc("scipy.special", "ndtri",
"""
ndtri(y)
Inverse of ndtr vs x
Returns the argument x for which the area under the Gaussian
probability density function (integrated from minus infinity to x)
is equal to y.
""")
add_newdoc("scipy.special", "obl_ang1",
"""
obl_ang1(m, n, c, x)
Oblate spheroidal angular function of the first kind and its derivative
Computes the oblate spheroidal angular function of the first kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_ang1_cv",
"""
obl_ang1_cv(m, n, c, cv, x)
Oblate spheroidal angular function obl_ang1 for precomputed characteristic value
Computes the oblate spheroidal angular function of the first kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_cv",
"""
obl_cv(m, n, c)
Characteristic value of oblate spheroidal function
Computes the characteristic value of oblate spheroidal wave
functions of order m,n (n>=m) and spheroidal parameter c.
""")
add_newdoc("scipy.special", "obl_rad1",
"""
obl_rad1(m,n,c,x)
Oblate spheroidal radial function of the first kind and its derivative
Computes the oblate spheroidal radial function of the first kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_rad1_cv",
"""
obl_rad1_cv(m,n,c,cv,x)
Oblate spheroidal radial function obl_rad1 for precomputed characteristic value
Computes the oblate spheroidal radial function of the first kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_rad2",
"""
obl_rad2(m,n,c,x)
Oblate spheroidal radial function of the second kind and its derivative.
Computes the oblate spheroidal radial function of the second kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_rad2_cv",
"""
obl_rad2_cv(m,n,c,cv,x)
Oblate spheroidal radial function obl_rad2 for precomputed characteristic value
Computes the oblate spheroidal radial function of the second kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pbdv",
"""
pbdv(v, x)
Parabolic cylinder function D
Returns (d,dp) the parabolic cylinder function Dv(x) in d and the
derivative, Dv'(x) in dp.
Returns
-------
d
Value of the function
dp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pbvv",
"""
pbvv(v,x)
Parabolic cylinder function V
Returns the parabolic cylinder function Vv(x) in v and the
derivative, Vv'(x) in vp.
Returns
-------
v
Value of the function
vp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pbwa",
"""
pbwa(a,x)
Parabolic cylinder function W
Returns the parabolic cylinder function W(a,x) in w and the
derivative, W'(a,x) in wp.
.. warning::
May not be accurate for large (>5) arguments in a and/or x.
Returns
-------
w
Value of the function
wp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pdtr",
"""
pdtr(k, m)
Poisson cumulative distribution function
Returns the sum of the first k terms of the Poisson distribution:
sum(exp(-m) * m**j / j!, j=0..k) = gammaincc( k+1, m). Arguments
must both be positive and k an integer.
""")
add_newdoc("scipy.special", "pdtrc",
"""
pdtrc(k, m)
Poisson survival function
Returns the sum of the terms from k+1 to infinity of the Poisson
distribution: sum(exp(-m) * m**j / j!, j=k+1..inf) = gammainc(
k+1, m). Arguments must both be positive and k an integer.
""")
add_newdoc("scipy.special", "pdtri",
"""
pdtri(k,y)
Inverse to pdtr vs m
Returns the Poisson variable m such that the sum from 0 to k of
the Poisson density is equal to the given probability y:
calculated by gammaincinv(k+1, y). k must be a nonnegative
integer and y between 0 and 1.
""")
add_newdoc("scipy.special", "pdtrik",
"""
pdtrik(p,m)
Inverse to pdtr vs k
Returns the quantile k such that ``pdtr(k, m) = p``
""")
add_newdoc("scipy.special", "poch",
"""
poch(z, m)
Rising factorial (z)_m
    The Pochhammer symbol (rising factorial) is defined as::
(z)_m = gamma(z + m) / gamma(z)
For positive integer `m` it reads::
(z)_m = z * (z + 1) * ... * (z + m - 1)
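    Examples
    --------
    An indicative check against the explicit product form:
    >>> from scipy.special import poch
    >>> import numpy as np
    >>> np.allclose(poch(3, 2), 3 * 4)
    True
    >>> np.allclose(poch(3, 0), 1.0)
    True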
""")
add_newdoc("scipy.special", "pro_ang1",
"""
pro_ang1(m,n,c,x)
Prolate spheroidal angular function of the first kind and its derivative
Computes the prolate spheroidal angular function of the first kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_ang1_cv",
"""
pro_ang1_cv(m,n,c,cv,x)
Prolate spheroidal angular function pro_ang1 for precomputed characteristic value
Computes the prolate spheroidal angular function of the first kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_cv",
"""
pro_cv(m,n,c)
Characteristic value of prolate spheroidal function
Computes the characteristic value of prolate spheroidal wave
functions of order m,n (n>=m) and spheroidal parameter c.
""")
add_newdoc("scipy.special", "pro_rad1",
"""
pro_rad1(m,n,c,x)
Prolate spheroidal radial function of the first kind and its derivative
Computes the prolate spheroidal radial function of the first kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_rad1_cv",
"""
pro_rad1_cv(m,n,c,cv,x)
Prolate spheroidal radial function pro_rad1 for precomputed characteristic value
Computes the prolate spheroidal radial function of the first kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_rad2",
"""
pro_rad2(m,n,c,x)
    Prolate spheroidal radial function of the second kind and its derivative
Computes the prolate spheroidal radial function of the second kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_rad2_cv",
"""
pro_rad2_cv(m,n,c,cv,x)
Prolate spheroidal radial function pro_rad2 for precomputed characteristic value
Computes the prolate spheroidal radial function of the second kind
and its derivative (with respect to x) for mode parameters m>=0
and n>=m, spheroidal parameter c and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pseudo_huber",
r"""
pseudo_huber(delta, r)
Pseudo-Huber loss function.
.. math:: \mathrm{pseudo\_huber}(\delta, r) = \delta^2 \left( \sqrt{ 1 + \left( \frac{r}{\delta} \right)^2 } - 1 \right)
Parameters
----------
delta : ndarray
Input array, indicating the soft quadratic vs. linear loss changepoint.
r : ndarray
Input array, possibly representing residuals.
Returns
-------
res : ndarray
The computed Pseudo-Huber loss function values.
Notes
-----
This function is convex in :math:`r`.
.. versionadded:: 0.15.0
""")
add_newdoc("scipy.special", "psi",
"""
psi(z)
Digamma function
The derivative of the logarithm of the gamma function evaluated at
z (also called the digamma function).
""")
add_newdoc("scipy.special", "radian",
"""
radian(d, m, s)
Convert from degrees to radians
Returns the angle given in (d)egrees, (m)inutes, and (s)econds in
radians.
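    Examples
    --------
    An indicative check against NumPy's degree-to-radian conversion:
    >>> from scipy.special import radian
    >>> import numpy as np
    >>> np.allclose(radian(180, 0, 0), np.pi)
    True
    >>> np.allclose(radian(90, 30, 0), np.deg2rad(90.5))
    True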
""")
add_newdoc("scipy.special", "rel_entr",
r"""
rel_entr(x, y)
Elementwise function for computing relative entropy.
.. math:: \mathrm{rel\_entr}(x, y) = \begin{cases} x \log(x / y) & x > 0, y > 0 \\ 0 & x = 0, y \ge 0 \\ \infty & \text{otherwise} \end{cases}
Parameters
----------
x : ndarray
First input array.
y : ndarray
Second input array.
Returns
-------
res : ndarray
Output array.
See Also
--------
entr, kl_div
Notes
-----
This function is jointly convex in x and y.
.. versionadded:: 0.14.0
""")
add_newdoc("scipy.special", "rgamma",
"""
rgamma(z)
Gamma function inverted
Returns ``1/gamma(x)``
""")
add_newdoc("scipy.special", "round",
"""
round(x)
Round to nearest integer
Returns the nearest integer to x as a double precision floating
point result. If x ends in 0.5 exactly, the nearest even integer
is chosen.
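    Examples
    --------
    Halfway cases are rounded to the nearest even integer, as described
    above (the printed values are indicative):
    >>> from scipy.special import round
    >>> round(2.5), round(3.5)
    (2.0, 4.0)
    >>> round(-2.5)
    -2.0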
""")
add_newdoc("scipy.special", "shichi",
"""
shichi(x)
Hyperbolic sine and cosine integrals
Returns
-------
shi
``integral(sinh(t)/t, t=0..x)``
chi
``eul + ln x + integral((cosh(t)-1)/t, t=0..x)``
where ``eul`` is Euler's constant.
""")
add_newdoc("scipy.special", "sici",
"""
sici(x)
Sine and cosine integrals
Returns
-------
si
``integral(sin(t)/t, t=0..x)``
ci
``eul + ln x + integral((cos(t) - 1)/t, t=0..x)``
where ``eul`` is Euler's constant.
""")
add_newdoc("scipy.special", "sindg",
"""
sindg(x)
Sine of angle given in degrees
""")
add_newdoc("scipy.special", "smirnov",
"""
smirnov(n, e)
Kolmogorov-Smirnov complementary cumulative distribution function
Returns the exact Kolmogorov-Smirnov complementary cumulative
distribution function (Dn+ or Dn-) for a one-sided test of
equality between an empirical and a theoretical distribution. It
is equal to the probability that the maximum difference between a
theoretical distribution and an empirical one based on n samples
is greater than e.
""")
add_newdoc("scipy.special", "smirnovi",
"""
smirnovi(n, y)
Inverse to smirnov
Returns ``e`` such that ``smirnov(n, e) = y``.
""")
add_newdoc("scipy.special", "spence",
"""
spence(x)
Dilogarithm integral
Returns the dilogarithm integral::
-integral(log t / (t-1),t=1..x)
""")
add_newdoc("scipy.special", "stdtr",
"""
stdtr(df,t)
Student t distribution cumulative density function
Returns the integral from minus infinity to t of the Student t
distribution with df > 0 degrees of freedom::
gamma((df+1)/2)/(sqrt(df*pi)*gamma(df/2)) *
integral((1+x**2/df)**(-df/2-1/2), x=-inf..t)
""")
add_newdoc("scipy.special", "stdtridf",
"""
stdtridf(p,t)
Inverse of stdtr vs df
Returns the argument df such that stdtr(df,t) is equal to p.
""")
add_newdoc("scipy.special", "stdtrit",
"""
stdtrit(df,p)
Inverse of stdtr vs t
Returns the argument t such that stdtr(df,t) is equal to p.
""")
add_newdoc("scipy.special", "struve",
"""
struve(v,x)
Struve function
    Computes the Struve function Hv(x) of order v at x; x must be
    positive unless v is an integer.
""")
add_newdoc("scipy.special", "tandg",
"""
tandg(x)
Tangent of angle x given in degrees.
""")
add_newdoc("scipy.special", "tklmbda",
"""
tklmbda(x, lmbda)
Tukey-Lambda cumulative distribution function
""")
add_newdoc("scipy.special", "wofz",
"""
wofz(z)
Faddeeva function
Returns the value of the Faddeeva function for complex argument::
exp(-z**2)*erfc(-i*z)
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
""")
add_newdoc("scipy.special", "xlogy",
"""
xlogy(x, y)
Compute ``x*log(y)`` so that the result is 0 if `x = 0`.
Parameters
----------
x : array_like
Multiplier
y : array_like
Argument
Returns
-------
z : array_like
Computed x*log(y)
Notes
-----
.. versionadded:: 0.13.0
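    Examples
    --------
    The case ``x = 0`` returns 0 even though ``log(0)`` diverges; an
    indicative check:
    >>> from scipy.special import xlogy
    >>> xlogy(0, 0)
    0.0
    >>> import numpy as np
    >>> np.allclose(xlogy(2, np.e), 2.0)
    True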
""")
add_newdoc("scipy.special", "xlog1py",
"""
xlog1py(x, y)
Compute ``x*log1p(y)`` so that the result is 0 if `x = 0`.
Parameters
----------
x : array_like
Multiplier
y : array_like
Argument
Returns
-------
z : array_like
Computed x*log1p(y)
Notes
-----
.. versionadded:: 0.13.0
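    Examples
    --------
    As with `xlogy`, ``x = 0`` gives 0 even where ``log1p(y)`` diverges;
    an indicative check:
    >>> from scipy.special import xlog1py
    >>> xlog1py(0, -1)
    0.0
    >>> import numpy as np
    >>> np.allclose(xlog1py(3.0, 1e-4), 3.0 * np.log1p(1e-4))
    True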
""")
add_newdoc("scipy.special", "y0",
"""
y0(x)
Bessel function of the second kind of order 0
Returns the Bessel function of the second kind of order 0 at x.
""")
add_newdoc("scipy.special", "y1",
"""
y1(x)
Bessel function of the second kind of order 1
Returns the Bessel function of the second kind of order 1 at x.
""")
add_newdoc("scipy.special", "yn",
"""
yn(n,x)
Bessel function of the second kind of integer order
Returns the Bessel function of the second kind of integer order n
at x.
""")
add_newdoc("scipy.special", "yv",
"""
yv(v,z)
Bessel function of the second kind of real order
Returns the Bessel function of the second kind of real order v at
complex z.
""")
add_newdoc("scipy.special", "yve",
"""
yve(v,z)
Exponentially scaled Bessel function of the second kind of real order
Returns the exponentially scaled Bessel function of the second
kind of real order v at complex z::
yve(v,z) = yv(v,z) * exp(-abs(z.imag))
""")
add_newdoc("scipy.special", "zeta",
"""
zeta(x, q)
Hurwitz zeta function
The Riemann zeta function of two arguments (also known as the
    Hurwitz zeta function).
This function is defined as
.. math:: \\zeta(x, q) = \\sum_{k=0}^{\\infty} 1 / (k+q)^x,
where ``x > 1`` and ``q > 0``.
See also
--------
zetac
""")
add_newdoc("scipy.special", "zetac",
"""
zetac(x)
Riemann zeta function minus 1.
This function is defined as
.. math:: \\zeta(x) = \\sum_{k=2}^{\\infty} 1 / k^x,
where ``x > 1``.
See Also
--------
zeta
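    Examples
    --------
    An indicative check against the classical value ``zeta(2) = pi**2/6``:
    >>> from scipy.special import zetac
    >>> import numpy as np
    >>> np.allclose(zetac(2), np.pi**2 / 6 - 1)
    True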
""")
add_newdoc("scipy.special", "_struve_asymp_large_z",
"""
_struve_asymp_large_z(v, z, is_h)
Internal function for testing struve & modstruve
Evaluates using asymptotic expansion
Returns
-------
v, err
""")
add_newdoc("scipy.special", "_struve_power_series",
"""
_struve_power_series(v, z, is_h)
Internal function for testing struve & modstruve
Evaluates using power series
Returns
-------
v, err
""")
add_newdoc("scipy.special", "_struve_bessel_series",
"""
_struve_bessel_series(v, z, is_h)
Internal function for testing struve & modstruve
Evaluates using Bessel function series
Returns
-------
v, err
""")
| bsd-3-clause |
gasabr/AtoD | atod/models/match.py | 1 | 2832 | ''' Class to get match description. '''
import dota2api
import pandas as pd
from atod import Hero, Heroes, files
from atod.utils.dota_api import api
class Match(object):
    ''' Representation of a single match.
Attributes:
id (int) : id of the match
radiant (Heroes): Heroes in radiant team
dire (Heroes): Heroes in dire team
'''
def __init__(self, match_id: int):
''' Calls the API and creates a match representation from result.
Args:
match_id: Dota match ID
'''
if not isinstance(match_id, int):
raise TypeError('`match_id` must have type int.')
else:
self.id = match_id
response = api.get_match_details(match_id=match_id)
        # TODO: check supported modes, rewrite this
if response['game_mode'] != 2 and response['game_mode'] != 16:
raise NotImplementedError('Sorry, Match currently'
+ ' does not support {}'.format(response['game_mode_name'])
+ ' game mode.')
self.radiant = Heroes()
self.dire = Heroes()
self.radiant_win = response['radiant_win']
# select picks and add heroes to appropriate teams
for pick in filter(lambda x: x['is_pick'], response['picks_bans']):
if pick['team'] == 0:
self.radiant.add(Hero(pick['hero_id']))
else:
self.dire.add(Hero(pick['hero_id']))
def get_description(self, include):
        ''' Returns the description of this match.
        The description consists of 3 parts: the radiant description, the dire
        description and the result. The complete length of the description
        vector is 2n + 1, where n is the length of a single side description
        (which depends on the chosen parameters).
Args:
include (list): the same with Hero.get_description().
Returns:
pd.Series
'''
# get descriptions of sides
radiant_description = self.radiant.get_description(include)
dire_description = self.dire.get_description(include)
len_desc = radiant_description.shape[0]
# create array for MultiIndex object
index_arrays = [['radiant'] * len_desc + ['dire'] * len_desc,
list(radiant_description.index) * 2]
# convert array to list of tuples
index_tuples = list(zip(*index_arrays))
# add result comlumn
index_tuples.append(('result', 'radiant_win'))
index = pd.MultiIndex.from_tuples(index_tuples,
names=['side', 'variables'])
# unite all the columns
variables = [*radiant_description, *dire_description, self.radiant_win]
description = pd.Series(variables, index=index)
return description
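# A minimal usage sketch, kept out of the import path on purpose. It assumes
# that a valid Steam API key is configured for `atod.utils.dota_api.api`, that
# `example_match_id` is replaced with the id of a real match played in a
# supported game mode, and that the `include` values are feature groups
# accepted by Hero.get_description() -- all of these are placeholders.
if __name__ == '__main__':
    example_match_id = 271145478  # hypothetical id, replace with a real one
    match = Match(example_match_id)
    description = match.get_description(include=['laning', 'role'])
    # description is a pd.Series indexed by a (side, variables) MultiIndex.
    print(description.head())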
| mit |
grahesh/Stock-Market-Event-Analysis | qstksim/tests/test_tradesim_SPY_Short.py | 3 | 2617 | '''
(c) 2011, 2012 Georgia Tech Research Corporation
This source code is released under the New BSD license. Please see
http://wiki.quantsoftware.org/index.php?title=QSTK_License
for license details.
Created on May 19, 2012
@author: Sourabh Bajaj
@contact: [email protected]
@summary: Test cases for tradeSim - Monthly rebalancing of a short $SPX position
'''
# Python imports
import datetime as dt
import unittest
# 3rd Party Imports
import pandas as pand
import numpy as np
# QSTK imports
import qstksim
import qstkutil.DataAccess as da
import qstkutil.qsdateutil as du
class Test(unittest.TestCase):
df_close = None
df_alloc = None
i_open_result = None
def _generate_data(self):
year = 2009
startday = dt.datetime(year-1, 12, 1)
endday = dt.datetime(year+1, 1, 31)
l_symbols = ['$SPX']
#Get desired timestamps
timeofday = dt.timedelta(hours = 16)
ldt_timestamps = du.getNYSEdays(startday, endday, timeofday)
dataobj = da.DataAccess('Norgate')
self.df_close = dataobj.get_data( \
ldt_timestamps, l_symbols, "close", verbose=True)
self.df_alloc = pand.DataFrame( \
index=[dt.datetime(year, 1, 1)], \
data=[-1], columns=l_symbols)
for i in range(11):
self.df_alloc = self.df_alloc.append( \
pand.DataFrame(index=[dt.datetime(year, i+2, 1)], \
data=[-1], columns=l_symbols))
self.df_alloc['_CASH'] = 0.0
#Based on hand calculation using the transaction costs and slippage.
self.i_open_result = 0.7541428779600005
def setUp(self):
''' Unittest setup function '''
self._generate_data()
def test_buy_close(self):
        ''' Tests tradesim monthly rebalancing of a short $SPX position '''
(df_funds, ts_leverage, f_commision, f_slippage, f_borrow) = \
qstksim.tradesim( self.df_alloc, self.df_close, 10000, 1, True,
0.02, 5, 0.02)
print 'Commision Costs : ' + str(f_commision)
print 'Slippage : ' + str(f_slippage)
print 'Short Borrowing Cost : ' + str(f_borrow)
print 'Leverage : '
print ts_leverage
np.testing.assert_approx_equal(df_funds[-1], \
10000 * self.i_open_result, significant = 3)
self.assertTrue(True)
#self.assertTrue(abs(df_funds[-1] - 10000 * self.i_open_result)<=0.01)
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
| bsd-3-clause |
rknLA/sms-tools | lectures/09-Sound-description/plots-code/spectralFlux-onsetFunction.py | 25 | 1330 | import numpy as np
import matplotlib.pyplot as plt
import essentia.standard as ess
M = 1024
N = 1024
H = 512
fs = 44100
spectrum = ess.Spectrum(size=N)
window = ess.Windowing(size=M, type='hann')
flux = ess.Flux()
onsetDetection = ess.OnsetDetection(method='hfc')
x = ess.MonoLoader(filename = '../../../sounds/speech-male.wav', sampleRate = fs)()
fluxes = []
onsetDetections = []
for frame in ess.FrameGenerator(x, frameSize=M, hopSize=H, startFromZero=True):
mX = spectrum(window(frame))
flux_val = flux(mX)
fluxes.append(flux_val)
onsetDetection_val = onsetDetection(mX, mX)
onsetDetections.append(onsetDetection_val)
onsetDetections = np.array(onsetDetections)
fluxes = np.array(fluxes)
plt.figure(1, figsize=(9.5, 7))
plt.subplot(2,1,1)
plt.plot(np.arange(x.size)/float(fs), x)
plt.axis([0, x.size/float(fs), min(x), max(x)])
plt.ylabel('amplitude')
plt.title('x (speech-male.wav)')
plt.subplot(2,1,2)
frmTime = H*np.arange(fluxes.size)/float(fs)
plt.plot(frmTime, fluxes/max(fluxes), 'g', lw=1.5, label ='normalized spectral flux')
plt.plot(frmTime, onsetDetections/max(onsetDetections), 'c', lw=1.5, label = 'normalized onset detection')
plt.axis([0, x.size/float(fs), 0, 1])
plt.legend()
plt.tight_layout()
plt.savefig('spectralFlux-onsetFunction.png')
plt.show()
| agpl-3.0 |
walterreade/scikit-learn | sklearn/datasets/lfw.py | 31 | 19544 | """Loader for the Labeled Faces in the Wild (LFW) dataset
This dataset is a collection of JPEG pictures of famous people collected
over the internet, all details are available on the official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. The typical task is called
Face Verification: given a pair of two pictures, a binary classifier
must predict whether the two images are from the same person.
An alternative task, Face Recognition or Face Identification is:
given the picture of the face of an unknown person, identify the name
of the person by referring to a gallery of previously seen pictures of
identified persons.
Both Face Verification and Face Recognition are tasks that are typically
performed on the output of a model trained to perform Face Detection. The
most popular model for Face Detection is called Viola-Johns and is
implemented in the OpenCV library. The LFW faces were extracted by this face
detector from various online websites.
"""
# Copyright (c) 2011 Olivier Grisel <[email protected]>
# License: BSD 3 clause
from os import listdir, makedirs, remove
from os.path import join, exists, isdir
from sklearn.utils import deprecated
import logging
import numpy as np
try:
import urllib.request as urllib # for backwards compatibility
except ImportError:
import urllib
from .base import get_data_home, Bunch
from ..externals.joblib import Memory
from ..externals.six import b
logger = logging.getLogger(__name__)
BASE_URL = "http://vis-www.cs.umass.edu/lfw/"
ARCHIVE_NAME = "lfw.tgz"
FUNNELED_ARCHIVE_NAME = "lfw-funneled.tgz"
TARGET_FILENAMES = [
'pairsDevTrain.txt',
'pairsDevTest.txt',
'pairs.txt',
]
def scale_face(face):
"""Scale back to 0-1 range in case of normalization for plotting"""
scaled = face - face.min()
scaled /= scaled.max()
return scaled
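# Illustrative note (not part of the original module): for a face patch with
# values [2., 4., 6.], scale_face returns [0., 0.5, 1.], i.e. the patch
# rescaled to span the full 0-1 range expected by the plotting helpers.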
#
# Common private utilities for data fetching from the original LFW website,
# local disk caching, and image decoding.
#
def check_fetch_lfw(data_home=None, funneled=True, download_if_missing=True):
"""Helper function to download any missing LFW data"""
data_home = get_data_home(data_home=data_home)
lfw_home = join(data_home, "lfw_home")
if funneled:
archive_path = join(lfw_home, FUNNELED_ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw_funneled")
archive_url = BASE_URL + FUNNELED_ARCHIVE_NAME
else:
archive_path = join(lfw_home, ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw")
archive_url = BASE_URL + ARCHIVE_NAME
if not exists(lfw_home):
makedirs(lfw_home)
for target_filename in TARGET_FILENAMES:
target_filepath = join(lfw_home, target_filename)
if not exists(target_filepath):
if download_if_missing:
url = BASE_URL + target_filename
logger.warning("Downloading LFW metadata: %s", url)
urllib.urlretrieve(url, target_filepath)
else:
raise IOError("%s is missing" % target_filepath)
if not exists(data_folder_path):
if not exists(archive_path):
if download_if_missing:
logger.warning("Downloading LFW data (~200MB): %s",
archive_url)
urllib.urlretrieve(archive_url, archive_path)
else:
raise IOError("%s is missing" % archive_path)
import tarfile
logger.info("Decompressing the data archive to %s", data_folder_path)
tarfile.open(archive_path, "r:gz").extractall(path=lfw_home)
remove(archive_path)
return lfw_home, data_folder_path
def _load_imgs(file_paths, slice_, color, resize):
"""Internally used to load images"""
# Try to import imread and imresize from PIL. We do this here to prevent
# the whole sklearn.datasets module from depending on PIL.
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
from scipy.misc import imresize
except ImportError:
raise ImportError("The Python Imaging Library (PIL)"
" is required to load data from jpeg files")
# compute the portion of the images to load to respect the slice_ parameter
# given by the caller
default_slice = (slice(0, 250), slice(0, 250))
if slice_ is None:
slice_ = default_slice
else:
slice_ = tuple(s or ds for s, ds in zip(slice_, default_slice))
h_slice, w_slice = slice_
h = (h_slice.stop - h_slice.start) // (h_slice.step or 1)
w = (w_slice.stop - w_slice.start) // (w_slice.step or 1)
if resize is not None:
resize = float(resize)
h = int(resize * h)
w = int(resize * w)
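# Worked example of the arithmetic above (illustrative, not in the original
# source): with the default people loader below, slice_=(slice(70, 195),
# slice(78, 172)) gives h = 195 - 70 = 125 and w = 172 - 78 = 94, and
# resize=0.5 then yields h = int(0.5 * 125) = 62 and w = int(0.5 * 94) = 47,
# i.e. the 62 x 47 face images described in the loader docstrings.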
# allocate some contiguous memory to host the decoded image slices
n_faces = len(file_paths)
if not color:
faces = np.zeros((n_faces, h, w), dtype=np.float32)
else:
faces = np.zeros((n_faces, h, w, 3), dtype=np.float32)
# iterate over the collected file path to load the jpeg files as numpy
# arrays
for i, file_path in enumerate(file_paths):
if i % 1000 == 0:
logger.info("Loading face #%05d / %05d", i + 1, n_faces)
# Checks if jpeg reading worked. Refer to issue #3594 for more
# details.
img = imread(file_path)
if img.ndim == 0:
raise RuntimeError("Failed to read the image file %s, "
"Please make sure that libjpeg is installed"
% file_path)
face = np.asarray(img[slice_], dtype=np.float32)
face /= 255.0 # scale uint8 coded colors to the [0.0, 1.0] floats
if resize is not None:
face = imresize(face, resize)
if not color:
# average the color channels to compute a gray levels
# representation
face = face.mean(axis=2)
faces[i, ...] = face
return faces
#
# Task #1: Face Identification on picture with names
#
def _fetch_lfw_people(data_folder_path, slice_=None, color=False, resize=None,
min_faces_per_person=0):
"""Perform the actual data loading for the lfw people dataset
This operation is meant to be cached by a joblib wrapper.
"""
# scan the data folder content to retain people with more than
# `min_faces_per_person` face pictures
person_names, file_paths = [], []
for person_name in sorted(listdir(data_folder_path)):
folder_path = join(data_folder_path, person_name)
if not isdir(folder_path):
continue
paths = [join(folder_path, f) for f in listdir(folder_path)]
n_pictures = len(paths)
if n_pictures >= min_faces_per_person:
person_name = person_name.replace('_', ' ')
person_names.extend([person_name] * n_pictures)
file_paths.extend(paths)
n_faces = len(file_paths)
if n_faces == 0:
raise ValueError("min_faces_per_person=%d is too restrictive" %
min_faces_per_person)
target_names = np.unique(person_names)
target = np.searchsorted(target_names, person_names)
faces = _load_imgs(file_paths, slice_, color, resize)
# shuffle the faces with a deterministic RNG scheme to avoid having
# all faces of the same person in a row, as it would break some
# cross validation and learning algorithms such as SGD and online
# k-means that make an IID assumption
indices = np.arange(n_faces)
np.random.RandomState(42).shuffle(indices)
faces, target = faces[indices], target[indices]
return faces, target, target_names
def fetch_lfw_people(data_home=None, funneled=True, resize=0.5,
min_faces_per_person=0, color=False,
slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) people dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Recognition (or Identification): given the
picture of a face, find the name of the person given a training set
(gallery).
The original images are 250 x 250 pixels, but the default slice and resize
arguments reduce them to 62 x 47.
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
funneled : boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize : float, optional, default 0.5
Ratio used to resize the each face picture.
min_faces_per_person : int, optional, default 0
The extracted dataset will only retain pictures of people that have at
least `min_faces_per_person` different pictures.
color : boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
one more dimension than the shape with color = False.
slice_ : optional
Provide a custom 2D slice (height, width) to extract the
'interesting' part of the jpeg files and avoid using statistical
correlation from the background
download_if_missing : optional, True by default
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
dataset : dict-like object with the following attributes:
dataset.data : numpy array of shape (13233, 2914)
Each row corresponds to a ravelled face image of original size 62 x 47
pixels. Changing the ``slice_`` or resize parameters will change the
shape of the output.
dataset.images : numpy array of shape (13233, 62, 47)
Each row is a face image corresponding to one of the 5749 people in
the dataset. Changing the ``slice_`` or resize parameters will change
the shape of the output.
dataset.target : numpy array of shape (13233,)
Labels associated to each face image. Those labels range from 0-5748
and correspond to the person IDs.
dataset.DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading LFW people faces from %s', lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_people)
# load and memoize the pairs as np arrays
faces, target, target_names = load_func(
data_folder_path, resize=resize,
min_faces_per_person=min_faces_per_person, color=color, slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=faces.reshape(len(faces), -1), images=faces,
target=target, target_names=target_names,
DESCR="LFW faces dataset")
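# Usage sketch for the loader above (illustrative only; it assumes scikit-learn
# is installed with this module and either network access or a pre-populated
# ~/scikit_learn_data cache):
#
#   >>> lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)
#   >>> lfw_people.images.shape      # (n_samples, height, width)
#   >>> lfw_people.target_names      # one name per integer label in .target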
#
# Task #2: Face Verification on pairs of face pictures
#
def _fetch_lfw_pairs(index_file_path, data_folder_path, slice_=None,
color=False, resize=None):
"""Perform the actual data loading for the LFW pairs dataset
This operation is meant to be cached by a joblib wrapper.
"""
# parse the index file to find the number of pairs to be able to allocate
# the right amount of memory before starting to decode the jpeg files
with open(index_file_path, 'rb') as index_file:
split_lines = [ln.strip().split(b('\t')) for ln in index_file]
pair_specs = [sl for sl in split_lines if len(sl) > 2]
n_pairs = len(pair_specs)
# iterating over the metadata lines for each pair to find the filename to
# decode and load in memory
target = np.zeros(n_pairs, dtype=np.int)
file_paths = list()
for i, components in enumerate(pair_specs):
if len(components) == 3:
target[i] = 1
pair = (
(components[0], int(components[1]) - 1),
(components[0], int(components[2]) - 1),
)
elif len(components) == 4:
target[i] = 0
pair = (
(components[0], int(components[1]) - 1),
(components[2], int(components[3]) - 1),
)
else:
raise ValueError("invalid line %d: %r" % (i + 1, components))
for j, (name, idx) in enumerate(pair):
try:
person_folder = join(data_folder_path, name)
except TypeError:
person_folder = join(data_folder_path, str(name, 'UTF-8'))
filenames = list(sorted(listdir(person_folder)))
file_path = join(person_folder, filenames[idx])
file_paths.append(file_path)
pairs = _load_imgs(file_paths, slice_, color, resize)
shape = list(pairs.shape)
n_faces = shape.pop(0)
shape.insert(0, 2)
shape.insert(0, n_faces // 2)
pairs.shape = shape
return pairs, target, np.array(['Different persons', 'Same person'])
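# For reference, the metadata lines parsed above look roughly like this
# (illustrative reconstruction from the parsing logic, not copied from the
# real pairs files): a 3-field line such as "Aaron_Peirsol\t1\t2" denotes a
# matching pair (two images of the same person), while a 4-field line such as
# "Aaron_Peirsol\t1\tSomeone_Else\t3" denotes a non-matching pair.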
@deprecated("Function 'load_lfw_people' has been deprecated in 0.17 and will "
"be removed in 0.19."
"Use fetch_lfw_people(download_if_missing=False) instead.")
def load_lfw_people(download_if_missing=False, **kwargs):
"""Alias for fetch_lfw_people(download_if_missing=False)
Check fetch_lfw_people.__doc__ for the documentation and parameter list.
"""
return fetch_lfw_people(download_if_missing=download_if_missing, **kwargs)
def fetch_lfw_pairs(subset='train', data_home=None, funneled=True, resize=0.5,
color=False, slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) pairs dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Verification: given a pair of two pictures,
a binary classifier must predict whether the two images are from
the same person.
In the official `README.txt`_ this task is described as the
"Restricted" task. As I am not sure as to implement the
"Unrestricted" variant correctly, I left it as unsupported for now.
.. _`README.txt`: http://vis-www.cs.umass.edu/lfw/README.txt
The original images are 250 x 250 pixels, but the default slice and resize
arguments reduce them to 62 x 47.
Read more in the :ref:`User Guide <labeled_faces_in_the_wild>`.
Parameters
----------
subset : optional, default: 'train'
Select the dataset to load: 'train' for the development training
set, 'test' for the development test set, and '10_folds' for the
official evaluation set that is meant to be used with a 10-folds
cross validation.
data_home : optional, default: None
Specify another download and cache folder for the datasets. By
default all scikit learn data is stored in '~/scikit_learn_data'
subfolders.
funneled : boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize : float, optional, default 0.5
Ratio used to resize the each face picture.
color : boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
one more dimension than the shape with color = False.
slice_ : optional
Provide a custom 2D slice (height, width) to extract the
'interesting' part of the jpeg files and avoid using statistical
correlation from the background
download_if_missing : optional, True by default
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
The data is returned as a Bunch object with the following attributes:
data : numpy array of shape (2200, 5828). Shape depends on ``subset``.
Each row corresponds to 2 ravel'd face images of original size 62 x 47
pixels. Changing the ``slice_``, ``resize`` or ``subset`` parameters
will change the shape of the output.
pairs : numpy array of shape (2200, 2, 62, 47). Shape depends on
``subset``.
Each row has 2 face images corresponding to same or different person
from the dataset containing 5749 people. Changing the ``slice_``,
``resize`` or ``subset`` parameters will change the shape of the
output.
target : numpy array of shape (2200,). Shape depends on ``subset``.
Labels associated to each pair of images. The two label values being
different persons or the same person.
DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading %s LFW pairs from %s', subset, lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_pairs)
# select the right metadata file according to the requested subset
label_filenames = {
'train': 'pairsDevTrain.txt',
'test': 'pairsDevTest.txt',
'10_folds': 'pairs.txt',
}
if subset not in label_filenames:
raise ValueError("subset='%s' is invalid: should be one of %r" % (
subset, list(sorted(label_filenames.keys()))))
index_file_path = join(lfw_home, label_filenames[subset])
# load and memoize the pairs as np arrays
pairs, target, target_names = load_func(
index_file_path, data_folder_path, resize=resize, color=color,
slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=pairs.reshape(len(pairs), -1), pairs=pairs,
target=target, target_names=target_names,
DESCR="'%s' segment of the LFW pairs dataset" % subset)
@deprecated("Function 'load_lfw_pairs' has been deprecated in 0.17 and will "
"be removed in 0.19."
"Use fetch_lfw_pairs(download_if_missing=False) instead.")
def load_lfw_pairs(download_if_missing=False, **kwargs):
"""Alias for fetch_lfw_pairs(download_if_missing=False)
Check fetch_lfw_pairs.__doc__ for the documentation and parameter list.
"""
return fetch_lfw_pairs(download_if_missing=download_if_missing, **kwargs)
| bsd-3-clause |
ngaude/cdiscount | sampling.py | 2 | 2087 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import numpy as np
from sklearn.externals import joblib
import time
import pandas as pd
import random
import sys
from utils import ddir,header
ext = '.0' # default value
print '-'*50
if len(sys.argv)==2:
ext = '.'+str(int(sys.argv[1]))
print 'sampling auto '+ext
def training_sample_random(df,N = 200,mincount=7):
N = int(N)
cl = df.Categorie3
cc = cl.groupby(cl)
s = (cc.count() >= mincount)
labelmaj = s[s].index
print 'sampling =',N,'samples for any of',len(labelmaj),'classes'
dfs = []
for i,cat in enumerate(labelmaj):
if i%100==0:
print 'sampling',i,'/',len(labelmaj),':'
dfcat = df[df.Categorie3 == cat]
sample_count = N
if len(dfcat)>=sample_count:
# undersample: randomly keep sample_count rows
rows = random.sample(dfcat.index, sample_count)
dfs.append(dfcat.ix[rows])
else:
# sample all samples + oversample the remaining
dfs.append(dfcat)
dfcat = dfcat.iloc[np.random.randint(0, len(dfcat), size=sample_count-len(dfcat))]
dfs.append(dfcat)
dfsample = pd.concat(dfs)
dfsample = dfsample.reset_index(drop=True)
dfsample = dfsample.reindex(np.random.permutation(dfsample.index),copy=False)
return dfsample
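# Illustrative toy call (kept as a comment so the batch behaviour of this
# script is unchanged): with a frame holding 3 rows of Categorie3 'a' and
# 1 row of 'b',
#   training_sample_random(toy_df, N=2, mincount=1)
# would return 4 shuffled rows: 2 randomly undersampled from 'a', plus the
# single 'b' row and 1 oversampled duplicate of it.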
##########################
# sampling a training set
##########################
df = pd.read_csv(ddir+'training_head.csv',sep=';',names = header()).fillna('')
fname = ddir+'training_sample.csv'+ext
print '>>'+fname
dfsample = training_sample_random(df,N=456,mincount=7)
dfsample.to_csv(fname,sep=';',index=False,header=False)
print '<<'+fname
##########################
# sampling a validation set
##########################
df = pd.read_csv(ddir+'training_tail.csv',sep=';',names = header()).fillna('')
fname = ddir+'validation_sample.csv'+ext
print '>>'+fname
dfsample = training_sample_random(df,N=7,mincount=1)
dfsample.to_csv(fname, sep=';',index=False,header=False)
print '<<'+fname
print '-'*50
| gpl-2.0 |
siutanwong/scikit-learn | examples/manifold/plot_compare_methods.py | 259 | 4031 | """
=========================================
Comparison of Manifold Learning methods
=========================================
An illustration of dimensionality reduction on the S-curve dataset
with various manifold learning methods.
For a discussion and comparison of these algorithms, see the
:ref:`manifold module page <manifold>`
For a similar example, where the methods are applied to a
sphere dataset, see :ref:`example_manifold_plot_manifold_sphere.py`
Note that the purpose of the MDS is to find a low-dimensional
representation of the data (here 2D) in which the distances respect well
the distances in the original high-dimensional space. Unlike other
manifold-learning algorithms, it does not seek an isotropic
representation of the data in the low-dimensional space.
"""
# Author: Jake Vanderplas -- <[email protected]>
print(__doc__)
from time import time
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
from sklearn import manifold, datasets
# Next line to silence pyflakes. This import is needed.
Axes3D
n_points = 1000
X, color = datasets.samples_generator.make_s_curve(n_points, random_state=0)
n_neighbors = 10
n_components = 2
fig = plt.figure(figsize=(15, 8))
plt.suptitle("Manifold Learning with %i points, %i neighbors"
% (1000, n_neighbors), fontsize=14)
try:
# compatibility matplotlib < 1.0
ax = fig.add_subplot(251, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
ax.view_init(4, -72)
except:
ax = fig.add_subplot(251, projection='3d')
plt.scatter(X[:, 0], X[:, 2], c=color, cmap=plt.cm.Spectral)
methods = ['standard', 'ltsa', 'hessian', 'modified']
labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']
for i, method in enumerate(methods):
t0 = time()
Y = manifold.LocallyLinearEmbedding(n_neighbors, n_components,
eigen_solver='auto',
method=method).fit_transform(X)
t1 = time()
print("%s: %.2g sec" % (methods[i], t1 - t0))
ax = fig.add_subplot(252 + i)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("%s (%.2g sec)" % (labels[i], t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
Y = manifold.Isomap(n_neighbors, n_components).fit_transform(X)
t1 = time()
print("Isomap: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(257)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("Isomap (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
mds = manifold.MDS(n_components, max_iter=100, n_init=1)
Y = mds.fit_transform(X)
t1 = time()
print("MDS: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(258)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("MDS (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
se = manifold.SpectralEmbedding(n_components=n_components,
n_neighbors=n_neighbors)
Y = se.fit_transform(X)
t1 = time()
print("SpectralEmbedding: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(259)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("SpectralEmbedding (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
tsne = manifold.TSNE(n_components=n_components, init='pca', random_state=0)
Y = tsne.fit_transform(X)
t1 = time()
print("t-SNE: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(2, 5, 10)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("t-SNE (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
plt.show()
| bsd-3-clause |
hoho/dosido | nodejs/deps/v8/tools/ignition/bytecode_dispatches_report.py | 12 | 9172 | #! /usr/bin/python
#
# Copyright 2016 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
import argparse
import heapq
import json
from matplotlib import colors
from matplotlib import pyplot
import numpy
import struct
import sys
__DESCRIPTION = """
Process v8.ignition_dispatches_counters.json and list top counters,
or plot a dispatch heatmap.
Please note that those handlers that may not or will never dispatch
(e.g. Return or Throw) do not show up in the results.
"""
__HELP_EPILOGUE = """
examples:
# Print the hottest bytecodes in descending order, reading from
# default filename v8.ignition_dispatches_counters.json (default mode)
$ tools/ignition/bytecode_dispatches_report.py
# Print the hottest 15 bytecode dispatch pairs reading from data.json
$ tools/ignition/bytecode_dispatches_report.py -t -n 15 data.json
# Save heatmap to default filename v8.ignition_dispatches_counters.svg
$ tools/ignition/bytecode_dispatches_report.py -p
# Save heatmap to filename data.svg
$ tools/ignition/bytecode_dispatches_report.py -p -o data.svg
# Open the heatmap in an interactive viewer
$ tools/ignition/bytecode_dispatches_report.py -p -i
# Display the top 5 sources and destinations of dispatches to/from LdaZero
$ tools/ignition/bytecode_dispatches_report.py -f LdaZero -n 5
"""
__COUNTER_BITS = struct.calcsize("P") * 8 # Size in bits of a pointer
__COUNTER_MAX = 2**__COUNTER_BITS - 1
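# The input JSON is expected to map a source bytecode handler to a dictionary
# of destination handlers and dispatch counts, e.g. (illustrative sample only,
# not taken from a real run):
#
# {
#   "LdaZero": {"Star": 1234, "Return": 56},
#   "Star": {"LdaZero": 78, "LdaSmi": 90}
# }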
def warn_if_counter_may_have_saturated(dispatches_table):
for source, counters_from_source in iteritems(dispatches_table):
for destination, counter in iteritems(counters_from_source):
if counter == __COUNTER_MAX:
print "WARNING: {} -> {} may have saturated.".format(source,
destination)
def find_top_bytecode_dispatch_pairs(dispatches_table, top_count):
def flattened_counters_generator():
for source, counters_from_source in iteritems(dispatches_table):
for destination, counter in iteritems(counters_from_source):
yield source, destination, counter
return heapq.nlargest(top_count, flattened_counters_generator(),
key=lambda x: x[2])
def print_top_bytecode_dispatch_pairs(dispatches_table, top_count):
top_bytecode_dispatch_pairs = (
find_top_bytecode_dispatch_pairs(dispatches_table, top_count))
print "Top {} bytecode dispatch pairs:".format(top_count)
for source, destination, counter in top_bytecode_dispatch_pairs:
print "{:>12d}\t{} -> {}".format(counter, source, destination)
def find_top_bytecodes(dispatches_table):
top_bytecodes = []
for bytecode, counters_from_bytecode in iteritems(dispatches_table):
top_bytecodes.append((bytecode, sum(itervalues(counters_from_bytecode))))
top_bytecodes.sort(key=lambda x: x[1], reverse=True)
return top_bytecodes
def print_top_bytecodes(dispatches_table):
top_bytecodes = find_top_bytecodes(dispatches_table)
print "Top bytecodes:"
for bytecode, counter in top_bytecodes:
print "{:>12d}\t{}".format(counter, bytecode)
def find_top_dispatch_sources_and_destinations(
dispatches_table, bytecode, top_count, sort_source_relative):
sources = []
for source, destinations in iteritems(dispatches_table):
total = float(sum(itervalues(destinations)))
if bytecode in destinations:
count = destinations[bytecode]
sources.append((source, count, count / total))
destinations = []
bytecode_destinations = dispatches_table[bytecode]
bytecode_total = float(sum(itervalues(bytecode_destinations)))
for destination, count in iteritems(bytecode_destinations):
destinations.append((destination, count, count / bytecode_total))
return (heapq.nlargest(top_count, sources,
key=lambda x: x[2 if sort_source_relative else 1]),
heapq.nlargest(top_count, destinations, key=lambda x: x[1]))
def print_top_dispatch_sources_and_destinations(dispatches_table, bytecode,
top_count, sort_relative):
top_sources, top_destinations = find_top_dispatch_sources_and_destinations(
dispatches_table, bytecode, top_count, sort_relative)
print "Top sources of dispatches to {}:".format(bytecode)
for source_name, counter, ratio in top_sources:
print "{:>12d}\t{:>5.1f}%\t{}".format(counter, ratio * 100, source_name)
print "\nTop destinations of dispatches from {}:".format(bytecode)
for destination_name, counter, ratio in top_destinations:
print "{:>12d}\t{:>5.1f}%\t{}".format(counter, ratio * 100, destination_name)
def build_counters_matrix(dispatches_table):
labels = sorted(dispatches_table.keys())
counters_matrix = numpy.empty([len(labels), len(labels)], dtype=int)
for from_index, from_name in enumerate(labels):
current_row = dispatches_table[from_name]
for to_index, to_name in enumerate(labels):
counters_matrix[from_index, to_index] = current_row.get(to_name, 0)
# Reverse y axis for a nicer appearance
xlabels = labels
ylabels = list(reversed(xlabels))
counters_matrix = numpy.flipud(counters_matrix)
return counters_matrix, xlabels, ylabels
def plot_dispatches_table(dispatches_table, figure, axis):
counters_matrix, xlabels, ylabels = build_counters_matrix(dispatches_table)
image = axis.pcolor(
counters_matrix,
cmap="jet",
norm=colors.LogNorm(),
edgecolor="grey",
linestyle="dotted",
linewidth=0.5
)
axis.xaxis.set(
ticks=numpy.arange(0.5, len(xlabels)),
label="From bytecode handler"
)
axis.xaxis.tick_top()
axis.set_xlim(0, len(xlabels))
axis.set_xticklabels(xlabels, rotation="vertical")
axis.yaxis.set(
ticks=numpy.arange(0.5, len(ylabels)),
label="To bytecode handler",
ticklabels=ylabels
)
axis.set_ylim(0, len(ylabels))
figure.colorbar(
image,
ax=axis,
fraction=0.01,
pad=0.01
)
def parse_command_line():
command_line_parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=__DESCRIPTION,
epilog=__HELP_EPILOGUE
)
command_line_parser.add_argument(
"--plot-size", "-s",
metavar="N",
default=30,
help="shorter side in inches of the output plot (default 30)"
)
command_line_parser.add_argument(
"--plot", "-p",
action="store_true",
help="plot dispatch pairs heatmap"
)
command_line_parser.add_argument(
"--interactive", "-i",
action="store_true",
help="open the heatmap in an interactive viewer, instead of writing to file"
)
command_line_parser.add_argument(
"--top-bytecode-dispatch-pairs", "-t",
action="store_true",
help="print the top bytecode dispatch pairs"
)
command_line_parser.add_argument(
"--top-entries-count", "-n",
metavar="N",
type=int,
default=10,
help="print N top entries when running with -t or -f (default 10)"
)
command_line_parser.add_argument(
"--top-dispatches-for-bytecode", "-f",
metavar="<bytecode name>",
help="print top dispatch sources and destinations to the specified bytecode"
)
command_line_parser.add_argument(
"--output-filename", "-o",
metavar="<output filename>",
default="v8.ignition_dispatches_table.svg",
help=("file to save the plot file to. File type is deduced from the "
"extension. PDF, SVG, PNG supported")
)
command_line_parser.add_argument(
"--sort-sources-relative", "-r",
action="store_true",
help=("print top sources in order to how often they dispatch to the "
"specified bytecode, only applied when using -f")
)
command_line_parser.add_argument(
"input_filename",
metavar="<input filename>",
default="v8.ignition_dispatches_table.json",
nargs='?',
help="Ignition counters JSON file"
)
return command_line_parser.parse_args()
def itervalues(d):
return d.values() if sys.version_info[0] > 2 else d.itervalues()
def iteritems(d):
return d.items() if sys.version_info[0] > 2 else d.iteritems()
def main():
program_options = parse_command_line()
with open(program_options.input_filename) as stream:
dispatches_table = json.load(stream)
warn_if_counter_may_have_saturated(dispatches_table)
if program_options.plot:
figure, axis = pyplot.subplots()
plot_dispatches_table(dispatches_table, figure, axis)
if program_options.interactive:
pyplot.show()
else:
figure.set_size_inches(program_options.plot_size,
program_options.plot_size)
pyplot.savefig(program_options.output_filename)
elif program_options.top_bytecode_dispatch_pairs:
print_top_bytecode_dispatch_pairs(
dispatches_table, program_options.top_entries_count)
elif program_options.top_dispatches_for_bytecode:
print_top_dispatch_sources_and_destinations(
dispatches_table, program_options.top_dispatches_for_bytecode,
program_options.top_entries_count, program_options.sort_sources_relative)
else:
print_top_bytecodes(dispatches_table)
if __name__ == "__main__":
main()
| mit |
etkirsch/scikit-learn | examples/applications/plot_prediction_latency.py | 234 | 11277 | """
==================
Prediction Latency
==================
This is an example showing the prediction latency of various scikit-learn
estimators.
The goal is to measure the latency one can expect when doing predictions
either in bulk or atomic (i.e. one by one) mode.
The plots represent the distribution of the prediction latency as a boxplot.
"""
# Authors: Eustache Diemert <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from collections import defaultdict
import time
import gc
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import scoreatpercentile
from sklearn.datasets.samples_generator import make_regression
from sklearn.ensemble.forest import RandomForestRegressor
from sklearn.linear_model.ridge import Ridge
from sklearn.linear_model.stochastic_gradient import SGDRegressor
from sklearn.svm.classes import SVR
def _not_in_sphinx():
# Hack to detect whether we are running by the sphinx builder
return '__file__' in globals()
def atomic_benchmark_estimator(estimator, X_test, verbose=False):
"""Measure runtime prediction of each instance."""
n_instances = X_test.shape[0]
runtimes = np.zeros(n_instances, dtype=np.float)
for i in range(n_instances):
instance = X_test[i, :]
start = time.time()
estimator.predict(instance)
runtimes[i] = time.time() - start
if verbose:
print("atomic_benchmark runtimes:", min(runtimes), scoreatpercentile(
runtimes, 50), max(runtimes))
return runtimes
def bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats, verbose):
"""Measure runtime prediction of the whole input."""
n_instances = X_test.shape[0]
runtimes = np.zeros(n_bulk_repeats, dtype=np.float)
for i in range(n_bulk_repeats):
start = time.time()
estimator.predict(X_test)
runtimes[i] = time.time() - start
runtimes = np.array(list(map(lambda x: x / float(n_instances), runtimes)))
if verbose:
print("bulk_benchmark runtimes:", min(runtimes), scoreatpercentile(
runtimes, 50), max(runtimes))
return runtimes
def benchmark_estimator(estimator, X_test, n_bulk_repeats=30, verbose=False):
"""
Measure runtimes of prediction in both atomic and bulk mode.
Parameters
----------
estimator : already trained estimator supporting `predict()`
X_test : test input
n_bulk_repeats : how many times to repeat when evaluating bulk mode
Returns
-------
atomic_runtimes, bulk_runtimes : a pair of `np.array` which contain the
runtimes in seconds.
"""
atomic_runtimes = atomic_benchmark_estimator(estimator, X_test, verbose)
bulk_runtimes = bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats,
verbose)
return atomic_runtimes, bulk_runtimes
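# Standalone sketch (illustrative; assumes a fitted estimator and a test
# matrix shaped like the ones produced by generate_dataset below):
#
#   atomic, bulk = benchmark_estimator(fitted_estimator, X_test)
#   print("90th percentile atomic latency: %.1f us"
#         % (1e6 * scoreatpercentile(atomic, 90)))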
def generate_dataset(n_train, n_test, n_features, noise=0.1, verbose=False):
"""Generate a regression dataset with the given parameters."""
if verbose:
print("generating dataset...")
X, y, coef = make_regression(n_samples=n_train + n_test,
n_features=n_features, noise=noise, coef=True)
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
idx = np.arange(n_train)
np.random.seed(13)
np.random.shuffle(idx)
X_train = X_train[idx]
y_train = y_train[idx]
std = X_train.std(axis=0)
mean = X_train.mean(axis=0)
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
std = y_train.std(axis=0)
mean = y_train.mean(axis=0)
y_train = (y_train - mean) / std
y_test = (y_test - mean) / std
gc.collect()
if verbose:
print("ok")
return X_train, y_train, X_test, y_test
def boxplot_runtimes(runtimes, pred_type, configuration):
"""
Plot a new `Figure` with boxplots of prediction runtimes.
Parameters
----------
runtimes : list of `np.array` of latencies in micro-seconds
pred_type : 'bulk' or 'atomic'
configuration : dict of the benchmark configuration (including the estimators)
"""
fig, ax1 = plt.subplots(figsize=(10, 6))
bp = plt.boxplot(runtimes, )
cls_infos = ['%s\n(%d %s)' % (estimator_conf['name'],
estimator_conf['complexity_computer'](
estimator_conf['instance']),
estimator_conf['complexity_label']) for
estimator_conf in configuration['estimators']]
plt.setp(ax1, xticklabels=cls_infos)
plt.setp(bp['boxes'], color='black')
plt.setp(bp['whiskers'], color='black')
plt.setp(bp['fliers'], color='red', marker='+')
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax1.set_axisbelow(True)
ax1.set_title('Prediction Time per Instance - %s, %d feats.' % (
pred_type.capitalize(),
configuration['n_features']))
ax1.set_ylabel('Prediction Time (us)')
plt.show()
def benchmark(configuration):
"""Run the whole benchmark."""
X_train, y_train, X_test, y_test = generate_dataset(
configuration['n_train'], configuration['n_test'],
configuration['n_features'])
stats = {}
for estimator_conf in configuration['estimators']:
print("Benchmarking", estimator_conf['instance'])
estimator_conf['instance'].fit(X_train, y_train)
gc.collect()
a, b = benchmark_estimator(estimator_conf['instance'], X_test)
stats[estimator_conf['name']] = {'atomic': a, 'bulk': b}
cls_names = [estimator_conf['name'] for estimator_conf in configuration[
'estimators']]
runtimes = [1e6 * stats[clf_name]['atomic'] for clf_name in cls_names]
boxplot_runtimes(runtimes, 'atomic', configuration)
runtimes = [1e6 * stats[clf_name]['bulk'] for clf_name in cls_names]
boxplot_runtimes(runtimes, 'bulk (%d)' % configuration['n_test'],
configuration)
def n_feature_influence(estimators, n_train, n_test, n_features, percentile):
"""
Estimate influence of the number of features on prediction time.
Parameters
----------
estimators : dict of (name (str), estimator) to benchmark
n_train : number of training instances (int)
n_test : number of testing instances (int)
n_features : list of feature-space dimensionality to test (int)
percentile : percentile at which to measure the speed (int [0-100])
Returns:
--------
percentiles : dict(estimator_name,
dict(n_features, percentile_perf_in_us))
"""
percentiles = defaultdict(defaultdict)
for n in n_features:
print("benchmarking with %d features" % n)
X_train, y_train, X_test, y_test = generate_dataset(n_train, n_test, n)
for cls_name, estimator in estimators.items():
estimator.fit(X_train, y_train)
gc.collect()
runtimes = bulk_benchmark_estimator(estimator, X_test, 30, False)
percentiles[cls_name][n] = 1e6 * scoreatpercentile(runtimes,
percentile)
return percentiles
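# Illustrative shape of the returned mapping (not from an actual run):
# {'ridge': {100: 12.3, 250: 15.8, 500: 21.4}}, i.e. estimator name ->
# number of features -> bulk latency in microseconds at `percentile`.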
def plot_n_features_influence(percentiles, percentile):
fig, ax1 = plt.subplots(figsize=(10, 6))
colors = ['r', 'g', 'b']
for i, cls_name in enumerate(percentiles.keys()):
x = np.array(sorted([n for n in percentiles[cls_name].keys()]))
y = np.array([percentiles[cls_name][n] for n in x])
plt.plot(x, y, color=colors[i], )
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax1.set_axisbelow(True)
ax1.set_title('Evolution of Prediction Time with #Features')
ax1.set_xlabel('#Features')
ax1.set_ylabel('Prediction Time at %d%%-ile (us)' % percentile)
plt.show()
def benchmark_throughputs(configuration, duration_secs=0.1):
"""benchmark throughput for different estimators."""
X_train, y_train, X_test, y_test = generate_dataset(
configuration['n_train'], configuration['n_test'],
configuration['n_features'])
throughputs = dict()
for estimator_config in configuration['estimators']:
estimator_config['instance'].fit(X_train, y_train)
start_time = time.time()
n_predictions = 0
while (time.time() - start_time) < duration_secs:
estimator_config['instance'].predict(X_test[0])
n_predictions += 1
throughputs[estimator_config['name']] = n_predictions / duration_secs
return throughputs
def plot_benchmark_throughput(throughputs, configuration):
fig, ax = plt.subplots(figsize=(10, 6))
colors = ['r', 'g', 'b']
cls_infos = ['%s\n(%d %s)' % (estimator_conf['name'],
estimator_conf['complexity_computer'](
estimator_conf['instance']),
estimator_conf['complexity_label']) for
estimator_conf in configuration['estimators']]
cls_values = [throughputs[estimator_conf['name']] for estimator_conf in
configuration['estimators']]
plt.bar(range(len(throughputs)), cls_values, width=0.5, color=colors)
ax.set_xticks(np.linspace(0.25, len(throughputs) - 0.75, len(throughputs)))
ax.set_xticklabels(cls_infos, fontsize=10)
ymax = max(cls_values) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('Throughput (predictions/sec)')
ax.set_title('Prediction Throughput for different estimators (%d '
'features)' % configuration['n_features'])
plt.show()
###############################################################################
# main code
start_time = time.time()
# benchmark bulk/atomic prediction speed for various regressors
configuration = {
'n_train': int(1e3),
'n_test': int(1e2),
'n_features': int(1e2),
'estimators': [
{'name': 'Linear Model',
'instance': SGDRegressor(penalty='elasticnet', alpha=0.01,
l1_ratio=0.25, fit_intercept=True),
'complexity_label': 'non-zero coefficients',
'complexity_computer': lambda clf: np.count_nonzero(clf.coef_)},
{'name': 'RandomForest',
'instance': RandomForestRegressor(),
'complexity_label': 'estimators',
'complexity_computer': lambda clf: clf.n_estimators},
{'name': 'SVR',
'instance': SVR(kernel='rbf'),
'complexity_label': 'support vectors',
'complexity_computer': lambda clf: len(clf.support_vectors_)},
]
}
benchmark(configuration)
# benchmark n_features influence on prediction speed
percentile = 90
percentiles = n_feature_influence({'ridge': Ridge()},
configuration['n_train'],
configuration['n_test'],
[100, 250, 500], percentile)
plot_n_features_influence(percentiles, percentile)
# benchmark throughput
throughputs = benchmark_throughputs(configuration)
plot_benchmark_throughput(throughputs, configuration)
stop_time = time.time()
print("example run in %.2fs" % (stop_time - start_time))
| bsd-3-clause |
najmacherrad/master_thesis | Waltz/plotcomp1kg_waltz.py | 1 | 11632 | # Waltz
# Compare results between wild type and mutant
# coding=utf-8
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import csv
from scipy import stats
from pylab import plot, show, savefig, xlim, figure, \
hold, ylim, legend, boxplot, setp, axes
import pylab
def getColumn(filename, column,deli):
results = csv.reader(open(filename), delimiter=deli)
return [result[column] for result in results]
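# Example (illustrative): getColumn(file_wt, 3, '\t') returns the 4th
# tab-separated column of the wild-type results file as a list of strings,
# header value included, which is why .pop(0) is called on it below.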
#import files
file_wt = 'waltzresultsNEW_wt.csv'
file_mut = 'waltzresultsNEW_1kg.csv'
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# AGGREGATION
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#--------------------------------------
# SCATTER PLOT
pred_wt = getColumn(file_wt,3,'\t')
pred_mut = getColumn(file_mut,3,'\t')
pred_wt.pop(0)
pred_mut.pop(0)
x,y=[],[]
for i in range(0,len(pred_wt)): #max=98.662207
if pred_wt[i]=='NA':
x.append(np.nan)
else:
x.append(float(pred_wt[i]))
for i in range(0,len(pred_mut)): #max=99.665552
if pred_mut[i]=='NA':
y.append(np.nan)
else:
y.append(float(pred_mut[i]))
fig = plt.figure()
a=b=[0,100]
plt.scatter(x, y,edgecolor = 'none', c= 'k')
plt.plot(a,b,'r-')
plt.grid('on')
plt.xlim(-1,101)
plt.ylim(-1,101)
plt.xlabel('Wild types')
plt.ylabel('Neutral 1KGP mutants')
fig.savefig('waltz_wtVS1kg.jpg')
#----------------
# PROBABILITY DENSITY CURVE
fig = figure()
mu1, std1 = stats.norm.fit(x)
mu2, std2 = stats.norm.fit(y)
xmin1, xmax1 = plt.xlim()
xmin2, xmax2 = plt.xlim()
x1 = np.linspace(xmin1, 100, 100)
x2 = np.linspace(xmin2, 100, 100)
p1 = stats.norm.pdf(x1, mu1, std1)
p2 = stats.norm.pdf(x2, mu2, std2)
plt.plot(x1, p1, 'k',label='Wild types (fit results: mu=%.2f,std=%.2f)'%(mu1, std1))
plt.plot(x2, p2, 'b',label='Neutral 1KGP mutants \n(fit results: mu=%.2f,std=%.2f)'%(mu2, std2))
plt.xlabel('Aggregation conformation predicted values (amylogenic regions)')
plt.ylabel('Frequency')
plt.xlim(0,100)
plt.ylim(0,0.045)
plt.legend(loc='upper right')
fig.savefig('histwaltz_missense1kg.png')
#missense_wt - missense_mut
miss = [a_i - b_i for a_i, b_i in zip(x, y)]
#KOLMOGOROV-SMINORV:
stats.kstest(miss,'norm') # (D,pvalue) = (0.37733219518495709, 0.0)
#So we reject H0 -> not normal distribution
#WILCOXON TEST:
stats.wilcoxon(miss) # (T, pvalue) = (1948626.0, 0.00024177854220634842)
#So we reject H0 -> There is a significant difference between wt and mut
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# AGGREGATION ENVIRONMENT
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#--------------------------------------
# SCATTER PLOT
pred_wt = getColumn(file_wt,4,'\t')
pred_mut = getColumn(file_mut,4,'\t')
pred_wt.pop(0)
pred_mut.pop(0)
x,y=[],[]
for i in range(0,len(pred_wt)): #max=98.662207
if pred_wt[i]=='NA':
x.append(np.nan)
else:
x.append(float(pred_wt[i]))
for i in range(0,len(pred_mut)): #max=98.996656
if pred_mut[i]=='NA':
y.append(np.nan)
else:
y.append(float(pred_mut[i]))
fig = plt.figure()
a=b=[0,100]
plt.scatter(x, y,edgecolor = 'none', c= 'k')
plt.plot(a,b,'r-')
plt.grid('on')
plt.xlim(-1,101)
plt.ylim(-1,101)
plt.xlabel('Wild types')
plt.ylabel('Neutral 1KGP mutants')
fig.savefig('waltz_envt_wtVS1kg.jpg')
#--------------------------------------
# HISTOGRAM
fig = figure()
mu1, std1 = stats.norm.fit(x)
mu2, std2 = stats.norm.fit(y)
xmin1, xmax1 = plt.xlim()
xmin2, xmax2 = plt.xlim()
x1 = np.linspace(xmin1, 100, 100)
x2 = np.linspace(xmin2, 100, 100)
p1 = stats.norm.pdf(x1, mu1, std1)
p2 = stats.norm.pdf(x2, mu2, std2)
plt.plot(x1, p1, 'k',label='Wild types (fit results: mu=%.2f,std=%.2f)'%(mu1, std1))
plt.plot(x2, p2, 'b',label='Neutral 1KGP mutants \n(fit results: mu=%.2f,std=%.2f)'%(mu2, std2))
plt.xlabel('Aggregation conformation predicted values (amylogenic regions)')
plt.ylabel('Frequency')
plt.xlim(0,100)
plt.ylim(0,0.06)
plt.legend(loc='upper right')
fig.savefig('histwaltzenvt_missense_1kg.png')
# STATS
miss = [a_i - b_i for a_i, b_i in zip(x, y)]
#KOLMOGOROV-SMINORV:
stats.kstest(miss,'norm') # (D,pvalue) = (0.36028431579335313, 0.0)
#So we reject H0 -> not normal distribution
#WILCOXON TEST:
stats.wilcoxon(miss) # (T, pvalue) = (3834070.0, 2.138887727291201e-17)
#So we reject H0 -> There is a significant difference between wild type and mutant
#-----------------------------------------------------------------------------
# OUTLIERS FOR AGGREGATION
#-----------------------------------------------------------------------------
pred_wt = getColumn(file_wt,3,'\t')
pred_mut = getColumn(file_mut,3,'\t')
pred_wt.pop(0)
pred_mut.pop(0)
pred_envt_wt = getColumn(file_wt,4,'\t')
pred_envt_mut = getColumn(file_mut,4,'\t')
pred_envt_wt.pop(0)
pred_envt_mut.pop(0)
variant_liste = getColumn(file_wt,0,'\t')
output = open('waltz_outliers_1kg.csv','w')
output.write('ID,agg_wt,agg_mut,difference,agg_envt_wt,agg_envt_mut,difference_envt\n')
for i in range(0,len(pred_wt)):
for j in range(0,len(pred_mut)):
if i==j:
if pred_wt[i]!='NA'and pred_mut[j]!='NA':
if (abs(float(pred_wt[i])-float(pred_mut[j]))) > 20:
output.write(variant_liste[i+1] + ',' + pred_wt[i] + ',' + pred_mut[j] + ',' + str(abs(float(pred_wt[i])-float(pred_mut[j]))) + ',' + pred_envt_wt[i] + ',' + pred_envt_mut[i] + ',' + str(abs(float(pred_envt_wt[i])-float(pred_envt_mut[j]))) + '\n')
output.close()
#-------------------------------------------------------------------------------
#COMPARISON WITH NETSURFP RSA
#-------------------------------------------------------------------------------
W_wt = pd.read_csv(file_wt,'\t')
W_mut = pd.read_csv(file_mut,'\t')
W_wt['DWaltz'] = ''
W_wt['DWaltz'] = W_wt.aggregation - W_mut.aggregation
W_wt['DWaltz_envt'] = ''
W_wt['DWaltz_envt'] = W_wt.aggregation_envt - W_mut.aggregation_envt
W_wt = W_wt.drop(['aggregation','aggregation_envt'], 1)
W_wt.to_csv('waltzresults_compare1kg.csv', index=False)
T = open('waltzresults_compare1kg.csv','r')
N = open('netsurfpresults_compare1kg.csv','r')
T2 = open('_waltzresults_compare1kg.csv','w')
N2 = open('_netsurfpresults_compare1kg.csv','w')
c1 = csv.reader(T, delimiter=',')
c2 = csv.reader(N, delimiter=',')
c3 = csv.writer(T2)
c4 = csv.writer(N2)
waltz = list(c1) #5'847
netsurfp = list(c2) #2'517
for i in waltz:
for j in netsurfp:
if i[0] == j[0]:
c3.writerow(i)
c4.writerow(j)
N.close()
T.close()
N2.close()
T2.close()
#RESIDUE
waltz = getColumn('_waltzresults_compare1kg.csv',3,',')
netsurfp = getColumn('_netsurfpresults_compare1kg.csv',3,',')
waltz.pop(0)
netsurfp.pop(0)
x,y=[],[]
for i in range(0,len(netsurfp)): #min=-0.183 and max=0.302
if netsurfp[i]=='':
x.append(np.nan)
else:
x.append(float(netsurfp[i]))
for i in range(0,len(waltz)): #min=-98.862207 and max=98.327759
if waltz[i]=='':
y.append(np.nan)
else:
y.append(float(waltz[i]))
fig = plt.figure()
plt.scatter(x, y,edgecolor = 'none', c= 'k')
plt.grid('on')
plt.xlim(-0.4,0.4)
plt.ylim(-100,100)
plt.xlabel('delta(Solvent accessibility prediction) by NetSurfP')
plt.ylabel('delta(Aggregation conformation prediction) by Waltz')
fig.savefig('WaltzVSnetsurfp_1kg.jpg')
#ENVIRONMENT
waltz_envt = getColumn('_waltzresults_compare1kg.csv',4,',')
netsurfp_envt = getColumn('_netsurfpresults_compare1kg.csv',4,',')
waltz_envt.pop(0)
netsurfp_envt.pop(0)
x,y=[],[]
for i in range(0,len(netsurfp_envt)): #min=-0.127 and max=0.08275
if netsurfp_envt[i]=='':
x.append(np.nan)
else:
x.append(float(netsurfp_envt[i]))
for i in range(0,len(waltz_envt)): #min=-93.645485 and max=97.993311
if waltz_envt[i]=='':
y.append(np.nan)
else:
y.append(float(waltz_envt[i]))
fig = plt.figure()
plt.scatter(x, y,edgecolor = 'none', c= 'k')
plt.grid('on')
plt.xlim(-0.4,0.4)
plt.ylim(-100,100)
plt.xlabel('delta(Solvent accessibility prediction) by NetSurfP')
plt.ylabel('delta(Aggregation conformation prediction) by Waltz')
fig.savefig('WaltzVSnetsurfp_1kg_envt.jpg')
#-----------------------------------------------------------------------------
# AGGREGATION : COMPARISON deleterious DIDA mutants VS neutral 1KGP mutants
#-----------------------------------------------------------------------------
file_DIDAmut = 'waltzresults_mut.csv'
pred_DIDA = getColumn(file_DIDAmut,3,'\t')
pred_1kg = getColumn(file_mut,3,'\t')
pred_DIDA.pop(0)
pred_1kg.pop(0)
xpred,ypred=[],[]
for i in range(0,len(pred_DIDA)):
if pred_DIDA[i]=='NA':
xpred.append(np.nan)
else:
xpred.append(float(pred_DIDA[i]))
for i in range(0,len(pred_1kg)):
if pred_1kg[i]=='NA':
ypred.append(np.nan)
else:
ypred.append(float(pred_1kg[i]))
fig = figure()
mu1, std1 = stats.norm.fit(xpred)
mu2, std2 = stats.norm.fit(ypred)
bins = np.linspace(0, 100, 35)
plt.hist(xpred,bins,normed=True,alpha=0.3, color='r',label='Deleterious DIDA mutants \n(fit results: mu=%.2f,std=%.2f)'%(mu1, std1))
plt.hist(ypred,bins,normed=True,alpha=0.3, label='Neutral 1KGP mutants \n(fit results: mu=%.2f,std=%.2f)'%(mu2, std2),color='blue')
xmin1, xmax1 = plt.xlim()
xmin2, xmax2 = plt.xlim()
x1 = np.linspace(xmin1, xmax1, 100)
x2 = np.linspace(xmin2, xmax2, 100)
p1 = stats.norm.pdf(x1, mu1, std1)
p2 = stats.norm.pdf(x2, mu2, std2)
plt.plot(x1, p1, 'r', linewidth=2)
plt.plot(x2, p2, 'b', linewidth=2)
plt.xlabel('Aggregation conformation predicted values (amylogenic regions)')
plt.ylabel('log(Frequency)')
plt.xlim(0,100)
plt.ylim(1e-24,1e4)
plt.legend(loc='upper right')
plt.yscale('log')
fig.savefig('histo_waltz_DIDAVS1kg.png')
#MANN-WHITNEY:
stats.ranksums(xpred,ypred) # (U,p-value) = (2.8180561149873737, 0.0048315365001516033)
# Reject H0
# The distributions of two sets of variables have a difference
#-----------------------------------------------------------------------------
# AGGREGATION ENVT : COMPARISON deleterious DIDA mutants VS neutral 1KGP mutants
#-----------------------------------------------------------------------------
pred_DIDA = getColumn(file_DIDAmut,4,'\t')
pred_1kg = getColumn(file_mut,4,'\t')
pred_DIDA.pop(0)
pred_1kg.pop(0)
xpred,ypred=[],[]
for i in range(0,len(pred_DIDA)):
if pred_DIDA[i]=='NA':
xpred.append(np.nan)
else:
xpred.append(float(pred_DIDA[i]))
for i in range(0,len(pred_1kg)):
if pred_1kg[i]=='NA':
ypred.append(np.nan)
else:
ypred.append(float(pred_1kg[i]))
fig = figure()
mu1, std1 = stats.norm.fit(xpred)
mu2, std2 = stats.norm.fit(ypred)
bins = np.linspace(0, 100, 35)
plt.hist(xpred, bins, alpha=0.3, label='Deleterious DIDA mutants \n(fit results: mu=%.2f,std=%.2f)'%(mu1, std1),normed=True,color='red')
plt.hist(ypred, bins, alpha=0.3, label='Neutral 1KGP mutants \n(fit results: mu=%.2f,std=%.2f)'%(mu2, std2),normed=True,color='blue')
xmin1, xmax1 = plt.xlim()
xmin2, xmax2 = plt.xlim()
x1 = np.linspace(xmin1, xmax1, 100)
x2 = np.linspace(xmin2, xmax2, 100)
p1 = stats.norm.pdf(x1, mu1, std1)
p2 = stats.norm.pdf(x2, mu2, std2)
plt.plot(x1, p1, 'r', linewidth=2)
plt.plot(x2, p2, 'b', linewidth=2)
plt.xlabel('Aggregation conformation predicted values (amylogenic regions)')
plt.ylabel('log(Frequency)')
plt.xlim(0,100)
plt.ylim(1e-24,1e4)
plt.legend(loc='upper right')
plt.yscale('log')
fig.savefig('histo_waltzenvt_DIDA1kg.png')
#MANN-WHITNEY:
stats.ranksums(xpred,ypred) # (U,p-value) = (3.0503709286051883, 0.0022855889607669021)
# Reject H0
# The distributions of two sets of variables have a difference
| mit |
bnaul/scikit-learn | build_tools/generate_authors_table.py | 14 | 5018 | """
This script generates an html table of contributors, with names and avatars.
The list is generated from scikit-learn's teams on GitHub, plus a small number
of hard-coded contributors.
The table should be updated for each new inclusion in the teams.
Generating the table requires admin rights.
"""
import sys
import requests
import getpass
import time
from pathlib import Path
from os import path
print("user:", file=sys.stderr)
user = input()
token = getpass.getpass("access token:\n")
auth = (user, token)
LOGO_URL = 'https://avatars2.githubusercontent.com/u/365630?v=4'
REPO_FOLDER = Path(path.abspath(__file__)).parent.parent
def get(url):
for sleep_time in [10, 30, 0]:
reply = requests.get(url, auth=auth)
api_limit = ("message" in reply.json()
and "API rate limit exceeded" in reply.json()["message"])
if not api_limit:
break
print("API rate limit exceeded, waiting..")
time.sleep(sleep_time)
reply.raise_for_status()
return reply
def get_contributors():
"""Get the list of contributor profiles. Require admin rights."""
# get core devs and triage team
core_devs = []
triage_team = []
for team_id, lst in zip((11523, 3593183), (core_devs, triage_team)):
for page in [1, 2]: # 30 per page
reply = get(
f"https://api.github.com/teams/{team_id}/members?page={page}"
)
lst.extend(reply.json())
# get members of scikit-learn on GitHub
members = []
for page in [1, 2]: # 30 per page
reply = get(
"https://api.github.com/orgs/scikit-learn/members?page=%d" %
(page, ))
members.extend(reply.json())
# keep only the logins
core_devs = set(c['login'] for c in core_devs)
triage_team = set(c['login'] for c in triage_team)
members = set(c['login'] for c in members)
# add missing contributors with GitHub accounts
members |= {'dubourg', 'mbrucher', 'thouis', 'jarrodmillman'}
# add missing contributors without GitHub accounts
members |= {'Angel Soler Gollonet'}
# remove CI bots
members -= {'sklearn-ci', 'sklearn-lgtm', 'sklearn-wheels'}
triage_team -= core_devs # remove ogrisel from triage_team
emeritus = members - core_devs - triage_team
# get profiles from GitHub
core_devs = [get_profile(login) for login in core_devs]
emeritus = [get_profile(login) for login in emeritus]
triage_team = [get_profile(login) for login in triage_team]
# sort by last name
core_devs = sorted(core_devs, key=key)
emeritus = sorted(emeritus, key=key)
triage_team = sorted(triage_team, key=key)
return core_devs, emeritus, triage_team
def get_profile(login):
"""Get the GitHub profile from login"""
print("get profile for %s" % (login, ))
try:
profile = get("https://api.github.com/users/%s" % login).json()
except requests.exceptions.HTTPError:
return dict(name=login, avatar_url=LOGO_URL, html_url="")
if profile["name"] is None:
profile["name"] = profile["login"]
# fix missing names
missing_names = {
'bthirion': 'Bertrand Thirion',
'dubourg': 'Vincent Dubourg',
'Duchesnay': 'Edouard Duchesnay',
'Lars': 'Lars Buitinck',
'MechCoder': 'Manoj Kumar',
}
if profile["name"] in missing_names:
profile["name"] = missing_names[profile["name"]]
return profile
def key(profile):
"""Get a sorting key based on the lower case last name, then firstname"""
components = profile["name"].lower().split(' ')
return " ".join([components[-1]] + components[:-1])
def generate_table(contributors):
lines = [
(".. raw :: html\n"),
(" <!-- Generated by generate_authors_table.py -->"),
(" <div class=\"sk-authors-container\">"),
(" <style>"),
(" img.avatar {border-radius: 10px;}"),
(" </style>"),
]
for contributor in contributors:
lines.append(" <div>")
lines.append(
" <a href='%s'><img src='%s' class='avatar' /></a> <br />" %
(contributor["html_url"], contributor["avatar_url"]))
lines.append(" <p>%s</p>" % (contributor["name"], ))
lines.append(" </div>")
lines.append(" </div>")
return '\n'.join(lines)
def generate_list(contributors):
lines = []
for contributor in contributors:
lines.append("- %s" % (contributor["name"], ))
return '\n'.join(lines)
if __name__ == "__main__":
core_devs, emeritus, triage_team = get_contributors()
with open(REPO_FOLDER / "doc" / "authors.rst", "w+") as rst_file:
rst_file.write(generate_table(core_devs))
with open(REPO_FOLDER / "doc" / "authors_emeritus.rst", "w+") as rst_file:
rst_file.write(generate_list(emeritus))
with open(REPO_FOLDER / "doc" / "triage_team.rst", "w+") as rst_file:
rst_file.write(generate_table(triage_team))
| bsd-3-clause |
RayMick/scikit-learn | sklearn/linear_model/least_angle.py | 61 | 54324 | """
Least Angle Regression algorithm. See the documentation on the
Generalized Linear Model for a complete discussion.
"""
from __future__ import print_function
# Author: Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Gael Varoquaux
#
# License: BSD 3 clause
from math import log
import sys
import warnings
from distutils.version import LooseVersion
import numpy as np
from scipy import linalg, interpolate
from scipy.linalg.lapack import get_lapack_funcs
from .base import LinearModel
from ..base import RegressorMixin
from ..utils import arrayfuncs, as_float_array, check_X_y
from ..cross_validation import check_cv
from ..utils import ConvergenceWarning
from ..externals.joblib import Parallel, delayed
from ..externals.six.moves import xrange
import scipy
solve_triangular_args = {}
if LooseVersion(scipy.__version__) >= LooseVersion('0.12'):
solve_triangular_args = {'check_finite': False}
def lars_path(X, y, Xy=None, Gram=None, max_iter=500,
alpha_min=0, method='lar', copy_X=True,
eps=np.finfo(np.float).eps,
copy_Gram=True, verbose=0, return_path=True,
return_n_iter=False, positive=False):
"""Compute Least Angle Regression or Lasso path using LARS algorithm [1]
The optimization objective for the case method='lasso' is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
in the case of method='lars', the objective function is only known in
the form of an implicit equation (see discussion in [1])
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
-----------
X : array, shape: (n_samples, n_features)
Input data.
y : array, shape: (n_samples)
Input targets.
positive : boolean (default=False)
Restrict coefficients to be >= 0.
        When using this option together with method 'lasso' the model
        coefficients will not converge to the ordinary-least-squares
        solution for small values of alpha (nor will they when using
        method 'lar'). Only coefficients up to the smallest alpha value
        (``alphas_[alphas_ > 0.].min()`` when fit_path=True) reached by
        the stepwise Lars-Lasso algorithm are typically in congruence
        with the solution of the coordinate descent lasso_path function.
max_iter : integer, optional (default=500)
Maximum number of iterations to perform, set to infinity for no limit.
Gram : None, 'auto', array, shape: (n_features, n_features), optional
Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram
matrix is precomputed from the given X, if there are more samples
than features.
alpha_min : float, optional (default=0)
Minimum correlation along the path. It corresponds to the
regularization parameter alpha parameter in the Lasso.
method : {'lar', 'lasso'}, optional (default='lar')
Specifies the returned model. Select ``'lar'`` for Least Angle
Regression, ``'lasso'`` for the Lasso.
eps : float, optional (default=``np.finfo(np.float).eps``)
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
copy_X : bool, optional (default=True)
If ``False``, ``X`` is overwritten.
copy_Gram : bool, optional (default=True)
If ``False``, ``Gram`` is overwritten.
verbose : int (default=0)
Controls output verbosity.
return_path : bool, optional (default=True)
If ``return_path==True`` returns the entire path, else returns only the
last point of the path.
return_n_iter : bool, optional (default=False)
Whether to return the number of iterations.
Returns
--------
alphas : array, shape: [n_alphas + 1]
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter``, ``n_features`` or the
number of nodes in the path with ``alpha >= alpha_min``, whichever
is smaller.
active : array, shape [n_alphas]
Indices of active variables at the end of the path.
coefs : array, shape (n_features, n_alphas + 1)
Coefficients along the path
n_iter : int
Number of iterations run. Returned only if return_n_iter is set
to True.
See also
--------
lasso_path
LassoLars
Lars
LassoLarsCV
LarsCV
sklearn.decomposition.sparse_encode
References
----------
.. [1] "Least Angle Regression", Effron et al.
http://www-stat.stanford.edu/~tibs/ftp/lars.pdf
.. [2] `Wikipedia entry on the Least-angle regression
<http://en.wikipedia.org/wiki/Least-angle_regression>`_
.. [3] `Wikipedia entry on the Lasso
<http://en.wikipedia.org/wiki/Lasso_(statistics)#Lasso_method>`_
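    Examples
    --------
    A minimal, illustrative call on synthetic data (the number of points on
    the path depends on the data, so only a stable shape check is shown):
    >>> import numpy as np
    >>> from sklearn import linear_model
    >>> rng = np.random.RandomState(0)
    >>> X, y = rng.randn(50, 5), rng.randn(50)
    >>> alphas, active, coefs = linear_model.lars_path(X, y, method='lasso')
    >>> coefs.shape[0] == X.shape[1]
    True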
"""
n_features = X.shape[1]
n_samples = y.size
max_features = min(max_iter, n_features)
if return_path:
coefs = np.zeros((max_features + 1, n_features))
alphas = np.zeros(max_features + 1)
else:
coef, prev_coef = np.zeros(n_features), np.zeros(n_features)
alpha, prev_alpha = np.array([0.]), np.array([0.]) # better ideas?
n_iter, n_active = 0, 0
active, indices = list(), np.arange(n_features)
# holds the sign of covariance
sign_active = np.empty(max_features, dtype=np.int8)
drop = False
# will hold the cholesky factorization. Only lower part is
# referenced.
# We are initializing this to "zeros" and not empty, because
# it is passed to scipy linalg functions and thus if it has NaNs,
    # even if they are in the upper part that is not used, errors
    # would be raised.
# Once we support only scipy > 0.12 we can use check_finite=False and
# go back to "empty"
L = np.zeros((max_features, max_features), dtype=X.dtype)
swap, nrm2 = linalg.get_blas_funcs(('swap', 'nrm2'), (X,))
solve_cholesky, = get_lapack_funcs(('potrs',), (X,))
if Gram is None:
if copy_X:
# force copy. setting the array to be fortran-ordered
# speeds up the calculation of the (partial) Gram matrix
# and allows to easily swap columns
X = X.copy('F')
elif Gram == 'auto':
Gram = None
if X.shape[0] > X.shape[1]:
Gram = np.dot(X.T, X)
elif copy_Gram:
Gram = Gram.copy()
if Xy is None:
Cov = np.dot(X.T, y)
else:
Cov = Xy.copy()
if verbose:
if verbose > 1:
print("Step\t\tAdded\t\tDropped\t\tActive set size\t\tC")
else:
sys.stdout.write('.')
sys.stdout.flush()
tiny = np.finfo(np.float).tiny # to avoid division by 0 warning
tiny32 = np.finfo(np.float32).tiny # to avoid division by 0 warning
equality_tolerance = np.finfo(np.float32).eps
while True:
if Cov.size:
if positive:
C_idx = np.argmax(Cov)
else:
C_idx = np.argmax(np.abs(Cov))
C_ = Cov[C_idx]
if positive:
C = C_
else:
C = np.fabs(C_)
else:
C = 0.
if return_path:
alpha = alphas[n_iter, np.newaxis]
coef = coefs[n_iter]
prev_alpha = alphas[n_iter - 1, np.newaxis]
prev_coef = coefs[n_iter - 1]
alpha[0] = C / n_samples
if alpha[0] <= alpha_min + equality_tolerance: # early stopping
if abs(alpha[0] - alpha_min) > equality_tolerance:
# interpolation factor 0 <= ss < 1
if n_iter > 0:
# In the first iteration, all alphas are zero, the formula
# below would make ss a NaN
ss = ((prev_alpha[0] - alpha_min) /
(prev_alpha[0] - alpha[0]))
coef[:] = prev_coef + ss * (coef - prev_coef)
alpha[0] = alpha_min
if return_path:
coefs[n_iter] = coef
break
if n_iter >= max_iter or n_active >= n_features:
break
if not drop:
            ################################################################
            # Append x_j to the Cholesky factorization of (Xa * Xa')       #
            #                                                              #
            #            ( L   0 )                                         #
            #     L  ->  (       )  , where L * w = Xa' x_j                #
            #            ( w   z )    and z**2 = ||x_j||**2 - ||w||**2     #
            #                                                              #
            ################################################################
if positive:
sign_active[n_active] = np.ones_like(C_)
else:
sign_active[n_active] = np.sign(C_)
m, n = n_active, C_idx + n_active
Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
indices[n], indices[m] = indices[m], indices[n]
Cov_not_shortened = Cov
Cov = Cov[1:] # remove Cov[0]
if Gram is None:
X.T[n], X.T[m] = swap(X.T[n], X.T[m])
c = nrm2(X.T[n_active]) ** 2
L[n_active, :n_active] = \
np.dot(X.T[n_active], X.T[:n_active].T)
else:
                # swap only works in place if the matrix is Fortran
                # contiguous ...
Gram[m], Gram[n] = swap(Gram[m], Gram[n])
Gram[:, m], Gram[:, n] = swap(Gram[:, m], Gram[:, n])
c = Gram[n_active, n_active]
L[n_active, :n_active] = Gram[n_active, :n_active]
# Update the cholesky decomposition for the Gram matrix
if n_active:
linalg.solve_triangular(L[:n_active, :n_active],
L[n_active, :n_active],
trans=0, lower=1,
overwrite_b=True,
**solve_triangular_args)
v = np.dot(L[n_active, :n_active], L[n_active, :n_active])
diag = max(np.sqrt(np.abs(c - v)), eps)
L[n_active, n_active] = diag
if diag < 1e-7:
# The system is becoming too ill-conditioned.
# We have degenerate vectors in our active set.
# We'll 'drop for good' the last regressor added.
# Note: this case is very rare. It is no longer triggered by the
# test suite. The `equality_tolerance` margin added in 0.16.0 to
# get early stopping to work consistently on all versions of
# Python including 32 bit Python under Windows seems to make it
# very difficult to trigger the 'drop for good' strategy.
warnings.warn('Regressors in active set degenerate. '
'Dropping a regressor, after %i iterations, '
'i.e. alpha=%.3e, '
'with an active set of %i regressors, and '
'the smallest cholesky pivot element being %.3e'
% (n_iter, alpha, n_active, diag),
ConvergenceWarning)
# XXX: need to figure a 'drop for good' way
Cov = Cov_not_shortened
Cov[0] = 0
Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
continue
active.append(indices[n_active])
n_active += 1
if verbose > 1:
print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, active[-1], '',
n_active, C))
if method == 'lasso' and n_iter > 0 and prev_alpha[0] < alpha[0]:
            # alpha is increasing. This is because the updates of Cov are
            # bringing in too much numerical error, which is now greater
            # than the remaining correlation with the regressors. Time to
            # bail out.
warnings.warn('Early stopping the lars path, as the residues '
'are small and the current value of alpha is no '
'longer well controlled. %i iterations, alpha=%.3e, '
'previous alpha=%.3e, with an active set of %i '
'regressors.'
% (n_iter, alpha, prev_alpha, n_active),
ConvergenceWarning)
break
# least squares solution
least_squares, info = solve_cholesky(L[:n_active, :n_active],
sign_active[:n_active],
lower=True)
if least_squares.size == 1 and least_squares == 0:
# This happens because sign_active[:n_active] = 0
least_squares[...] = 1
AA = 1.
else:
# is this really needed ?
AA = 1. / np.sqrt(np.sum(least_squares * sign_active[:n_active]))
if not np.isfinite(AA):
# L is too ill-conditioned
i = 0
L_ = L[:n_active, :n_active].copy()
while not np.isfinite(AA):
L_.flat[::n_active + 1] += (2 ** i) * eps
least_squares, info = solve_cholesky(
L_, sign_active[:n_active], lower=True)
tmp = max(np.sum(least_squares * sign_active[:n_active]),
eps)
AA = 1. / np.sqrt(tmp)
i += 1
least_squares *= AA
if Gram is None:
# equiangular direction of variables in the active set
eq_dir = np.dot(X.T[:n_active].T, least_squares)
            # correlation between each inactive variable and the
            # equiangular vector
corr_eq_dir = np.dot(X.T[n_active:], eq_dir)
else:
# if huge number of features, this takes 50% of time, I
# think could be avoided if we just update it using an
# orthogonal (QR) decomposition of X
corr_eq_dir = np.dot(Gram[:n_active, n_active:].T,
least_squares)
g1 = arrayfuncs.min_pos((C - Cov) / (AA - corr_eq_dir + tiny))
if positive:
gamma_ = min(g1, C / AA)
else:
g2 = arrayfuncs.min_pos((C + Cov) / (AA + corr_eq_dir + tiny))
gamma_ = min(g1, g2, C / AA)
# TODO: better names for these variables: z
drop = False
z = -coef[active] / (least_squares + tiny32)
z_pos = arrayfuncs.min_pos(z)
if z_pos < gamma_:
# some coefficients have changed sign
idx = np.where(z == z_pos)[0][::-1]
# update the sign, important for LAR
sign_active[idx] = -sign_active[idx]
if method == 'lasso':
gamma_ = z_pos
drop = True
n_iter += 1
if return_path:
if n_iter >= coefs.shape[0]:
del coef, alpha, prev_alpha, prev_coef
# resize the coefs and alphas array
add_features = 2 * max(1, (max_features - n_active))
coefs = np.resize(coefs, (n_iter + add_features, n_features))
alphas = np.resize(alphas, n_iter + add_features)
coef = coefs[n_iter]
prev_coef = coefs[n_iter - 1]
alpha = alphas[n_iter, np.newaxis]
prev_alpha = alphas[n_iter - 1, np.newaxis]
else:
# mimic the effect of incrementing n_iter on the array references
prev_coef = coef
prev_alpha[0] = alpha[0]
coef = np.zeros_like(coef)
coef[active] = prev_coef[active] + gamma_ * least_squares
# update correlations
Cov -= gamma_ * corr_eq_dir
# See if any coefficient has changed sign
if drop and method == 'lasso':
# handle the case when idx is not length of 1
[arrayfuncs.cholesky_delete(L[:n_active, :n_active], ii) for ii in
idx]
n_active -= 1
m, n = idx, n_active
# handle the case when idx is not length of 1
drop_idx = [active.pop(ii) for ii in idx]
if Gram is None:
# propagate dropped variable
for ii in idx:
for i in range(ii, n_active):
X.T[i], X.T[i + 1] = swap(X.T[i], X.T[i + 1])
# yeah this is stupid
indices[i], indices[i + 1] = indices[i + 1], indices[i]
# TODO: this could be updated
residual = y - np.dot(X[:, :n_active], coef[active])
temp = np.dot(X.T[n_active], residual)
Cov = np.r_[temp, Cov]
else:
for ii in idx:
for i in range(ii, n_active):
indices[i], indices[i + 1] = indices[i + 1], indices[i]
Gram[i], Gram[i + 1] = swap(Gram[i], Gram[i + 1])
Gram[:, i], Gram[:, i + 1] = swap(Gram[:, i],
Gram[:, i + 1])
# Cov_n = Cov_j + x_j * X + increment(betas) TODO:
# will this still work with multiple drops ?
# recompute covariance. Probably could be done better
# wrong as Xy is not swapped with the rest of variables
# TODO: this could be updated
residual = y - np.dot(X, coef)
temp = np.dot(X.T[drop_idx], residual)
Cov = np.r_[temp, Cov]
sign_active = np.delete(sign_active, idx)
sign_active = np.append(sign_active, 0.) # just to maintain size
if verbose > 1:
print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, '', drop_idx,
n_active, abs(temp)))
if return_path:
# resize coefs in case of early stop
alphas = alphas[:n_iter + 1]
coefs = coefs[:n_iter + 1]
if return_n_iter:
return alphas, active, coefs.T, n_iter
else:
return alphas, active, coefs.T
else:
if return_n_iter:
return alpha, active, coef, n_iter
else:
return alpha, active, coef
###############################################################################
# Estimator classes
class Lars(LinearModel, RegressorMixin):
"""Least Angle Regression model a.k.a. LAR
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
n_nonzero_coefs : int, optional
Target number of non-zero coefficients. Use ``np.inf`` for no limit.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
fit_path : boolean
If True the full path is stored in the ``coef_path_`` attribute.
If you compute the solution for a large problem or many targets,
setting ``fit_path`` to ``False`` will lead to a speedup, especially
with a small alpha.
Attributes
----------
alphas_ : array, shape (n_alphas + 1,) | list of n_targets such arrays
Maximum of covariances (in absolute value) at each iteration. \
``n_alphas`` is either ``n_nonzero_coefs`` or ``n_features``, \
whichever is smaller.
active_ : list, length = n_alphas | list of n_targets such lists
Indices of active variables at the end of the path.
coef_path_ : array, shape (n_features, n_alphas + 1) \
| list of n_targets such arrays
The varying values of the coefficients along the path. It is not
present if the ``fit_path`` parameter is ``False``.
coef_ : array, shape (n_features,) or (n_targets, n_features)
Parameter vector (w in the formulation formula).
intercept_ : float | array, shape (n_targets,)
Independent term in decision function.
n_iter_ : array-like or int
The number of iterations taken by lars_path to find the
grid of alphas for each target.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.Lars(n_nonzero_coefs=1)
>>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111])
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
Lars(copy_X=True, eps=..., fit_intercept=True, fit_path=True,
n_nonzero_coefs=1, normalize=True, positive=False, precompute='auto',
verbose=False)
>>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[ 0. -1.11...]
See also
--------
lars_path, LarsCV
sklearn.decomposition.sparse_encode
"""
def __init__(self, fit_intercept=True, verbose=False, normalize=True,
precompute='auto', n_nonzero_coefs=500,
eps=np.finfo(np.float).eps, copy_X=True, fit_path=True,
positive=False):
self.fit_intercept = fit_intercept
self.verbose = verbose
self.normalize = normalize
self.method = 'lar'
self.precompute = precompute
self.n_nonzero_coefs = n_nonzero_coefs
self.positive = positive
self.eps = eps
self.copy_X = copy_X
self.fit_path = fit_path
def _get_gram(self):
# precompute if n_samples > n_features
precompute = self.precompute
if hasattr(precompute, '__array__'):
Gram = precompute
elif precompute == 'auto':
Gram = 'auto'
else:
Gram = None
return Gram
def fit(self, X, y, Xy=None):
"""Fit the model using X, y as training data.
        Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Xy : array-like, shape (n_samples,) or (n_samples, n_targets), \
optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
        Returns
-------
self : object
returns an instance of self.
"""
X, y = check_X_y(X, y, y_numeric=True, multi_output=True)
n_features = X.shape[1]
X, y, X_mean, y_mean, X_std = self._center_data(X, y,
self.fit_intercept,
self.normalize,
self.copy_X)
if y.ndim == 1:
y = y[:, np.newaxis]
n_targets = y.shape[1]
alpha = getattr(self, 'alpha', 0.)
if hasattr(self, 'n_nonzero_coefs'):
alpha = 0. # n_nonzero_coefs parametrization takes priority
max_iter = self.n_nonzero_coefs
else:
max_iter = self.max_iter
precompute = self.precompute
if not hasattr(precompute, '__array__') and (
precompute is True or
(precompute == 'auto' and X.shape[0] > X.shape[1]) or
(precompute == 'auto' and y.shape[1] > 1)):
Gram = np.dot(X.T, X)
else:
Gram = self._get_gram()
self.alphas_ = []
self.n_iter_ = []
if self.fit_path:
self.coef_ = []
self.active_ = []
self.coef_path_ = []
for k in xrange(n_targets):
this_Xy = None if Xy is None else Xy[:, k]
alphas, active, coef_path, n_iter_ = lars_path(
X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X,
copy_Gram=True, alpha_min=alpha, method=self.method,
verbose=max(0, self.verbose - 1), max_iter=max_iter,
eps=self.eps, return_path=True,
return_n_iter=True, positive=self.positive)
self.alphas_.append(alphas)
self.active_.append(active)
self.n_iter_.append(n_iter_)
self.coef_path_.append(coef_path)
self.coef_.append(coef_path[:, -1])
if n_targets == 1:
self.alphas_, self.active_, self.coef_path_, self.coef_ = [
a[0] for a in (self.alphas_, self.active_, self.coef_path_,
self.coef_)]
self.n_iter_ = self.n_iter_[0]
else:
self.coef_ = np.empty((n_targets, n_features))
for k in xrange(n_targets):
this_Xy = None if Xy is None else Xy[:, k]
alphas, _, self.coef_[k], n_iter_ = lars_path(
X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X,
copy_Gram=True, alpha_min=alpha, method=self.method,
verbose=max(0, self.verbose - 1), max_iter=max_iter,
eps=self.eps, return_path=False, return_n_iter=True,
positive=self.positive)
self.alphas_.append(alphas)
self.n_iter_.append(n_iter_)
if n_targets == 1:
self.alphas_ = self.alphas_[0]
self.n_iter_ = self.n_iter_[0]
self._set_intercept(X_mean, y_mean, X_std)
return self
class LassoLars(Lars):
"""Lasso model fit with Least Angle Regression a.k.a. Lars
It is a Linear Model trained with an L1 prior as regularizer.
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
alpha : float
Constant that multiplies the penalty term. Defaults to 1.0.
``alpha = 0`` is equivalent to an ordinary least square, solved
by :class:`LinearRegression`. For numerical reasons, using
``alpha = 0`` with the LassoLars object is not advised and you
should prefer the LinearRegression object.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
Under the positive restriction the model coefficients will not converge
to the ordinary-least-squares solution for small values of alpha.
        Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
algorithm are typically in congruence with the solution of the
coordinate descent Lasso estimator.
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
fit_path : boolean
If ``True`` the full path is stored in the ``coef_path_`` attribute.
If you compute the solution for a large problem or many targets,
setting ``fit_path`` to ``False`` will lead to a speedup, especially
with a small alpha.
Attributes
----------
alphas_ : array, shape (n_alphas + 1,) | list of n_targets such arrays
Maximum of covariances (in absolute value) at each iteration. \
``n_alphas`` is either ``max_iter``, ``n_features``, or the number of \
nodes in the path with correlation greater than ``alpha``, whichever \
is smaller.
active_ : list, length = n_alphas | list of n_targets such lists
Indices of active variables at the end of the path.
coef_path_ : array, shape (n_features, n_alphas + 1) or list
If a list is passed it's expected to be one of n_targets such arrays.
The varying values of the coefficients along the path. It is not
present if the ``fit_path`` parameter is ``False``.
coef_ : array, shape (n_features,) or (n_targets, n_features)
Parameter vector (w in the formulation formula).
intercept_ : float | array, shape (n_targets,)
Independent term in decision function.
n_iter_ : array-like or int.
The number of iterations taken by lars_path to find the
grid of alphas for each target.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.LassoLars(alpha=0.01)
>>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1, 0, -1])
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
LassoLars(alpha=0.01, copy_X=True, eps=..., fit_intercept=True,
fit_path=True, max_iter=500, normalize=True, positive=False,
precompute='auto', verbose=False)
>>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[ 0. -0.963257...]
See also
--------
lars_path
lasso_path
Lasso
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
"""
def __init__(self, alpha=1.0, fit_intercept=True, verbose=False,
normalize=True, precompute='auto', max_iter=500,
eps=np.finfo(np.float).eps, copy_X=True, fit_path=True,
positive=False):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.method = 'lasso'
self.positive = positive
self.precompute = precompute
self.copy_X = copy_X
self.eps = eps
self.fit_path = fit_path
###############################################################################
# Cross-validated estimator classes
def _check_copy_and_writeable(array, copy=False):
if copy or not array.flags.writeable:
return array.copy()
return array
def _lars_path_residues(X_train, y_train, X_test, y_test, Gram=None,
copy=True, method='lars', verbose=False,
fit_intercept=True, normalize=True, max_iter=500,
eps=np.finfo(np.float).eps, positive=False):
"""Compute the residues on left-out data for a full LARS path
Parameters
-----------
X_train : array, shape (n_samples, n_features)
The data to fit the LARS on
y_train : array, shape (n_samples)
The target variable to fit LARS on
X_test : array, shape (n_samples, n_features)
The data to compute the residues on
y_test : array, shape (n_samples)
The target variable to compute the residues on
Gram : None, 'auto', array, shape: (n_features, n_features), optional
Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram
matrix is precomputed from the given X, if there are more samples
than features
copy : boolean, optional
Whether X_train, X_test, y_train and y_test should be copied;
if False, they may be overwritten.
method : 'lar' | 'lasso'
Specifies the returned model. Select ``'lar'`` for Least Angle
Regression, ``'lasso'`` for the Lasso.
verbose : integer, optional
Sets the amount of verbosity
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
See reservations for using this option in combination with method
'lasso' for expected small values of alpha in the doc of LassoLarsCV
and LassoLarsIC.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
max_iter : integer, optional
Maximum number of iterations to perform.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
Returns
--------
alphas : array, shape (n_alphas,)
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter`` or ``n_features``, whichever
is smaller.
active : list
Indices of active variables at the end of the path.
coefs : array, shape (n_features, n_alphas)
Coefficients along the path
residues : array, shape (n_alphas, n_samples)
Residues of the prediction on the test data
"""
X_train = _check_copy_and_writeable(X_train, copy)
y_train = _check_copy_and_writeable(y_train, copy)
X_test = _check_copy_and_writeable(X_test, copy)
y_test = _check_copy_and_writeable(y_test, copy)
if fit_intercept:
X_mean = X_train.mean(axis=0)
X_train -= X_mean
X_test -= X_mean
y_mean = y_train.mean(axis=0)
y_train = as_float_array(y_train, copy=False)
y_train -= y_mean
y_test = as_float_array(y_test, copy=False)
y_test -= y_mean
if normalize:
norms = np.sqrt(np.sum(X_train ** 2, axis=0))
nonzeros = np.flatnonzero(norms)
X_train[:, nonzeros] /= norms[nonzeros]
alphas, active, coefs = lars_path(
X_train, y_train, Gram=Gram, copy_X=False, copy_Gram=False,
method=method, verbose=max(0, verbose - 1), max_iter=max_iter, eps=eps,
positive=positive)
if normalize:
coefs[nonzeros] /= norms[nonzeros][:, np.newaxis]
residues = np.dot(X_test, coefs) - y_test[:, np.newaxis]
return alphas, active, coefs, residues.T
class LarsCV(Lars):
"""Cross-validated Least Angle Regression model
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
    max_iter : integer, optional
Maximum number of iterations to perform.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
max_n_alphas : integer, optional
The maximum number of points on the path used to compute the
residuals in the cross-validation
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
Attributes
----------
coef_ : array, shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function
coef_path_ : array, shape (n_features, n_alphas)
the varying values of the coefficients along the path
alpha_ : float
the estimated regularization parameter alpha
alphas_ : array, shape (n_alphas,)
the different values of alpha along the path
cv_alphas_ : array, shape (n_cv_alphas,)
all the values of alpha along the path for the different folds
cv_mse_path_ : array, shape (n_folds, n_cv_alphas)
the mean square error on left-out for each fold along the path
(alpha values given by ``cv_alphas``)
n_iter_ : array-like or int
the number of iterations run by Lars with the optimal alpha.
See also
--------
lars_path, LassoLars, LassoLarsCV
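    Examples
    --------
    A small illustrative fit on synthetic data (the selected ``alpha_``
    depends on the random seed and on the folds):
    >>> import numpy as np
    >>> from sklearn import linear_model
    >>> rng = np.random.RandomState(0)
    >>> X = rng.randn(50, 4)
    >>> y = X[:, 0] - 2 * X[:, 1] + 0.1 * rng.randn(50)
    >>> reg = linear_model.LarsCV(cv=5).fit(X, y)
    >>> reg.coef_.shape
    (4,)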
"""
method = 'lar'
def __init__(self, fit_intercept=True, verbose=False, max_iter=500,
normalize=True, precompute='auto', cv=None,
max_n_alphas=1000, n_jobs=1, eps=np.finfo(np.float).eps,
copy_X=True, positive=False):
self.fit_intercept = fit_intercept
self.positive = positive
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.precompute = precompute
self.copy_X = copy_X
self.cv = cv
self.max_n_alphas = max_n_alphas
self.n_jobs = n_jobs
self.eps = eps
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,)
Target values.
Returns
-------
self : object
returns an instance of self.
"""
self.fit_path = True
X, y = check_X_y(X, y, y_numeric=True)
# init cross-validation generator
cv = check_cv(self.cv, X, y, classifier=False)
Gram = 'auto' if self.precompute else None
cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
delayed(_lars_path_residues)(
X[train], y[train], X[test], y[test], Gram=Gram, copy=False,
method=self.method, verbose=max(0, self.verbose - 1),
normalize=self.normalize, fit_intercept=self.fit_intercept,
max_iter=self.max_iter, eps=self.eps, positive=self.positive)
for train, test in cv)
all_alphas = np.concatenate(list(zip(*cv_paths))[0])
# Unique also sorts
all_alphas = np.unique(all_alphas)
# Take at most max_n_alphas values
stride = int(max(1, int(len(all_alphas) / float(self.max_n_alphas))))
all_alphas = all_alphas[::stride]
mse_path = np.empty((len(all_alphas), len(cv_paths)))
for index, (alphas, active, coefs, residues) in enumerate(cv_paths):
alphas = alphas[::-1]
residues = residues[::-1]
if alphas[0] != 0:
alphas = np.r_[0, alphas]
residues = np.r_[residues[0, np.newaxis], residues]
if alphas[-1] != all_alphas[-1]:
alphas = np.r_[alphas, all_alphas[-1]]
residues = np.r_[residues, residues[-1, np.newaxis]]
this_residues = interpolate.interp1d(alphas,
residues,
axis=0)(all_alphas)
this_residues **= 2
mse_path[:, index] = np.mean(this_residues, axis=-1)
mask = np.all(np.isfinite(mse_path), axis=-1)
all_alphas = all_alphas[mask]
mse_path = mse_path[mask]
# Select the alpha that minimizes left-out error
i_best_alpha = np.argmin(mse_path.mean(axis=-1))
best_alpha = all_alphas[i_best_alpha]
# Store our parameters
self.alpha_ = best_alpha
self.cv_alphas_ = all_alphas
self.cv_mse_path_ = mse_path
# Now compute the full model
        # it will call a lasso internally when self is LassoLarsCV
# as self.method == 'lasso'
Lars.fit(self, X, y)
return self
@property
def alpha(self):
# impedance matching for the above Lars.fit (should not be documented)
return self.alpha_
class LassoLarsCV(LarsCV):
"""Cross-validated Lasso, using the LARS algorithm
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
Under the positive restriction the model coefficients do not converge
to the ordinary-least-squares solution for small values of alpha.
        Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
algorithm are typically in congruence with the solution of the
coordinate descent Lasso estimator.
As a consequence using LassoLarsCV only makes sense for problems where
a sparse solution is expected and/or reached.
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
max_n_alphas : integer, optional
The maximum number of points on the path used to compute the
residuals in the cross-validation
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
Attributes
----------
coef_ : array, shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function.
coef_path_ : array, shape (n_features, n_alphas)
the varying values of the coefficients along the path
alpha_ : float
the estimated regularization parameter alpha
alphas_ : array, shape (n_alphas,)
the different values of alpha along the path
cv_alphas_ : array, shape (n_cv_alphas,)
all the values of alpha along the path for the different folds
cv_mse_path_ : array, shape (n_folds, n_cv_alphas)
the mean square error on left-out for each fold along the path
(alpha values given by ``cv_alphas``)
n_iter_ : array-like or int
the number of iterations run by Lars with the optimal alpha.
Notes
-----
The object solves the same problem as the LassoCV object. However,
    unlike the LassoCV, it finds the relevant alpha values by itself.
In general, because of this property, it will be more stable.
However, it is more fragile to heavily multicollinear datasets.
It is more efficient than the LassoCV if only a small number of
features are selected compared to the total number, for instance if
there are very few samples compared to the number of features.
See also
--------
lars_path, LassoLars, LarsCV, LassoCV
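    Examples
    --------
    Illustrative only; assumes a sparse ground truth, which is the setting
    where LassoLarsCV is most useful:
    >>> import numpy as np
    >>> from sklearn import linear_model
    >>> rng = np.random.RandomState(0)
    >>> X = rng.randn(60, 8)
    >>> y = 3 * X[:, 0] + 0.1 * rng.randn(60)
    >>> reg = linear_model.LassoLarsCV(cv=4).fit(X, y)
    >>> reg.coef_.shape
    (8,)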
"""
method = 'lasso'
class LassoLarsIC(LassoLars):
"""Lasso model fit with Lars using BIC or AIC for model selection
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
AIC is the Akaike information criterion and BIC is the Bayes
Information criterion. Such criteria are useful to select the value
of the regularization parameter by making a trade-off between the
goodness of fit and the complexity of the model. A good model should
explain well the data while being simple.
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
criterion : 'bic' | 'aic'
The type of criterion to use.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
Under the positive restriction the model coefficients do not converge
to the ordinary-least-squares solution for small values of alpha.
        Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
algorithm are typically in congruence with the solution of the
coordinate descent Lasso estimator.
As a consequence using LassoLarsIC only makes sense for problems where
a sparse solution is expected and/or reached.
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform. Can be used for
early stopping.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
Attributes
----------
coef_ : array, shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function.
alpha_ : float
the alpha parameter chosen by the information criterion
n_iter_ : int
number of iterations run by lars_path to find the grid of
alphas.
criterion_ : array, shape (n_alphas,)
The value of the information criteria ('aic', 'bic') across all
alphas. The alpha which has the smallest information criteria
is chosen.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.LassoLarsIC(criterion='bic')
>>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111])
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
LassoLarsIC(copy_X=True, criterion='bic', eps=..., fit_intercept=True,
max_iter=500, normalize=True, positive=False, precompute='auto',
verbose=False)
>>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[ 0. -1.11...]
Notes
-----
The estimation of the number of degrees of freedom is given by:
"On the degrees of freedom of the lasso"
Hui Zou, Trevor Hastie, and Robert Tibshirani
Ann. Statist. Volume 35, Number 5 (2007), 2173-2192.
http://en.wikipedia.org/wiki/Akaike_information_criterion
http://en.wikipedia.org/wiki/Bayesian_information_criterion
See also
--------
lars_path, LassoLars, LassoLarsCV
"""
def __init__(self, criterion='aic', fit_intercept=True, verbose=False,
normalize=True, precompute='auto', max_iter=500,
eps=np.finfo(np.float).eps, copy_X=True, positive=False):
self.criterion = criterion
self.fit_intercept = fit_intercept
self.positive = positive
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.copy_X = copy_X
self.precompute = precompute
self.eps = eps
def fit(self, X, y, copy_X=True):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
training data.
y : array-like, shape (n_samples,)
target values.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Returns
-------
self : object
returns an instance of self.
"""
self.fit_path = True
X, y = check_X_y(X, y, y_numeric=True)
X, y, Xmean, ymean, Xstd = LinearModel._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
max_iter = self.max_iter
Gram = self._get_gram()
alphas_, active_, coef_path_, self.n_iter_ = lars_path(
X, y, Gram=Gram, copy_X=copy_X, copy_Gram=True, alpha_min=0.0,
method='lasso', verbose=self.verbose, max_iter=max_iter,
eps=self.eps, return_n_iter=True, positive=self.positive)
n_samples = X.shape[0]
if self.criterion == 'aic':
K = 2 # AIC
elif self.criterion == 'bic':
K = log(n_samples) # BIC
else:
raise ValueError('criterion should be either bic or aic')
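        # Information criterion evaluated below (up to an additive constant):
        #     criterion(alpha) = n_samples * log(MSE(alpha)) + K * df(alpha)
        # with K = 2 for AIC, K = log(n_samples) for BIC, and df(alpha)
        # estimated as the number of non-zero coefficients (Zou et al. 2007,
        # referenced in the class docstring).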
R = y[:, np.newaxis] - np.dot(X, coef_path_) # residuals
mean_squared_error = np.mean(R ** 2, axis=0)
df = np.zeros(coef_path_.shape[1], dtype=np.int) # Degrees of freedom
for k, coef in enumerate(coef_path_.T):
mask = np.abs(coef) > np.finfo(coef.dtype).eps
if not np.any(mask):
continue
# get the number of degrees of freedom equal to:
# Xc = X[:, mask]
# Trace(Xc * inv(Xc.T, Xc) * Xc.T) ie the number of non-zero coefs
df[k] = np.sum(mask)
self.alphas_ = alphas_
with np.errstate(divide='ignore'):
self.criterion_ = n_samples * np.log(mean_squared_error) + K * df
n_best = np.argmin(self.criterion_)
self.alpha_ = alphas_[n_best]
self.coef_ = coef_path_[:, n_best]
self._set_intercept(Xmean, ymean, Xstd)
return self
| bsd-3-clause |
thekingofkings/urban-flow-analysis | python/multi_view_learning/multi_view_prediction.py | 2 | 18504 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 10 15:16:43 2016
@author: hxw186
Evaluate multi-view learning framework.
"""
import numpy as np
import statsmodels.api as sm
from sklearn.cross_validation import LeaveOneOut
from sklearn.preprocessing import scale
from sklearn.metrics.pairwise import cosine_similarity
import pickle
import sys
sys.path.append("../")
from graph_embedding import get_graph_embedding_features
from feature_evaluation import extract_raw_samples, leaveOneOut_error
# from nn_leaveOneOut import leaveOneOut_error
from Crime import Tract
from FeatureUtils import retrieve_income_features, retrieve_averge_house_price
import matplotlib
import matplotlib.pyplot as plt
matplotlib.rc('pdf', fonttype=42)
np.set_printoptions(suppress=True)
N = 77
def generate_raw_samples(year=2012):
"""
Generate raw features for all samples.
Returns
-------
Y : Numpy.Array
Crime counts
D : Numpy.Array
Demo features
P : Numpy.Array
POI features
T : Numpy.Array
Taxi flow graph embedding
G : Numpy.Array
Geographic graph embedding
"""
Y, D, P, Tf, Gd = extract_raw_samples(year)
T = get_graph_embedding_features('taxi-CA-static.vec')
G = get_graph_embedding_features('geo-CA.vec')
return Y, D, P, T, G
def NBmodel(train_idx, Y, X):
"""
Train a negative binomial model
Return
------
nb_res : the trained negative binomial model.
y_bar : a numpy.array, the prediction on training samples
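    Example
    -------
    A minimal sketch (assumes ``X`` already contains a constant column, as
    the callers below arrange via ``sm.add_constant``, and that ``train_idx``
    indexes the training rows):
        nb_res, y_fit = NBmodel(train_idx, Y, X)
        y_pred = nb_res.predict(X[test_idx])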
"""
nbm = sm.GLM(Y[train_idx], X[train_idx], family=sm.families.NegativeBinomial())
nb_res = nbm.fit()
return nb_res, nb_res.predict(X[train_idx])
def taxi_view_model(train_idx, Y, T):
return NBmodel(train_idx, Y, T)
def poi_view_model(train_idx, Y, P):
return NBmodel(train_idx, Y, P)
def demo_view_model(train_idx, Y, D):
return NBmodel(train_idx, Y, D)
def geo_view_model(train_idx, Y, G):
return NBmodel(train_idx, Y, G)
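# Fusion sketch (comments only): each per-view NB model above yields
# in-sample predictions y_hat_v; mvl_fuse_function below stacks them
# column-wise next to an intercept column and fits a Gaussian GLM, so the
# fused prediction is a learned linear combination
#     y ~ w0 + sum_v w_v * y_hat_v
# (see MVLTest.test_mvl_fuse_function for the full leave-one-out usage).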
def mvl_fuse_function(models, train_idx, Y):
newX = np.ones((len(train_idx), 1))
for nb_res, y_train in models:
ytrain = y_train.reshape((len(y_train), 1))
newX = np.concatenate((newX, ytrain), axis=1)
lm = sm.GLM(Y[train_idx], newX, family=sm.families.Gaussian())
lm_res = lm.fit()
return lm_res
import unittest
class MVLTest(unittest.TestCase):
def test_generate_raw_samples(self):
Y, D, P, T, G = generate_raw_samples()
assert(Y.max() < 20000 and np.mean(Y) > 1000) # use crime rate
assert(Y.shape == (N,1))
assert(D.shape == (N,8))
assert(P.shape == (N,10))
assert(T.shape == (N,8))
assert(G.shape == (N,8))
def test_view_model_independently(self):
Y, D, P, T, G = generate_raw_samples()
loo = LeaveOneOut(len(Y))
T = sm.add_constant(T, prepend=False)
P = sm.add_constant(P, prepend=False)
D = sm.add_constant(D, prepend=False)
G = sm.add_constant(G, prepend=False)
ter = []
per = []
der = []
ger = []
for train_idx, test_idx in loo:
nbm, yp = taxi_view_model(train_idx, Y, T)
ybar = nbm.predict(T[test_idx])
ter.append(ybar - Y[test_idx])
nbm, yp = poi_view_model(train_idx, Y, P)
ybar = nbm.predict(P[test_idx])
per.append(ybar - Y[test_idx])
nbm, yp = demo_view_model(train_idx, Y, D)
ybar = nbm.predict(D[test_idx])
der.append(ybar - Y[test_idx])
nbm, yp = demo_view_model(train_idx, Y, G)
ybar = nbm.predict(G[test_idx])
ger.append(ybar - Y[test_idx])
tmre = np.mean(np.abs(ter)) / np.mean(Y)
print "Taxi MRE: {0}".format(tmre)
assert( tmre < 0.5 )
# self.visualize_prediction_error(ter, Y, "Taxi view")
pmre = np.mean(np.abs(per)) / np.mean(Y)
print "POI MRE: {0}".format(pmre)
assert( pmre < 0.8 )
# self.visualize_prediction_error(per, Y, "POI view")
dmre = np.mean(np.abs(der)) / np.mean(Y)
print "Demo MRE: {0}".format(dmre)
assert( dmre < 0.8 )
# self.visualize_prediction_error(der, Y, "Demo view")
gmre = np.mean(np.abs(ger)) / np.mean(Y)
print "Geo MRE: {0}".format(gmre)
assert( gmre < 0.5 )
# self.visualize_prediction_error(ger, Y, "Geo view")
def test_simple_concatenation_model(self):
"""
Test a simple concatenation model.
We concatenate the feature vectors from four different views into one
vector. Then train a NB model on this concatenated vector `X`.
"""
Y, D, P, T, G = generate_raw_samples(2013)
X = np.concatenate((D,P,G), axis=1)
# assert( X.shape == (N, 34) )
X = sm.add_constant(X, prepend=False)
loo = LeaveOneOut(len(Y))
er = []
for train_idx, test_idx in loo:
nbm, yp = NBmodel(train_idx, Y, X)
ybar = nbm.predict(X[test_idx])
y_error = ybar - Y[test_idx]
# if np.abs(y_error / Y[test_idx]) > 0.8:
# print test_idx, ybar, Y[test_idx]
er.append(y_error)
mre = np.mean(np.abs(er)) / np.mean(Y)
print "Simple combine model MRE: {0}".format(mre)
assert( mre > 0.235 )
# self.visualize_prediction_error(er, Y, "Concatenate multiple views")
def test_mvl_fuse_function(self):
Y, D, P, T, G = generate_raw_samples()
T = sm.add_constant(T, prepend=False)
P = sm.add_constant(P, prepend=False)
D = sm.add_constant(D, prepend=False)
G = sm.add_constant(G, prepend=False)
loo = LeaveOneOut(len(Y))
er = []
for train_idx, test_idx in loo:
tm = taxi_view_model(train_idx, Y, T)
pm = poi_view_model(train_idx, Y, P)
gm = geo_view_model(train_idx, Y, G)
dm = demo_view_model(train_idx, Y, D)
models = [tm, pm, gm, dm]
lm = mvl_fuse_function(models, train_idx, Y)
tm_test = tm[0].predict(T[test_idx])
pm_test = pm[0].predict(P[test_idx])
gm_test = gm[0].predict(G[test_idx])
dm_test = dm[0].predict(D[test_idx])
newX_test = np.array([1, tm_test, pm_test, gm_test, dm_test])
ybar = lm.predict(newX_test)
y_error = ybar - Y[test_idx]
# if np.abs(y_error / Y[test_idx]) > 0.8:
# print test_idx, ybar, Y[test_idx], newX_test
er.append(y_error)
mre = np.mean(np.abs(er)) / np.mean(Y)
print "MVL with linear fusion function MRE: {0}".format(mre)
# self.visualize_prediction_error(er, Y, "MVL linear combination")
def visualize_prediction_error(self, er, Y, title):
cas = Tract.createAllCAObjects()
import matplotlib.pyplot as plt
import descartes
fig = plt.figure()
ax = fig.add_subplot(111)
for k in cas:
re = er[k-1] / Y[k-1]
if re > 0.4:
c = 'r'
elif re < -0.4:
c = 'b'
else:
c = 'w'
cak = cas[k].polygon
ax.add_patch(descartes.PolygonPatch(cak, fc=c))
ax.annotate(str(k), [cak.centroid.x, cak.centroid.y])
ax.axis('equal')
ax.set_title(title)
fig.show()
def similarityMatrix(F):
assert F.shape[0] == N
M = np.zeros((N,N))
for i in range(N):
for j in range(i, N):
if i == j:
M[i,j] = 1
else:
# sim = cosine_similarity(F[i].reshape(1,-1), F[j].reshape(1,-1))
sim = np.dot(F[i], F[j].T)
M[i,j] = sim
M[j,i] = sim
return M
def keep_topk(M, k=4):
    """Keep the k largest entries in each row of M and zero out the rest."""
    Mn = np.zeros(M.shape)
    idx = np.argsort(M)[:, -k:]  # column indices of the k largest entries per row
    for i, idx_r in enumerate(idx):
        Mn[i, idx_r] = M[i, idx_r]
    return Mn
def evaluate_various_flow_features_with_concatenation_model(year, spatial):
Y, D, P, T, G = extract_raw_samples(int(year))
population = D[:,0]
Yh = pickle.load(open("../chicago-hourly-crime-{0}.pickle".format(year)))
Yh = Yh / population * 10000
assert Yh.shape == (24, N)
with open("CAflowFeatures.pickle") as fin:
mf = pickle.load(fin)
line = pickle.load(fin)
dwt = pickle.load(fin)
dws = pickle.load(fin)
hdge = pickle.load(fin)
mf_mre = []
mf_mae = []
line_mre = []
line_mae = []
dw_mre = []
dw_mae = []
for h in range(24):
print h
# MF models
Tmf = mf[h] # sum([e for e in mf.values()])
import nimfa
nmf = nimfa.Nmf(G, rank=4, max_iter=100) #, update="divergence", objective="conn", conn_change=50)
nmf_fit = nmf()
src = nmf_fit.basis()
dst = nmf_fit.coef()
Gmf = np.concatenate((src, dst.T), axis=1)
if spatial == "nospatial":
X = np.concatenate((D, P, Tmf), axis=1)
elif spatial == "onlyspatial":
X = np.concatenate((D, P, Gmf), axis=1)
elif spatial == "usespatial":
X = np.concatenate((D, P, Tmf, Gmf), axis=1)
mre, mae = leaveOneOut_eval(X, Yh[h,:].reshape((N,1)))
mf_mre.append(mre)
mf_mae.append(mae)
print "MF MRE: {0}".format(mre)
# LINE model
Tline = line[h] # sum([e for e in line.values()])
Gline = get_graph_embedding_features('geo_all.txt')
if spatial == "nospatial":
X = np.concatenate((D, P, Tline), axis=1)
elif spatial == "onlyspatial":
X = np.concatenate((D, P, Gline), axis=1)
elif spatial == "usespatial":
X = np.concatenate((D, P, Tline, Gline), axis=1)
mre, mae = leaveOneOut_eval(X, Yh[h,:].reshape((N,1)))
line_mre.append(mre)
line_mae.append(mae)
print "LINE_slotted MRE: {0}".format(mre)
# deepwalk
if spatial == 'nospatial':
TGdw = dwt[h] # sum([e for e in dw.values()])
elif spatial == 'onlyspatial':
TGdw = dws[h]
elif spatial == 'usespatial':
TGdw = hdge[h]
# TGdw = dw[h] # sum([e for e in dw.values()])
X = np.concatenate((D, P, TGdw), axis=1)
mre, mae = leaveOneOut_eval(X, Yh[h,:].reshape((N,1)))
dw_mre.append(mre)
dw_mae.append(mae)
print "HDGE MRE: {0}".format(mre)
return mf_mre, line_mre, dw_mre, mf_mae, line_mae, dw_mae
def evaluate_various_embedding_features_with_lag_model(year, spatial):
Y, D, P, T, G = extract_raw_samples(int(year))
# predict hourly crime
# population = D[:,0]
# Yh = pickle.load(open("../chicago-hourly-crime-{0}.pickle".format(year)))
# Yh = Yh / population * 10000
# predict average income
header, income = retrieve_income_features()
Yh = np.repeat(income[:,0,None], 24, axis=1)
Yh = Yh.T
# predict average house price
# Yh = retrieve_averge_house_price()
# Yh = np.repeat(Yh[:,None], 24, axis=1)
# Yh = Yh.T
assert Yh.shape == (24, N)
with open("CAflowFeatures.pickle") as fin:
mf = pickle.load(fin)
line = pickle.load(fin)
dwt = pickle.load(fin)
dws = pickle.load(fin)
hdge = pickle.load(fin)
mf_mre = []
mf_mae = []
line_mre = []
line_mae = []
dw_mre = []
dw_mae = []
for h in range(24):
print h
Yhat = Yh[h,:].reshape((N,1))
if spatial == "nospatial":
features_ = ['demo', 'poi', 'taxi']
elif spatial == "onlyspatial":
features_ = ['demo', 'poi', 'geo']
elif spatial == "usespatial":
features_ = ['demo', 'poi', 'geo', 'taxi']
else:
features_ = ["demo", "poi"]
# MF models
Tmf = mf[h] # sum([e for e in mf.values()])
import nimfa
nmf = nimfa.Nmf(G, rank=4, max_iter=100) #, update="divergence", objective="conn", conn_change=50)
nmf_fit = nmf()
src = nmf_fit.basis()
dst = nmf_fit.coef()
Gmf = np.concatenate((src, dst.T), axis=1)
mae, mre = leaveOneOut_error(Yhat, D, P, similarityMatrix(Tmf), Yhat,
keep_topk(similarityMatrix(Gmf), 20), Yhat, features=features_, taxi_norm="bydestination")
mf_mre.append(mre)
mf_mae.append(mae)
print "MF MRE: {0}".format(mre)
# LINE model
Tline = line[h] # sum([e for e in line.values()])
Gline = get_graph_embedding_features('geo_all.txt')
mae, mre = leaveOneOut_error(Yhat, D, P, similarityMatrix(Tline), Yhat,
keep_topk(similarityMatrix(Gline)), Yhat, features=features_, taxi_norm="bydestination")
line_mre.append(mre)
line_mae.append(mae)
print "LINE_slotted MRE: {0}".format(mre)
# deepwalk
# TGdw = dw[h] # sum([e for e in dw.values()])
mae, mre = leaveOneOut_error(Yhat, D, P, similarityMatrix(dwt[h]), Yhat,
similarityMatrix(dws[h]), Yhat, features=features_, #['demo', 'poi', 'geo'],
taxi_norm="none")
dw_mre.append(mre)
dw_mae.append(mae)
print "HDGE MRE: {0}".format(mre)
return mf_mre, line_mre, dw_mre, mf_mae, line_mae, dw_mae
def plot_hourly_evaluation(year):
r = pickle.load(open("embeddings-{0}.pickle".format(year)))
with open("../kdd16-eval-{0}.pickle".format(year)) as fin:
kdd_mae = pickle.load(fin)
kdd_mre = pickle.load(fin)
mf_mre = r[0]
mf_mae = r[3]
line_mre = r[1]
line_mae = r[4]
dge_mre = r[2]
dge_mae = r[5]
start = 6
plt.rc("axes", linewidth=2)
plt.figure(figsize=(8,6))
plt.plot(kdd_mre[start:], 'g--', lw=3)
plt.plot(mf_mre[start:], "r:", lw=4)
plt.plot(line_mre[start:], "y-.", lw=4)
plt.plot(dge_mre[start:], 'b-', lw=3)
plt.legend(["RAW", "MF", "LINE", "HDGE"], fontsize=20, loc='best') # bbox_to_anchor=(0.8, 0.4)) #
plt.xlabel("Hour in day", fontsize=20)
plt.ylabel("$MRE$", fontsize=24)
plt.tick_params(labelsize=18)
plt.axis([0, 17, 0.22, 0.45])
plt.gca().set_xticks([0,6,12,17])
plt.gca().set_xticklabels(("6:00", "12:00", "18:00", "23:00"))
plt.grid(b=True, axis="both", lw=1)
plt.savefig("hourly-eval-{0}.pdf".format(year))
def barPlot_crime_MRE():
N = 3
mres = np.array([
[0.3084, 0.32, 0.327, 0.292],
[0.3083, 0.321, 0.329, 0.288],
[0.322, 0.333, 0.3428, 0.3124]
])
maes = np.array([
[14.52, 15.12, 15.45, 13.79],
[12.79, 13.42, 13.76, 12.04],
[14.30, 14.74, 15.15, 13.82]
])
width=0.18
pos = np.arange(N)
plt.rc("axes", linewidth=2)
plt.figure(figsize=(8,6))
plt.bar(pos, mres[:,0], width, color='g')
plt.bar(pos+width, mres[:,1], width, color='r')
plt.bar(pos+width*2, mres[:,2], width, color='y')
plt.bar(pos+width*3, mres[:,3], width, color='b')
plt.axis([-0.3, 2.9, 0.26, 0.355])
plt.gca().set_xticks([0.27, 1.27, 2.27])
plt.gca().set_xticklabels(['2013', '2014', '2015'])
plt.tick_params(labelsize=16)
plt.legend(["$RAW$", "$MF$", "$LINE$", "$DGE_{flow}$"], ncol=2, fontsize=20, loc='best')
plt.xlabel("Year", fontsize=20)
plt.ylabel("$MRE$", fontsize=24)
plt.savefig("crime-mre.pdf")
plt.figure(figsize=(8,6))
plt.bar(pos, maes[:,0], width, color='g')
plt.bar(pos+width, maes[:,1], width, color='r')
plt.bar(pos+width*2, maes[:,2], width, color='y')
plt.bar(pos+width*3, maes[:,3], width, color='b')
plt.axis([-0.3, 2.9, 11, 17])
plt.gca().set_xticks([0.27, 1.27, 2.27])
plt.gca().set_xticklabels(['2013', '2014', '2015'])
plt.tick_params(labelsize=16)
plt.legend(["$RAW$", "$MF$", "$LINE$", "$DGE_{flow}$"], ncol=2, fontsize=20, loc='best')
plt.xlabel("Year", fontsize=20)
plt.ylabel("$MAE$", fontsize=24)
plt.savefig("crime-mae.pdf")
def plot_hourly_crime():
plt.rc("axes", linewidth=2)
plt.figure(figsize=(8,6))
for year in range(2013, 2016):
Y, D, P, T, G = extract_raw_samples(year)
population = D[:,0]
Yh = pickle.load(open("../chicago-hourly-crime-{0}.pickle".format(year)))
Yh = Yh / population * 10000
if year == 2015:
Yh = Yh * 2
plt.plot(Yh.mean(axis=1), lw=3)
plt.legend(["2013", "2014", "2015"], fontsize=20, loc='best')
plt.xlabel("Hour in day", fontsize=20)
plt.ylabel("Average crime rate", fontsize=24)
plt.axis([0,23,10,70])
plt.gca().set_xticks([0,6,12,18,23])
plt.gca().set_xticklabels(("0:00", "6:00", "12:00", "18:00", "23:00"))
plt.grid(b=True, axis="both", lw=1)
plt.tick_params(labelsize=18)
plt.savefig("crime-rate-hourly.pdf")
def leaveOneOut_eval(X, Y):
X = sm.add_constant(X, prepend=False)
loo = LeaveOneOut(len(Y))
er = []
for train_idx, test_idx in loo:
nbm, yp = NBmodel(train_idx, Y, X)
ybar = nbm.predict(X[test_idx])
y_error = np.abs(ybar - Y[test_idx])
if y_error > 20 * Y[test_idx]:
print test_idx, y_error, Y[test_idx]
continue
er.append(y_error)
max_idx = np.argmax(er)
print "largest error", er[max_idx], Y[max_idx], max_idx+1
mae = np.mean(er)
mre = mae / np.mean(Y)
return mre, mae
if __name__ == '__main__':
# unittest.main()
# year = sys.argv[1]
# r = evaluate_various_flow_features_with_concatenation_model(year, sys.argv[2]) # year and spatial
# r = evaluate_various_embedding_features_with_lag_model(year, sys.argv[2])
## pickle.dump(r, open("embeddings-{0}.pickle".format(year), "w"))
# print np.mean(r, axis=1)
# plot_hourly_evaluation(2014)
barPlot_crime_MRE()
# plot_hourly_crime()
| mit |
rahlk/WarnPlan | warnplan/commons/tools/Discretize.py | 1 | 5764 | """
An instance filter that discretizes a range of numeric attributes in the dataset into nominal attributes. Discretization is by Fayyad & Irani's MDL method (the default).
For more information, see:
Usama M. Fayyad, Keki B. Irani: Multi-interval discretization of continuous valued attributes for classification learning. In: Thirteenth International Joint Conference on Artificial Intelligence, 1022-1027, 1993.
Igor Kononenko: On Biases in Estimating Multi-Valued Attributes. In: 14th International Joint Conference on Artificial Intelligence, 1034-1040, 1995.
Dougherty, James, Ron Kohavi, and Mehran Sahami. "Supervised and unsupervised discretization of continuous features." Machine learning: proceedings of the twelfth international conference. Vol. 12. 1995.
"""
from __future__ import division, print_function
from misc import *
import numpy as np
import pandas as pd
from pdb import set_trace
from collections import Counter
from sklearn.feature_selection import RFE
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier as CART
def fWeight(tbl):
"""
Sort features based on entropy
"""
clf = CART(criterion='entropy')
features = tbl.columns[:-1]
klass = tbl[tbl.columns[-1]]
try:
clf.fit(tbl[features], [k == True for k in klass])
lbs = clf.feature_importances_
except ValueError:
set_trace()
return [tbl.columns[i] for i in np.argsort(lbs)[::-1]]
def discretize(feature, klass, atleast=-1, discrete=False):
"""
Recursive Minimal Entropy Discretization
````````````````````````````````````````
Inputs:
feature: A list or a numpy array of continuous attributes
klass: A list, or a numpy array of discrete class labels.
atleast: minimum splits.
Outputs:
        splits: A list containing suggested split locations
"""
def measure(x):
def ent(x):
C = Counter(x)
N = len(x)
return sum([-C[n] / N * np.log(C[n] / N) for n in C.keys()])
def stdev(x):
if np.isnan(np.var(x) ** 0.5):
return 0
return np.var(x) ** 0.5
if not discrete:
return ent(x)
else:
return stdev(x)
# Sort features and klass
feature, klass = sorted(feature), [k for (f, k) in
sorted(zip(feature, klass))]
splits = []
gain = []
lvl = 0
def redo(feature, klass, lvl):
if len(feature) > 0:
E = measure(klass)
N = len(klass)
T = [] # Record boundaries of splits
            for k in range(len(feature)):
west, east = feature[:k], feature[k:]
k_w, k_e = klass[:k], klass[k:]
N_w, N_e = len(west), len(east)
T += [N_w / N * measure(k_w) + N_e / N * measure(k_e)]
T_min = np.argmin(T)
left, right = feature[:T_min], feature[T_min:]
k_l, k_r = klass[:T_min], klass[T_min:]
# set_trace()
def stop(k, k_l, k_r):
gain = E - T[T_min]
def count(lst): return len(Counter(lst).keys())
delta = np.log2(float(3 ** count(k) - 2)) - (
count(k) * measure(k) - count(k_l) * measure(k_l) - count(
k_r) * measure(k_r))
# print(gain, (np.log2(N-1)+delta)/N)
return gain < (np.log2(N - 1) + delta) / N or T_min == 0
if stop(klass, k_l, k_r) and lvl >= atleast:
if discrete:
splits.append(T_min)
else:
splits.append(feature[T_min])
else:
_ = redo(feature=left, klass=k_l, lvl=lvl + 1)
_ = redo(feature=right, klass=k_r, lvl=lvl + 1)
# ------ main ------
redo(feature, klass, lvl=0)
# set_trace()
return splits
def _test0():
"A Test Function"
test = np.random.normal(0, 10, 1000).tolist()
klass = [int(abs(i)) for i in np.random.normal(0, 1, 1000)]
splits = discretize(feature=test, klass=klass)
set_trace()
def _test1():
tbl_loc = explore(name='ant')[0]
tbl = csv2DF(tbl_loc)
new = discreteTbl(tbl)
set_trace()
def discreteTbl(tbl, B=0.33, Prune=True):
"""
Discretize a table
``````````````````
Columns 1 to N-1 represent the independent attributes, column N the dependent.
Parameters:
    tbl - A Pandas DataFrame
B - Cutoff for Pruning Columns (float between 0,1)
Prune - Prune (True/False)
Returns:
    Pandas DataFrame: Discretized table
"""
dtable = []
fweight = fWeight(tbl)
for i, name in enumerate(tbl.columns[:-1]):
new = []
feature = tbl[name].values
klass = tbl[tbl.columns[-1]].values
splits = discretize(feature, klass)
LO, HI = min(feature), max(feature)
cutoffs = sorted(list(set(splits + [LO, HI])))
def pairs(lst):
while len(lst) > 1:
yield (lst.pop(0), lst[0])
cutoffs = [t for t in pairs(sorted(list(set(splits + [LO, HI]))))]
for f in feature:
for n in cutoffs:
if n[0] <= f < n[1]:
new.append(n)
elif f == n[1] == HI:
new.append((n[0], HI))
dtable.append(new)
dtable.append(klass.tolist())
dtable = pd.DataFrame(dtable).T
dtable.columns = tbl.columns
ranks = fWeight(tbl)
if Prune:
return dtable[ranks[:int(len(ranks) * B)] + [tbl.columns[-1]]]
else:
return dtable[ranks + [tbl.columns[-1]]]
if __name__ == '__main__':
_test0()
pass
| mit |
analysiscenter/dataset | setup.py | 1 | 1724 | """
BatchFlow helps you conveniently work with random or sequential batches of your data
and define data processing and machine learning workflows even for datasets that do not fit into memory.
Documentation - https://analysiscenter.github.io/batchflow/
"""
import re
from setuptools import setup, find_packages
with open('batchflow/__init__.py', 'r') as f:
version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', f.read(), re.MULTILINE).group(1)
with open('README.md', 'r') as f:
long_description = f.read()
setup(
name='batchflow',
packages=find_packages(exclude=['examples']),
version=version,
url='https://github.com/analysiscenter/batchflow',
license='Apache License 2.0',
    author='Roman Kh et al',
author_email='[email protected]',
description='A framework for fast data processing and ML models training',
long_description=long_description,
long_description_content_type="text/markdown",
zip_safe=False,
platforms='any',
install_requires=[
'numpy>=1.10',
'pandas>=0.24',
'dill>=0.2.7',
'tqdm>=4.19.7',
'scipy>=0.19.1',
'scikit-image>=0.13.1',
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Scientific/Engineering'
],
)
| apache-2.0 |
fps7806/Graph-CNN | src/graphcnn/experiment.py | 1 | 25438 | from graphcnn.helper import *
from graphcnn.network import *
from graphcnn.layers import *
from sklearn.model_selection import KFold
import numpy as np
import tensorflow as tf
import glob
import time
from tensorflow.python.training import queue_runner
# This function is used to create tf.cond compatible tf.train.batch alternative
def _make_batch_queue(input, capacity, num_threads=1):
queue = tf.PaddingFIFOQueue(capacity=capacity, dtypes=[s.dtype for s in input], shapes=[s.get_shape() for s in input])
tf.summary.scalar("fraction_of_%d_full" % capacity,
tf.cast(queue.size(), tf.float32) *
(1. / capacity))
enqueue_ops = [queue.enqueue(input)]*num_threads
queue_runner.add_queue_runner(queue_runner.QueueRunner(queue, enqueue_ops))
return queue
# This class is responsible for setting up and running experiments
# Also provides helper functions related to experiments (e.g. get accuracy)
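# Rough usage sketch (hypothetical names; the net_constructor object is assumed to
# provide create_network(), and the dataset tuple follows the preprocess_data()
# format documented below):
#
#   exp = GraphCNNExperiment('MUTAG', 'my_model', net_constructor)
#   exp.preprocess_data((vertices, adjacency, labels))
#   mean_acc, std_acc = exp.run_kfold_experiments(no_folds=10)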
class GraphCNNExperiment(object):
def __init__(self, dataset_name, model_name, net_constructor):
# Initialize all defaults
self.dataset_name = dataset_name
self.model_name = model_name
self.num_iterations = 200
self.iterations_per_test = 5
self.display_iter = 5
self.snapshot_iter = 1000000
self.train_batch_size = 0
self.test_batch_size = 0
self.crop_if_possible = True
self.debug = False
self.starter_learning_rate = 0.1
self.learning_rate_exp = 0.1
self.learning_rate_step = 1000
self.reports = {}
self.silent = False
self.optimizer = 'momentum'
self.net_constructor = net_constructor
self.net = GraphCNNNetwork()
self.net_desc = GraphCNNNetworkDescription()
tf.reset_default_graph()
# print_ext can be disabled through the silent flag
def print_ext(self, *args):
if self.silent == False:
print_ext(*args)
# Will retrieve the value stored as the maximum test accuracy on a trained network
# SHOULD ONLY BE USED IF test_batch_size == ALL TEST SAMPLES
def get_max_accuracy(self):
tf.reset_default_graph()
with tf.variable_scope('loss') as scope:
max_acc_test = tf.Variable(tf.zeros([]), name="max_acc_test")
saver = tf.train.Saver()
with tf.Session() as sess:
max_it = self.load_model(sess, saver)
return sess.run(max_acc_test), max_it
# Run all folds in a CV and calculate mean/std
def run_kfold_experiments(self, no_folds=10):
acc = []
self.net_constructor.create_network(self.net_desc, [])
desc = self.net_desc.get_description()
self.print_ext('Running CV for:', desc)
start_time = time.time()
for i in range(no_folds):
tf.reset_default_graph()
self.set_kfold(no_folds=no_folds, fold_id=i)
cur_max, max_it = self.run()
self.print_ext('Fold %d max accuracy: %g at %d' % (i, cur_max, max_it))
acc.append(cur_max)
acc = np.array(acc)
mean_acc= np.mean(acc)*100
std_acc = np.std(acc)*100
self.print_ext('Result is: %.2f (+- %.2f)' % (mean_acc, std_acc))
verify_dir_exists('./results/')
with open('./results/%s.txt' % self.dataset_name, 'a+') as file:
file.write('%s\t%s\t%d-fold\t%d seconds\t%.2f (+- %.2f)\n' % (str(datetime.now()), desc, no_folds, time.time()-start_time, mean_acc, std_acc))
return mean_acc, std_acc
# Prepares samples for experiment, accepts a list (vertices, adjacency, labels) where:
# vertices = list of NxC matrices where C is the same over all samples, N can be different between samples
# adjacency = list of NxLxN tensors containing L NxN adjacency matrices of the given samples
# labels = list of sample labels
# len(vertices) == len(adjacency) == len(labels)
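    # For example (hypothetical shapes: two graphs with 5 and 7 nodes, C=3 node
    # features and L=2 adjacency matrices per graph):
    #   vertices  = [np.zeros((5, 3)), np.zeros((7, 3))]
    #   adjacency = [np.zeros((5, 2, 5)), np.zeros((7, 2, 7))]
    #   labels    = np.array([0, 1])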
def preprocess_data(self, dataset):
self.graph_size = np.array([s.shape[0] for s in dataset[0]]).astype(np.int64)
self.largest_graph = max(self.graph_size)
self.print_ext('Padding samples')
self.graph_vertices = []
self.graph_adjacency = []
for i in range(len(dataset[0])):
# pad all vertices to match size
self.graph_vertices.append(np.pad(dataset[0][i].astype(np.float32), ((0, self.largest_graph-dataset[0][i].shape[0]), (0, 0)), 'constant', constant_values=(0)))
# pad all adjacency matrices to match size
self.graph_adjacency.append(np.pad(dataset[1][i].astype(np.float32), ((0, self.largest_graph-dataset[1][i].shape[0]), (0, 0), (0, self.largest_graph-dataset[1][i].shape[0])), 'constant', constant_values=(0)))
self.print_ext('Stacking samples')
self.graph_vertices = np.stack(self.graph_vertices, axis=0)
self.graph_adjacency = np.stack(self.graph_adjacency, axis=0)
self.graph_labels = dataset[2].astype(np.int64)
self.no_samples = self.graph_labels.shape[0]
single_sample = [self.graph_vertices, self.graph_adjacency, self.graph_labels, self.graph_size]
# Create CV information
def set_kfold(self, no_folds = 10, fold_id = 0):
inst = KFold(n_splits = no_folds, shuffle=True, random_state=125)
self.fold_id = fold_id
self.KFolds = list(inst.split(np.arange(self.no_samples)))
self.train_idx, self.test_idx = self.KFolds[fold_id]
self.no_samples_train = self.train_idx.shape[0]
self.no_samples_test = self.test_idx.shape[0]
self.print_ext('Data ready. no_samples_train:', self.no_samples_train, 'no_samples_test:', self.no_samples_test)
if self.train_batch_size == 0:
self.train_batch_size = self.no_samples_train
if self.test_batch_size == 0:
self.test_batch_size = self.no_samples_test
self.train_batch_size = min(self.train_batch_size, self.no_samples_train)
self.test_batch_size = min(self.test_batch_size, self.no_samples_test)
    # Crop each single sample before it is batched.
    # Slicing each sample down to its true graph size improves performance.
def crop_single_sample(self, single_sample):
vertices = tf.slice(single_sample[0], np.array([0, 0], dtype=np.int64), tf.cast(tf.stack([single_sample[3], -1]), tf.int64))
vertices.set_shape([None, self.graph_vertices.shape[2]])
adjacency = tf.slice(single_sample[1], np.array([0, 0, 0], dtype=np.int64), tf.cast(tf.stack([single_sample[3], -1, single_sample[3]]), tf.int64))
adjacency.set_shape([None, self.graph_adjacency.shape[2], None])
# V, A, labels, mask
return [vertices, adjacency, single_sample[2], tf.expand_dims(tf.ones(tf.slice(tf.shape(vertices), [0], [1])), axis=-1)]
def create_input_variable(self, input):
for i in range(len(input)):
placeholder = tf.placeholder(tf.as_dtype(input[i].dtype), shape=input[i].shape)
var = tf.Variable(placeholder, trainable=False, collections=[tf.GraphKeys.LOCAL_VARIABLES])
self.variable_initialization[placeholder] = input[i]
input[i] = var
return input
# Create input_producers and batch queues
def create_data(self):
with tf.device("/cpu:0"):
with tf.variable_scope('input') as scope:
# Create the training queue
with tf.variable_scope('train_data') as scope:
self.print_ext('Creating training Tensorflow Tensors')
# Create tensor with all training samples
training_samples = [self.graph_vertices, self.graph_adjacency, self.graph_labels, self.graph_size]
training_samples = [s[self.train_idx, ...] for s in training_samples]
if self.crop_if_possible == False:
training_samples[3] = get_node_mask(training_samples[3], max_size=self.graph_vertices.shape[1])
# Create tf.constants
training_samples = self.create_input_variable(training_samples)
# Slice first dimension to obtain samples
single_sample = tf.train.slice_input_producer(training_samples, shuffle=True, capacity=self.train_batch_size)
# Cropping samples improves performance but is not required
if self.crop_if_possible:
self.print_ext('Cropping smaller graphs')
single_sample = self.crop_single_sample(single_sample)
# creates training batch queue
train_queue = _make_batch_queue(single_sample, capacity=self.train_batch_size*2, num_threads=6)
# Create the test queue
with tf.variable_scope('test_data') as scope:
self.print_ext('Creating test Tensorflow Tensors')
# Create tensor with all test samples
test_samples = [self.graph_vertices, self.graph_adjacency, self.graph_labels, self.graph_size]
test_samples = [s[self.test_idx, ...] for s in test_samples]
# If using mini-batch we will need a queue
if self.test_batch_size != self.no_samples_test:
if self.crop_if_possible == False:
test_samples[3] = get_node_mask(test_samples[3], max_size=self.graph_vertices.shape[1])
test_samples = self.create_input_variable(test_samples)
single_sample = tf.train.slice_input_producer(test_samples, shuffle=True, capacity=self.test_batch_size)
if self.crop_if_possible:
single_sample = self.crop_single_sample(single_sample)
test_queue = _make_batch_queue(single_sample, capacity=self.test_batch_size*2, num_threads=1)
# If using full-batch no need for queues
else:
test_samples[3] = get_node_mask(test_samples[3], max_size=self.graph_vertices.shape[1])
test_samples = self.create_input_variable(test_samples)
# obtain batch depending on is_training and if test is a queue
if self.test_batch_size == self.no_samples_test:
return tf.cond(self.net.is_training, lambda: train_queue.dequeue_many(self.train_batch_size), lambda: test_samples)
return tf.cond(self.net.is_training, lambda: train_queue.dequeue_many(self.train_batch_size), lambda: test_queue.dequeue_many(self.test_batch_size))
# Function called with the output of the Graph-CNN model
# Should add the loss to the 'losses' collection and add any summaries needed (e.g. accuracy)
def create_loss_function(self):
with tf.variable_scope('loss') as scope:
self.print_ext('Creating loss function and summaries')
cross_entropy = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.net.current_V, labels=self.net.labels))
correct_prediction = tf.cast(tf.equal(tf.argmax(self.net.current_V, 1), self.net.labels), tf.float32)
accuracy = tf.reduce_mean(correct_prediction)
# we have 2 variables that will keep track of the best accuracy obtained in training/testing batch
# SHOULD ONLY BE USED IF test_batch_size == ALL TEST SAMPLES
self.max_acc_train = tf.Variable(tf.zeros([]), name="max_acc_train")
self.max_acc_test = tf.Variable(tf.zeros([]), name="max_acc_test")
max_acc = tf.cond(self.net.is_training, lambda: tf.assign(self.max_acc_train, tf.maximum(self.max_acc_train, accuracy)), lambda: tf.assign(self.max_acc_test, tf.maximum(self.max_acc_test, accuracy)))
tf.add_to_collection('losses', cross_entropy)
tf.summary.scalar('accuracy', accuracy)
tf.summary.scalar('max_accuracy', max_acc)
tf.summary.scalar('cross_entropy', cross_entropy)
# if silent == false display these statistics:
self.reports['accuracy'] = accuracy
self.reports['max acc.'] = max_acc
self.reports['cross_entropy'] = cross_entropy
# check if the model has a saved iteration and return the latest iteration step
def check_model_iteration(self):
latest = tf.train.latest_checkpoint(self.snapshot_path)
if latest == None:
return 0
return int(latest[len(self.snapshot_path + 'model-'):])
# load_model if any checkpoint exist
    def load_model(self, sess, saver):
latest = tf.train.latest_checkpoint(self.snapshot_path)
if latest == None:
return 0
saver.restore(sess, latest)
i = int(latest[len(self.snapshot_path + 'model-'):])
self.print_ext("Model restored at %d." % i)
return i
def save_model(self, sess, saver, i):
latest = tf.train.latest_checkpoint(self.snapshot_path)
if latest == None or i != int(latest[len(self.snapshot_path + 'model-'):]):
self.print_ext('Saving model at %d' % i)
verify_dir_exists(self.snapshot_path)
result = saver.save(sess, self.snapshot_path + 'model', global_step=i)
self.print_ext('Model saved to %s' % result)
# Create graph (input, network, loss)
# Handle checkpoints
# Report summaries if silent == false
# start/end threads
def run(self):
self.variable_initialization = {}
self.print_ext('Training model "%s"!' % self.model_name)
        if hasattr(self, 'fold_id') and self.fold_id is not None:  # treat fold 0 like any other fold
self.snapshot_path = './snapshots/%s/%s/' % (self.dataset_name, self.model_name + '_fold%d' % self.fold_id)
self.test_summary_path = './summary/%s/test/%s_fold%d' %(self.dataset_name, self.model_name, self.fold_id)
self.train_summary_path = './summary/%s/train/%s_fold%d' %(self.dataset_name, self.model_name, self.fold_id)
else:
self.snapshot_path = './snapshots/%s/%s/' % (self.dataset_name, self.model_name)
self.test_summary_path = './summary/%s/test/%s' %(self.dataset_name, self.model_name)
self.train_summary_path = './summary/%s/train/%s' %(self.dataset_name, self.model_name)
if self.debug:
i = 0
else:
i = self.check_model_iteration()
if i < self.num_iterations:
self.print_ext('Creating training network')
self.net.is_training = tf.placeholder(tf.bool, shape=())
self.net.global_step = tf.Variable(0,name='global_step',trainable=False)
input = self.create_data()
self.net_constructor.create_network(self.net, input)
self.create_loss_function()
self.print_ext('Preparing training')
loss = tf.add_n(tf.get_collection('losses'))
if len(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)) > 0:
loss += tf.add_n(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
if self.optimizer == 'adam':
train_step = tf.train.AdamOptimizer().minimize(loss, global_step=self.net.global_step)
else:
self.learning_rate = tf.train.exponential_decay(self.starter_learning_rate, self.net.global_step, self.learning_rate_step, self.learning_rate_exp, staircase=True)
train_step = tf.train.MomentumOptimizer(self.learning_rate, 0.9).minimize(loss, global_step=self.net.global_step)
self.reports['lr'] = self.learning_rate
tf.summary.scalar('learning_rate', self.learning_rate)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer(), self.variable_initialization)
if self.debug == False:
saver = tf.train.Saver()
self.load_model(sess, saver)
self.print_ext('Starting summaries')
test_writer = tf.summary.FileWriter(self.test_summary_path, sess.graph)
train_writer = tf.summary.FileWriter(self.train_summary_path, sess.graph)
summary_merged = tf.summary.merge_all()
self.print_ext('Starting threads')
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
self.print_ext('Starting training. train_batch_size:', self.train_batch_size, 'test_batch_size:', self.test_batch_size)
wasKeyboardInterrupt = False
try:
total_training = 0.0
total_testing = 0.0
start_at = time.time()
last_summary = time.time()
while i < self.num_iterations:
if i % self.snapshot_iter == 0 and self.debug == False:
self.save_model(sess, saver, i)
if i % self.iterations_per_test == 0:
start_temp = time.time()
summary, reports = sess.run([summary_merged, self.reports], feed_dict={self.net.is_training:0})
total_testing += time.time() - start_temp
self.print_ext('Test Step %d Finished' % i)
for key, value in reports.items():
self.print_ext('Test Step %d "%s" = ' % (i, key), value)
if self.debug == False:
test_writer.add_summary(summary, i)
start_temp = time.time()
summary, _, reports = sess.run([summary_merged, train_step, self.reports], feed_dict={self.net.is_training:1})
total_training += time.time() - start_temp
i += 1
if ((i-1) % self.display_iter) == 0:
if self.debug == False:
train_writer.add_summary(summary, i-1)
total = time.time() - start_at
self.print_ext('Training Step %d Finished Timing (Training: %g, Test: %g) after %g seconds' % (i-1, total_training/total, total_testing/total, time.time()-last_summary))
for key, value in reports.items():
self.print_ext('Training Step %d "%s" = ' % (i-1, key), value)
last_summary = time.time()
if (i-1) % 100 == 0:
total_training = 0.0
total_testing = 0.0
start_at = time.time()
if i % self.iterations_per_test == 0:
summary = sess.run(summary_merged, feed_dict={self.net.is_training:0})
if self.debug == False:
test_writer.add_summary(summary, i)
self.print_ext('Test Step %d Finished' % i)
except KeyboardInterrupt as err:
self.print_ext('Training interrupted at %d' % i)
wasKeyboardInterrupt = True
raisedEx = err
finally:
if i > 0 and self.debug == False:
self.save_model(sess, saver, i)
self.print_ext('Training completed, starting cleanup!')
coord.request_stop()
coord.join(threads)
self.print_ext('Cleanup completed!')
if wasKeyboardInterrupt:
raise raisedEx
return sess.run([self.max_acc_test, self.net.global_step])
else:
self.print_ext('Model "%s" already trained!' % self.model_name)
return self.get_max_accuracy()
# SingleGraphCNNExperiment overloads GraphCNNExperiment to support single graph samples (e.g. Cora)
# BatchNormalization during test follows same behavior as training
# Loss function requires a mask that selects samples to report accuracy on.
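# Rough usage sketch (hypothetical single-graph dataset, e.g. Cora-style inputs):
#   exp = SingleGraphCNNExperiment('Cora', 'my_model', net_constructor)
#   exp.preprocess_data((node_features, adjacency, node_labels))
#   exp.set_kfold(no_folds=10, fold_id=0)
#   exp.run()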
class SingleGraphCNNExperiment(GraphCNNExperiment):
def preprocess_data(self, dataset):
self.largest_graph = dataset[0].shape[0]
self.graph_size = [self.largest_graph]
self.graph_vertices = np.expand_dims(dataset[0].astype(np.float32), axis=0)
self.graph_adjacency = np.expand_dims(dataset[1].astype(np.float32), axis=0)
self.graph_labels = np.expand_dims(dataset[2].astype(np.int64), axis=0)
self.no_samples = self.graph_labels.shape[1]
single_sample = [self.graph_vertices, self.graph_adjacency, self.graph_labels, self.graph_size]
def make_batchnorm_layer(self, name=None):
axis = -1
with tf.variable_scope(name, default_name='BatchNorm') as scope:
input_size = self.current_V.get_shape()[axis].value
if axis == -1:
axis = len(self.current_V.get_shape())-1
axis_arr = [i for i in range(len(self.current_V.get_shape())) if i != axis]
batch_mean, batch_var = tf.nn.moments(self.current_V, axis_arr)
gamma = make_variable('gamma', input_size, initializer=tf.constant_initializer(1))
beta = make_bias_variable('bias', input_size)
self.current_V = tf.nn.batch_normalization(self.current_V, batch_mean, batch_var, beta, gamma, 1e-3)
return self.current_V
def create_data(self):
with tf.device("/cpu:0"):
with tf.variable_scope('input') as scope:
self.print_ext('Creating training Tensorflow Tensors')
vertices = self.graph_vertices[:, self.train_idx, :]
adjacency = self.graph_adjacency[:, self.train_idx, :, :]
adjacency = adjacency[:, :, :, self.train_idx]
labels = self.graph_labels[:, self.train_idx]
input_mask = np.ones([1, len(self.train_idx), 1]).astype(np.float32)
train_input = [vertices, adjacency, labels, input_mask]
train_input = self.create_input_variable(train_input)
vertices = self.graph_vertices
adjacency = self.graph_adjacency
labels = self.graph_labels
input_mask = np.zeros([1, self.largest_graph, 1]).astype(np.float32)
input_mask[:, self.test_idx, :] = 1
test_input = [vertices, adjacency, labels, input_mask]
test_input = self.create_input_variable(test_input)
return tf.cond(self.net.is_training, lambda: train_input, lambda: test_input)
def create_loss_function(self):
self.print_ext('Creating loss function and summaries')
with tf.variable_scope('loss') as scope:
inv_sum = (1./tf.reduce_sum(self.net.current_mask))
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.net.current_V, labels=self.net.labels)
cross_entropy = tf.multiply(tf.squeeze(self.net.current_mask), tf.squeeze(cross_entropy))
cross_entropy = tf.reduce_sum(cross_entropy)*inv_sum
correct_prediction = tf.cast(tf.equal(tf.argmax(self.net.current_V, 2), self.net.labels), tf.float32)
correct_prediction = tf.multiply(tf.squeeze(self.net.current_mask), tf.squeeze(correct_prediction))
accuracy = tf.reduce_sum(correct_prediction)*inv_sum
tf.add_to_collection('losses', cross_entropy)
tf.summary.scalar('loss', cross_entropy)
self.max_acc_train = tf.Variable(tf.zeros([]), name="max_acc_train")
self.max_acc_test = tf.Variable(tf.zeros([]), name="max_acc_test")
max_acc = tf.cond(self.net.is_training, lambda: tf.assign(self.max_acc_train, tf.maximum(self.max_acc_train, accuracy)), lambda: tf.assign(self.max_acc_test, tf.maximum(self.max_acc_test, accuracy)))
tf.summary.scalar('max_accuracy', max_acc)
tf.summary.scalar('accuracy', accuracy)
self.reports['accuracy'] = accuracy
self.reports['max acc.'] = max_acc
self.reports['cross_entropy'] = cross_entropy
| mit |
ArchaeoPY/ArchaeoPY-Modeller | Res/res3d.py | 1 | 10071 | #def res3D(array, a, a1, a2, x, contrast, )
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import pyplot as plt
from itertools import izip, product
#Array Info
array = 'tp_broad' #Select one of the array types handled below
ps = 1 #for twin-probe, wenner, square, and trapezoid (side 1) arrays. From 0.1 to 5
ps1 = 3 #for trapezoid array (side 2). From 0.1 to 5
ps2 = 3 #for trapezoid array (separation of sides) From 0.1 to 5
#Grid Info
x_length = 20
x_step = 0.25
y_length = 20
y_step = 0.25
#Sphere Info
insulating = 1.0e+6
conducting = 1.0e-6
rho_sphere = insulating #resistivity of sphere: insulating or conducting
radius = 0.25 * ps #relative radius
diameter = 2 * radius #relative sphere diameter
sphere_x = 10
sphere_y = 10
sphere_z = 2
#Other
rho_background = 1.0
contrast = (rho_sphere - rho_background) / (2 * rho_sphere + rho_background)
sphere_xyz = np.array([sphere_x, sphere_y, sphere_z])
#print sphere_xyz[0]
'''Grid Computations'''
#Creates a grid of (x, y) measurements positions
#assuming level surface, z = 0
x_grid_pos = np.arange(np.divide(x_step,2.0), x_length, x_step)
y_grid_pos = np.arange(np.divide(y_step,2.0), y_length, y_step)
xgrid, ygrid = np.meshgrid(x_grid_pos, y_grid_pos)
if array == 'wenner_long':
C1_x = xgrid #X Grid positions of C1 electrode
C1_y = np.add(ygrid, 1.5 * ps)
C1_z = np.zeros(C1_x.shape)
P1_x = xgrid
P1_y = np.add(ygrid, 0.5 * ps)
P1_z = np.zeros(P1_x.shape)
P2_x = xgrid
P2_y = np.subtract(ygrid, 0.5 * ps)
P2_z = np.zeros(P2_x.shape)
c1p1_p = ps
c1p2_p = 2.0 * ps
p1c2_p = 2.0 * ps
p2c2_p = ps
rel_p = 2.0 #array scaling factor1
if array == 'wenner_broad':
C1_x = np.add(xgrid, 1.5 * ps)
C1_y = ygrid
C1_z = np.zeros(C1_x.shape)
P1_x = np.add(xgrid, 0.5 * ps)
P1_y = ygrid
P1_z = np.zeros(P1_x.shape)
P2_x = np.subtract(xgrid, 0.5 * ps)
P2_y = ygrid
P2_z = np.zeros(P2_x.shape)
c1p1_p = ps
c1p2_p = 2.0 * ps
p1c2_p = 2.0 * ps
p2c2_p = ps
rel_p = 2.0 #array scaling factor1
if array == 'tp_long':
C1_x = xgrid
C1_y = np.add(ygrid, ps / 2.0)
C1_z = np.zeros(C1_x.shape)
P1_x = xgrid
P1_y = np.subtract(ygrid, ps / 2.0)
P1_z = np.zeros(P1_x.shape)
P2_x = xgrid
P2_y = np.subtract(100.5 * ps, ygrid)
P2_z = np.zeros(P2_x.shape)
c1p1_p = ps
c1p2_p = 101 * ps
p1c2_p = 101 * ps
p2c2_p = ps
rel_p = 1.0 #array scaling factor1
if array == 'tp_broad':
C1_x = np.add(xgrid, ps / 2.0)
C1_y = ygrid
C1_z = np.zeros(C1_x.shape)
P1_x = np.add(xgrid, ps / 2.0)
P1_y = ygrid
P1_z = np.zeros(P1_x.shape)
P2_x = np.subtract(100.5 * ps, xgrid)
P2_y = ygrid
P2_z = np.zeros(P2_x.shape)
c1p1_p = ps
c1p2_p = 101 * ps
p1c2_p = 101 * ps
p2c2_p = ps
rel_p = 1.0 #array scaling factor1
if array == 'square_a':
C1_x = np.subtract(xgrid, ps / 2.0)
C1_y = np.add(ygrid, ps / 2.0)
C1_z = np.zeros(C1_x.shape)
P1_x = np.add(xgrid, ps / 2.0)
P1_y = np.add(ygrid, ps / 2.0)
P1_z = np.zeros(P1_x.shape)
P2_x = np.add(xgrid, ps / 2.0)
P2_y = np.subtract(ygrid, ps / 2.0)
P2_z = np.zeros(P2_x.shape)
c1p1_p = ps
c1p2_p = np.sqrt(2) * ps
p1c2_p = np.sqrt(2) * ps
p2c2_p = ps
rel_p = np.sqrt(2.0)/(np.sqrt(2.0)-1.0) #array scaling factor1
if array == 'square_b': #Square Array: Beta Config
C1_x = np.subtract(xgrid, ps / 2.0)
C1_y = np.add(ygrid, ps / 2.0)
C1_z = np.zeros(C1_x.shape)
P1_x = np.subtract(xgrid, ps / 2.0)
P1_y = np.subtract(ygrid, ps / 2.0)
P1_z = np.zeros(P1_x.shape)
P2_x = np.add(xgrid, ps / 2.0)
P2_y = np.subtract(ygrid, ps / 2.0)
P2_z = np.zeros(P2_x.shape)
c1p1_p = ps
c1p2_p = np.sqrt(2) * ps
p1c2_p = np.sqrt(2) * ps
p2c2_p = ps
rel_p = np.sqrt(2.0)/(np.sqrt(2.0)-1.0) #array scaling factor1
if array == 'square_g': #Square Array: Gamma Config
C1_x = np.subtract(xgrid, ps / 2.0)
C1_y = np.add(ygrid, ps / 2.0)
C1_z = np.zeros(C1_x.shape)
P1_x = np.add(xgrid, ps / 2.0)
P1_y = np.add(ygrid, ps / 2.0)
P1_z = np.zeros(P1_x.shape)
P2_x = np.subtract(xgrid, ps / 2.0)
P2_y = np.subtract(ygrid, ps / 2.0)
P2_z = np.zeros(P2_x.shape)
c1p1_p = ps
c1p2_p = ps
p1c2_p = ps
p2c2_p = ps
rel_p = np.sqrt(2.0)/(np.sqrt(2.0)-1.0) #array scaling factor1
if array == 'trap_l': #Trapezoid Array: Longitudinal config
C1_x = np.subtract(xgrid, ps / 2.0)
C1_y = np.add(ygrid, ps2 / 2.0)
C1_z = np.zeros(C1_x.shape)
P1_x = np.add(xgrid, ps / 2.0)
P1_y = np.add(ygrid, ps2 / 2.0)
P1_z = np.zeros(P1_x.shape)
P2_x = np.add(xgrid, ps1 / 2.0)
P2_y = np.subtract(ygrid, ps2 / 2.0)
P2_z = np.zeros(P2_x.shape)
c1p1_p = ps
c1p2_p = np.sqrt(np.square(ps2) + np.square(ps1-(ps1-ps) / 2))
p1c2_p = np.sqrt(np.square(ps2) + np.square(ps1-(ps1-ps) / 2))
p2c2_p = ps1
rel_p = np.sqrt((ps1 * ps) + np.sqrt(np.square(ps1 - ps) / 2.0 + \
np.square(ps2)))/(np.sqrt((ps1 * ps) + np.sqrt(np.square(ps1 - ps) / 2.0 + \
np.square(ps2))) - 1.0)
if array == 'trap_b': #Trapezoid Array: Broadside Config
C1_x = np.subtract(xgrid, ps / 2.0)
C1_y = np.add(ygrid, ps2 / 2.0)
C1_z = np.zeros(C1_x.shape)
P1_x = np.subtract(xgrid, ps1 / 2.0)
P1_y = np.subtract(ygrid, ps2 / 2.0)
P1_z = np.zeros(P1_x.shape)
P2_x = np.add(xgrid, ps1 / 2.0)
P2_y = np.subtract(ygrid, ps2 / 2.0)
P2_z = np.zeros(P2_x.shape)
c1p1_p = ps
c1p2_p = np.sqrt(np.square(ps2) + np.square(ps1-(ps1-ps) / 2))
p1c2_p = np.sqrt(np.square(ps2) + np.square(ps1-(ps1-ps) / 2))
p2c2_p = ps1
rel_p = np.sqrt((ps1 * ps) + np.sqrt(np.square(ps1 - ps) / 2.0 + \
np.square(ps2)))/(np.sqrt((ps1 * ps) + np.sqrt(np.square(ps1 - ps) / 2.0 + \
np.square(ps2))) - 1.0)
if array == 'trap_t':#Trapezoid Array: Theta config
C1_x = np.subtract(xgrid, ps / 2.0)
C1_y = np.add(ygrid, ps2 / 2.0)
C1_z = np.zeros(C1_x.shape)
P1_x = np.add(xgrid, ps / 2.0)
P1_y = np.add(ygrid, ps2 / 2.0)
P1_z = np.zeros(P1_x.shape)
P2_x = np.subtract(xgrid, ps1 / 2.0)
P2_y = np.subtract(ygrid, ps2 / 2.0)
P2_z = np.zeros(P1_x.shape)
c1p1_p = ps
c1p2_p = np.sqrt(np.square(ps2) + np.square(ps1-(ps1-ps) / 2))
p1c2_p = np.sqrt(np.square(ps2) + np.square(ps1-(ps1-ps) / 2))
p2c2_p = ps1
rel_p = np.sqrt((ps1 * ps) + np.sqrt(np.square(ps1 - ps) / 2.0 + \
np.square(ps2)))/(np.sqrt((ps1 * ps) + np.sqrt(np.square(ps1 - ps) / 2.0 + \
np.square(ps2))) - 1.0)
resistivity_sum = np.ones(C1_x.shape)
for xyz in sphere_xyz:
c1_x_delta = np.subtract(C1_x,sphere_x) #change in x-between C1(x) and sphere(x)
c1_y_delta = np.subtract(C1_y,sphere_y) #change in y-between C1(y) and sphere(y)
c1_z_delta = np.add(C1_z,sphere_z) #change in z-between C1(z) and sphere(z)
c1_l = np.sqrt(np.square(c1_x_delta)+ np.square(c1_y_delta)) #horizontal (x,y) distance between C1 and centre of sphere
c1_s = np.sqrt(np.square(c1_z_delta) + np.square(c1_x_delta) + np.square(c1_y_delta)) #relative depth of sphere
c1_r = np.sqrt(np.square(c1_l)+np.square(c1_s)) #distance to sphere
p1_x_delta = np.subtract(P1_x,sphere_x) #same as previous, but for P1
p1_y_delta = np.subtract(P1_y,sphere_y)
p1_z_delta = np.add(P1_z,sphere_z)
p1_l = np.sqrt(np.square(p1_x_delta)+ np.square(p1_y_delta)) #horizontal (x,y) distance between P1 and centre of sphere
p1_s = np.sqrt(np.square(p1_z_delta) + np.square(p1_x_delta) + np.square(p1_y_delta)) #relative depth of sphere
p1_r = np.sqrt(np.square(p1_l)+np.square(p1_s)) #distance to sphere
p2_x_delta = np.subtract(P2_x,sphere_x)
p2_y_delta = np.subtract(P2_y,sphere_y)
p2_z_delta = np.add(P2_z,sphere_z)
p2_l = np.sqrt(np.square(p2_x_delta)+ np.square(p2_y_delta)) #horizontal (x,y) distance between P2 and centre of sphere
p2_s = np.sqrt(np.square(p2_z_delta) + np.square(p2_x_delta) + np.square(p2_y_delta)) #relative depth of sphere
p2_r = np.sqrt(np.square(p2_l)+np.square(p2_s))
#Calculating the geometry factors for the probe pairs:
c1p1_gf = (c1_r/(np.sqrt(np.square(c1_s)*np.square(np.square(c1_r)-1.0)+ \
np.square((np.square(c1_r)*(c1p1_p-c1_l))+c1_l))))-(1.0/(c1_r*(np.sqrt(np.square(c1_r)\
+np.square(c1p1_p)-2.0*c1_l*c1p1_p))))
c1p2_gf =(c1_r/(np.sqrt(np.square(c1_s)*np.square(np.square(c1_r)-1.0)+ \
np.square((np.square(c1_r)*(c1p2_p-c1_l))+c1_l))))-(1.0/(c1_r*(np.sqrt(np.square(c1_r)\
+np.square(c1p2_p)-2.0*c1_l*c1p2_p))))
p1c2_gf = (p1_r/(np.sqrt(np.square(p1_s)*np.square(np.square(p1_r)-1.0)+\
np.square((np.square(p1_r)*(p1c2_p-p1_l))+p1_l))))-(1.0/(p1_r*(np.sqrt(np.square(p1_r)\
+np.square(p1c2_p)-2.0*p1_l*p1c2_p))))
p2c2_gf = (p2_r/(np.sqrt(np.square(p2_s)*np.square(np.square(p2_r)-1.0)+\
np.square((np.square(p2_r)*(p2c2_p-p2_l))+p2_l))))-(1.0/(p2_r*(np.sqrt(np.square(p2_r)\
+np.square(p2c2_p)-2.0*p2_l*p2c2_p))))
#Calculating the probe pairs resistivity responses:
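    # The combined geometry factor superposes the four electrode-pair terms
    # (C1P1 - C1P2 - P1C2 + P2C2); the anomaly is then scaled by the sphere's
    # resistivity contrast, the array scaling factor rel_p and the probe spacing ps.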
gf = c1p1_gf - c1p2_gf - p1c2_gf + p2c2_gf
resistivity = contrast * rel_p * gf * ps
response = np.add(resistivity_sum, resistivity)
'''Plot Results: 2D'''
fig, ax = plt.subplots()
p = ax.pcolor(xgrid, ygrid, response, cmap=plt.cm.Greys)
cb = fig.colorbar(p, ax=ax)
ax.set_xlabel('X Position')
ax.set_ylabel('Y Position')
plt.title(array+': Insulating Feature')
'''Plot Results: Wireframe'''
fig = plt.figure(figsize=(10, 7))
ax = fig.gca(projection='3d')
surf = ax.plot_wireframe(xgrid, ygrid, response, rstride=4, cstride=4, color='b', alpha=0.7)
ax.set_xlabel('X Position')
ax.set_ylabel('Y Position')
ax.set_zlabel('Response')
plt.title('Twin-Probe Broadside: Insulating Feature')
plt.show()
| gpl-2.0 |
liberatorqjw/scikit-learn | sklearn/datasets/tests/test_svmlight_format.py | 16 | 10538 | from bz2 import BZ2File
import gzip
from io import BytesIO
import numpy as np
import os
import shutil
from tempfile import NamedTemporaryFile
from sklearn.externals.six import b
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_in
import sklearn
from sklearn.datasets import (load_svmlight_file, load_svmlight_files,
dump_svmlight_file)
currdir = os.path.dirname(os.path.abspath(__file__))
datafile = os.path.join(currdir, "data", "svmlight_classification.txt")
multifile = os.path.join(currdir, "data", "svmlight_multilabel.txt")
invalidfile = os.path.join(currdir, "data", "svmlight_invalid.txt")
invalidfile2 = os.path.join(currdir, "data", "svmlight_invalid_order.txt")
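# The svmlight / libsvm text format used by these fixtures stores one sample per
# line, roughly (sketch):
#   <label> <feature_id>:<value> <feature_id>:<value> ...
# with an optional qid:<id> term right after the label (see test_load_with_qid).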
def test_load_svmlight_file():
X, y = load_svmlight_file(datafile)
# test X's shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 21)
assert_equal(y.shape[0], 6)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2), (0, 15, 1.5),
(1, 5, 1.0), (1, 12, -3),
(2, 20, 27)):
assert_equal(X[i, j], val)
# tests X's zero values
assert_equal(X[0, 3], 0)
assert_equal(X[0, 5], 0)
assert_equal(X[1, 8], 0)
assert_equal(X[1, 16], 0)
assert_equal(X[2, 18], 0)
# test can change X's values
X[0, 2] *= 2
assert_equal(X[0, 2], 5)
# test y
assert_array_equal(y, [1, 2, 3, 4, 1, 2])
def test_load_svmlight_file_fd():
# test loading from file descriptor
X1, y1 = load_svmlight_file(datafile)
fd = os.open(datafile, os.O_RDONLY)
try:
X2, y2 = load_svmlight_file(fd)
assert_array_equal(X1.data, X2.data)
assert_array_equal(y1, y2)
finally:
os.close(fd)
def test_load_svmlight_file_multilabel():
X, y = load_svmlight_file(multifile, multilabel=True)
assert_equal(y, [(0, 1), (2,), (1, 2)])
def test_load_svmlight_files():
X_train, y_train, X_test, y_test = load_svmlight_files([datafile] * 2,
dtype=np.float32)
assert_array_equal(X_train.toarray(), X_test.toarray())
assert_array_equal(y_train, y_test)
assert_equal(X_train.dtype, np.float32)
assert_equal(X_test.dtype, np.float32)
X1, y1, X2, y2, X3, y3 = load_svmlight_files([datafile] * 3,
dtype=np.float64)
assert_equal(X1.dtype, X2.dtype)
assert_equal(X2.dtype, X3.dtype)
assert_equal(X3.dtype, np.float64)
def test_load_svmlight_file_n_features():
X, y = load_svmlight_file(datafile, n_features=22)
    # test X's shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 22)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2),
(1, 5, 1.0), (1, 12, -3)):
assert_equal(X[i, j], val)
# 21 features in file
assert_raises(ValueError, load_svmlight_file, datafile, n_features=20)
def test_load_compressed():
X, y = load_svmlight_file(datafile)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".gz") as tmp:
tmp.close() # necessary under windows
with open(datafile, "rb") as f:
shutil.copyfileobj(f, gzip.open(tmp.name, "wb"))
Xgz, ygz = load_svmlight_file(tmp.name)
assert_array_equal(X.toarray(), Xgz.toarray())
assert_array_equal(y, ygz)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".bz2") as tmp:
tmp.close() # necessary under windows
with open(datafile, "rb") as f:
shutil.copyfileobj(f, BZ2File(tmp.name, "wb"))
Xbz, ybz = load_svmlight_file(tmp.name)
assert_array_equal(X.toarray(), Xbz.toarray())
assert_array_equal(y, ybz)
@raises(ValueError)
def test_load_invalid_file():
load_svmlight_file(invalidfile)
@raises(ValueError)
def test_load_invalid_order_file():
load_svmlight_file(invalidfile2)
@raises(ValueError)
def test_load_zero_based():
f = BytesIO(b("-1 4:1.\n1 0:1\n"))
load_svmlight_file(f, zero_based=False)
def test_load_zero_based_auto():
data1 = b("-1 1:1 2:2 3:3\n")
data2 = b("-1 0:0 1:1\n")
f1 = BytesIO(data1)
X, y = load_svmlight_file(f1, zero_based="auto")
assert_equal(X.shape, (1, 3))
f1 = BytesIO(data1)
f2 = BytesIO(data2)
X1, y1, X2, y2 = load_svmlight_files([f1, f2], zero_based="auto")
assert_equal(X1.shape, (1, 4))
assert_equal(X2.shape, (1, 4))
def test_load_with_qid():
# load svmfile with qid attribute
data = b("""
3 qid:1 1:0.53 2:0.12
2 qid:1 1:0.13 2:0.1
7 qid:2 1:0.87 2:0.12""")
X, y = load_svmlight_file(BytesIO(data), query_id=False)
assert_array_equal(y, [3, 2, 7])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
res1 = load_svmlight_files([BytesIO(data)], query_id=True)
res2 = load_svmlight_file(BytesIO(data), query_id=True)
for X, y, qid in (res1, res2):
assert_array_equal(y, [3, 2, 7])
assert_array_equal(qid, [1, 1, 2])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
@raises(ValueError)
def test_load_invalid_file2():
load_svmlight_files([datafile, invalidfile, datafile])
@raises(TypeError)
def test_not_a_filename():
# in python 3 integers are valid file opening arguments (taken as unix
# file descriptors)
load_svmlight_file(.42)
@raises(IOError)
def test_invalid_filename():
load_svmlight_file("trou pic nic douille")
def test_dump():
Xs, y = load_svmlight_file(datafile)
Xd = Xs.toarray()
# slicing a csr_matrix can unsort its .indices, so test that we sort
# those correctly
Xsliced = Xs[np.arange(Xs.shape[0])]
for X in (Xs, Xd, Xsliced):
for zero_based in (True, False):
for dtype in [np.float32, np.float64, np.int32]:
f = BytesIO()
# we need to pass a comment to get the version info in;
# LibSVM doesn't grok comments so they're not put in by
# default anymore.
dump_svmlight_file(X.astype(dtype), y, f, comment="test",
zero_based=zero_based)
f.seek(0)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in("scikit-learn %s" % sklearn.__version__, comment)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in(["one", "zero"][zero_based] + "-based", comment)
X2, y2 = load_svmlight_file(f, dtype=dtype,
zero_based=zero_based)
assert_equal(X2.dtype, dtype)
assert_array_equal(X2.sorted_indices().indices, X2.indices)
if dtype == np.float32:
assert_array_almost_equal(
# allow a rounding error at the last decimal place
Xd.astype(dtype), X2.toarray(), 4)
else:
assert_array_almost_equal(
# allow a rounding error at the last decimal place
Xd.astype(dtype), X2.toarray(), 15)
assert_array_equal(y, y2)
def test_dump_concise():
one = 1
two = 2.1
three = 3.01
exact = 1.000000000000001
# loses the last decimal place
almost = 1.0000000000000001
X = [[one, two, three, exact, almost],
[1e9, 2e18, 3e27, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]]
y = [one, two, three, exact, almost]
f = BytesIO()
dump_svmlight_file(X, y, f)
f.seek(0)
# make sure it's using the most concise format possible
assert_equal(f.readline(),
b("1 0:1 1:2.1 2:3.01 3:1.000000000000001 4:1\n"))
assert_equal(f.readline(), b("2.1 0:1000000000 1:2e+18 2:3e+27\n"))
assert_equal(f.readline(), b("3.01 \n"))
assert_equal(f.readline(), b("1.000000000000001 \n"))
assert_equal(f.readline(), b("1 \n"))
f.seek(0)
# make sure it's correct too :)
X2, y2 = load_svmlight_file(f)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
def test_dump_comment():
X, y = load_svmlight_file(datafile)
X = X.toarray()
f = BytesIO()
ascii_comment = "This is a comment\nspanning multiple lines."
dump_svmlight_file(X, y, f, comment=ascii_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
# XXX we have to update this to support Python 3.x
utf8_comment = b("It is true that\n\xc2\xbd\xc2\xb2 = \xc2\xbc")
f = BytesIO()
assert_raises(UnicodeDecodeError,
dump_svmlight_file, X, y, f, comment=utf8_comment)
unicode_comment = utf8_comment.decode("utf-8")
f = BytesIO()
dump_svmlight_file(X, y, f, comment=unicode_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
f = BytesIO()
assert_raises(ValueError,
dump_svmlight_file, X, y, f, comment="I've got a \0.")
def test_dump_invalid():
X, y = load_svmlight_file(datafile)
f = BytesIO()
y2d = [y]
assert_raises(ValueError, dump_svmlight_file, X, y2d, f)
f = BytesIO()
assert_raises(ValueError, dump_svmlight_file, X, y[:-1], f)
def test_dump_query_id():
# test dumping a file with query_id
X, y = load_svmlight_file(datafile)
X = X.toarray()
query_id = np.arange(X.shape[0]) // 2
f = BytesIO()
dump_svmlight_file(X, y, f, query_id=query_id, zero_based=True)
f.seek(0)
X1, y1, query_id1 = load_svmlight_file(f, query_id=True, zero_based=True)
assert_array_almost_equal(X, X1.toarray())
assert_array_almost_equal(y, y1)
assert_array_almost_equal(query_id, query_id1)
| bsd-3-clause |
suhelhammoud/omr | src/experimental/distant.py | 1 | 4471 | import numpy as np
import cv2
from matplotlib import pyplot as plt
# from skimage.filters import threshold_adaptive
from utils.learn import detect, detect2
def applyMor(gray):
kernel = np.ones((5, 5), np.uint8)
gray = cv2.morphologyEx(gray, cv2.MORPH_OPEN, kernel)
# gray = cv2.cvtColor(gray, cv2.COLOR_BGR2GRAY)
return gray
def centers():
# img = cv2.imread('data/out/next.jpg')
img = cv2.imread('data/out/sections/sec_answers.jpg')
# img = cv2.imread('data/out/sections/sec_two.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# gray =cv2.medianBlur(gray, 9 , 0)
# kernel = np.ones((1, 1), np.uint8)
# gray = cv2.dilate(gray, kernel, iterations=1)
cv2.imshow('grabi', gray)
thresh = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, \
cv2.THRESH_BINARY, 29, 0)
#
# # noise removal
# kernel = np.ones((2, 2), np.uint8)
#
# opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations= 1)
#
# # sure background area
# sure_bg = opening
# sure_bg = cv2.dilate(opening, kernel, iterations=1)
# #
# # Finding sure foreground area
dist_transform = cv2.distanceTransform(thresh, cv2.DIST_L2, 3)
ret, sure_fg = cv2.threshold(dist_transform, 0.3 * dist_transform.max(), 255, 0)
# Finding unknown region
sure_fg = np.uint8(sure_fg)
cv2.imshow('org', img)
cv2.imshow("sure_fg", sure_fg)
inverted = cv2.bitwise_not(sure_fg)
# inverted =cv2.medianBlur(inverted, 5 , 0)
kernel = np.ones((3, 3), np.uint8)
inverted = cv2.dilate(inverted, kernel, iterations=1)
cv2.imshow("inverted", inverted)
# blob(inverted)
cv2.imwrite("data/out/inverted.jpg", inverted)
blob(inverted)
cv2.waitKey(0)
cv2.destroyAllWindows()
def blob(im):
params = cv2.SimpleBlobDetector_Params()
# Change thresholds
params.minThreshold = 100
params.maxThreshold = 255
# Filter by Area.
params.filterByArea = True
params.minArea = 100
# Filter by Circularity
    # params.filterByCircularity = True
# params.minCircularity = 0.1
# Filter by Convexity
params.filterByConvexity = True
params.minConvexity = 0.1
# Filter by Inertia
# params.filterByInertia = True
# params.minInertiaRatio = 0.01
# Create a detector with the parameters
ver = (cv2.__version__).split('.')
if int(ver[0]) < 3:
detector = cv2.SimpleBlobDetector(params)
else:
detector = cv2.SimpleBlobDetector_create(params)
# Detect blobs.
keypoints = detector.detect(im)
for kp in keypoints:
        print(str(kp.pt[0]) + "\t" + str(kp.pt[1]) + "\t" + str(kp.size))
# Draw detected blobs as red circles.
# cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures
# the size of the circle corresponds to the size of blob
im_with_keypoints = cv2.drawKeypoints(im, keypoints, np.array([]), (0, 255, 0),
cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
# Show blobs
cv2.imshow("Keypoints", im_with_keypoints)
cv2.imwrite("data/out/keypoints.jpg", im_with_keypoints)
# cv2.waitKey(0)
def test():
# img = cv2.imread('data/out/sections/sec_one.jpg')
img = cv2.imread('data/out/next.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# ret, thresh = cv2.threshold(gray,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
# warped = threshold_adaptive(gray, 401, offset = 5)
# warped = warped.astype("uint8") * 255
th2 = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, \
cv2.THRESH_BINARY, 29, 0)
th2 = applyMor(th2)
#
# th3 = cv2.adaptiveThreshold(gray,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\
# cv2.THRESH_BINARY,29,0)
cv2.imshow('gray', gray)
cv2.imshow('th2', th2)
# cv2.imshow('th3', th3)
cv2.waitKey(0)
#
# # noise removal
# kernel = np.ones((3,3),np.uint8)
# opening = cv2.morphologyEx(thresh,cv2.MORPH_OPEN,kernel, iterations = 2)
# # sure background area
# sure_bg = cv2.dilate(opening,kernel,iterations=3)
# # Finding sure foreground area
# dist_transform = cv2.distanceTransform(opening,cv2.DIST_L2,5)
# ret, sure_fg = cv2.threshold(dist_transform,0.7*dist_transform.max(),255,0)
# # Finding unknown region
# sure_fg = np.uint8(sure_fg)
# unknown = cv2.subtract(sure_bg,sure_fg)
#
if __name__ == '__main__':
centers()
| apache-2.0 |
ishank08/scikit-learn | examples/tree/plot_iris.py | 86 | 1965 | """
================================================================
Plot the decision surface of a decision tree on the iris dataset
================================================================
Plot the decision surface of a decision tree trained on pairs
of features of the iris dataset.
See :ref:`decision tree <tree>` for more information on the estimator.
For each pair of iris features, the decision tree learns decision
boundaries made of combinations of simple thresholding rules inferred from
the training samples.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
# Parameters
n_classes = 3
plot_colors = "bry"
plot_step = 0.02
# Load data
iris = load_iris()
for pairidx, pair in enumerate([[0, 1], [0, 2], [0, 3],
[1, 2], [1, 3], [2, 3]]):
# We only take the two corresponding features
X = iris.data[:, pair]
y = iris.target
# Train
clf = DecisionTreeClassifier().fit(X, y)
# Plot the decision boundary
plt.subplot(2, 3, pairidx + 1)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.xlabel(iris.feature_names[pair[0]])
plt.ylabel(iris.feature_names[pair[1]])
plt.axis("tight")
# Plot the training points
for i, color in zip(range(n_classes), plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=plt.cm.Paired)
plt.axis("tight")
plt.suptitle("Decision surface of a decision tree using paired features")
plt.legend()
plt.show()
| bsd-3-clause |
indhub/mxnet | example/gluon/dcgan.py | 7 | 8812 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import matplotlib as mpl
mpl.use('Agg')
from matplotlib import pyplot as plt
import argparse
import mxnet as mx
from mxnet import gluon
from mxnet.gluon import nn
from mxnet import autograd
import numpy as np
import logging
from datetime import datetime
import os
import time
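# The two helpers below tile a batch of (real or generated) images into a single
# buffer and save the result with matplotlib for visual inspection during training.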
def fill_buf(buf, i, img, shape):
n = buf.shape[0]//shape[1]
m = buf.shape[1]//shape[0]
sx = (i%m)*shape[0]
sy = (i//m)*shape[1]
buf[sy:sy+shape[1], sx:sx+shape[0], :] = img
return None
def visual(title, X, name):
assert len(X.shape) == 4
X = X.transpose((0, 2, 3, 1))
X = np.clip((X - np.min(X))*(255.0/(np.max(X) - np.min(X))), 0, 255).astype(np.uint8)
n = np.ceil(np.sqrt(X.shape[0]))
buff = np.zeros((int(n*X.shape[1]), int(n*X.shape[2]), int(X.shape[3])), dtype=np.uint8)
for i, img in enumerate(X):
fill_buf(buff, i, img, X.shape[1:3])
buff = buff[:,:,::-1]
plt.imshow(buff)
plt.title(title)
plt.savefig(name)
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', type=str, default='cifar10', help='dataset to use. options are cifar10 and imagenet.')
parser.add_argument('--batch-size', type=int, default=64, help='input batch size')
parser.add_argument('--nz', type=int, default=100, help='size of the latent z vector')
parser.add_argument('--ngf', type=int, default=64)
parser.add_argument('--ndf', type=int, default=64)
parser.add_argument('--nepoch', type=int, default=25, help='number of epochs to train for')
parser.add_argument('--lr', type=float, default=0.0002, help='learning rate, default=0.0002')
parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. default=0.5')
parser.add_argument('--cuda', action='store_true', help='enables cuda')
parser.add_argument('--ngpu', type=int, default=1, help='number of GPUs to use')
parser.add_argument('--netG', default='', help="path to netG (to continue training)")
parser.add_argument('--netD', default='', help="path to netD (to continue training)")
parser.add_argument('--outf', default='./results', help='folder to output images and model checkpoints')
parser.add_argument('--check-point', default=True, help="save results at each epoch or not")
opt = parser.parse_args()
print(opt)
logging.basicConfig(level=logging.DEBUG)
ngpu = int(opt.ngpu)
nz = int(opt.nz)
ngf = int(opt.ngf)
ndf = int(opt.ndf)
nc = 3
if opt.cuda:
ctx = mx.gpu(0)
else:
ctx = mx.cpu()
check_point = bool(opt.check_point)
outf = opt.outf
if not os.path.exists(outf):
os.makedirs(outf)
def transformer(data, label):
# resize to 64x64
data = mx.image.imresize(data, 64, 64)
# transpose from (64, 64, 3) to (3, 64, 64)
data = mx.nd.transpose(data, (2,0,1))
# normalize to [-1, 1]
data = data.astype(np.float32)/128 - 1
# if image is greyscale, repeat 3 times to get RGB image.
if data.shape[0] == 1:
data = mx.nd.tile(data, (3, 1, 1))
return data, label
train_data = gluon.data.DataLoader(
gluon.data.vision.MNIST('./data', train=True, transform=transformer),
batch_size=opt.batch_size, shuffle=True, last_batch='discard')
val_data = gluon.data.DataLoader(
gluon.data.vision.MNIST('./data', train=False, transform=transformer),
batch_size=opt.batch_size, shuffle=False)
# build the generator
netG = nn.Sequential()
with netG.name_scope():
# input is Z, going into a convolution
netG.add(nn.Conv2DTranspose(ngf * 8, 4, 1, 0, use_bias=False))
netG.add(nn.BatchNorm())
netG.add(nn.Activation('relu'))
# state size. (ngf*8) x 4 x 4
netG.add(nn.Conv2DTranspose(ngf * 4, 4, 2, 1, use_bias=False))
netG.add(nn.BatchNorm())
netG.add(nn.Activation('relu'))
    # state size. (ngf*4) x 8 x 8
netG.add(nn.Conv2DTranspose(ngf * 2, 4, 2, 1, use_bias=False))
netG.add(nn.BatchNorm())
netG.add(nn.Activation('relu'))
    # state size. (ngf*2) x 16 x 16
netG.add(nn.Conv2DTranspose(ngf, 4, 2, 1, use_bias=False))
netG.add(nn.BatchNorm())
netG.add(nn.Activation('relu'))
    # state size. (ngf) x 32 x 32
netG.add(nn.Conv2DTranspose(nc, 4, 2, 1, use_bias=False))
netG.add(nn.Activation('tanh'))
# state size. (nc) x 64 x 64
# build the discriminator
netD = nn.Sequential()
with netD.name_scope():
# input is (nc) x 64 x 64
netD.add(nn.Conv2D(ndf, 4, 2, 1, use_bias=False))
netD.add(nn.LeakyReLU(0.2))
# state size. (ndf) x 32 x 32
netD.add(nn.Conv2D(ndf * 2, 4, 2, 1, use_bias=False))
netD.add(nn.BatchNorm())
netD.add(nn.LeakyReLU(0.2))
    # state size. (ndf*2) x 16 x 16
netD.add(nn.Conv2D(ndf * 4, 4, 2, 1, use_bias=False))
netD.add(nn.BatchNorm())
netD.add(nn.LeakyReLU(0.2))
    # state size. (ndf*4) x 8 x 8
netD.add(nn.Conv2D(ndf * 8, 4, 2, 1, use_bias=False))
netD.add(nn.BatchNorm())
netD.add(nn.LeakyReLU(0.2))
    # state size. (ndf*8) x 4 x 4
netD.add(nn.Conv2D(2, 4, 1, 0, use_bias=False))
# loss
loss = gluon.loss.SoftmaxCrossEntropyLoss()
# initialize the generator and the discriminator
netG.initialize(mx.init.Normal(0.02), ctx=ctx)
netD.initialize(mx.init.Normal(0.02), ctx=ctx)
# trainer for the generator and the discriminator
trainerG = gluon.Trainer(netG.collect_params(), 'adam', {'learning_rate': opt.lr, 'beta1': opt.beta1})
trainerD = gluon.Trainer(netD.collect_params(), 'adam', {'learning_rate': opt.lr, 'beta1': opt.beta1})
# ============printing==============
real_label = mx.nd.ones((opt.batch_size,), ctx=ctx)
fake_label = mx.nd.zeros((opt.batch_size,), ctx=ctx)
metric = mx.metric.Accuracy()
print('Training... ')
stamp = datetime.now().strftime('%Y_%m_%d-%H_%M')
iter = 0
for epoch in range(opt.nepoch):
tic = time.time()
btic = time.time()
for data, _ in train_data:
############################
# (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
###########################
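        # With the 2-way softmax cross-entropy used here, this objective is realised
        # as loss(D(x), real_label) + loss(D(G(z)), fake_label), i.e. errD_real + errD_fake.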
# train with real_t
data = data.as_in_context(ctx)
noise = mx.nd.random.normal(0, 1, shape=(opt.batch_size, nz, 1, 1), ctx=ctx)
with autograd.record():
output = netD(data)
output = output.reshape((opt.batch_size, 2))
errD_real = loss(output, real_label)
metric.update([real_label,], [output,])
fake = netG(noise)
output = netD(fake.detach())
output = output.reshape((opt.batch_size, 2))
errD_fake = loss(output, fake_label)
errD = errD_real + errD_fake
errD.backward()
metric.update([fake_label,], [output,])
trainerD.step(opt.batch_size)
############################
# (2) Update G network: maximize log(D(G(z)))
###########################
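        # Non-saturating generator loss: rather than minimising log(1 - D(G(z))),
        # the generator is updated with loss(D(G(z)), real_label).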
with autograd.record():
output = netD(fake)
output = output.reshape((-1, 2))
errG = loss(output, real_label)
errG.backward()
trainerG.step(opt.batch_size)
name, acc = metric.get()
# logging.info('speed: {} samples/s'.format(opt.batch_size / (time.time() - btic)))
logging.info('discriminator loss = %f, generator loss = %f, binary training acc = %f at iter %d epoch %d' %(mx.nd.mean(errD).asscalar(), mx.nd.mean(errG).asscalar(), acc, iter, epoch))
if iter % 1 == 0:
visual('gout', fake.asnumpy(), name=os.path.join(outf,'fake_img_iter_%d.png' %iter))
visual('data', data.asnumpy(), name=os.path.join(outf,'real_img_iter_%d.png' %iter))
iter = iter + 1
btic = time.time()
name, acc = metric.get()
metric.reset()
logging.info('\nbinary training acc at epoch %d: %s=%f' % (epoch, name, acc))
logging.info('time: %f' % (time.time() - tic))
if check_point:
netG.save_parameters(os.path.join(outf,'generator_epoch_%d.params' %epoch))
netD.save_parameters(os.path.join(outf,'discriminator_epoch_%d.params' % epoch))
netG.save_parameters(os.path.join(outf, 'generator.params'))
netD.save_parameters(os.path.join(outf, 'discriminator.params'))
| apache-2.0 |
dsullivan7/scikit-learn | examples/linear_model/plot_iris_logistic.py | 283 | 1678 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logistic Regression 3-class Classifier
=========================================================
Shown below are the decision boundaries of a logistic-regression classifier on the
`iris <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ dataset. The
datapoints are colored according to their labels.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
h = .02 # step size in the mesh
logreg = linear_model.LogisticRegression(C=1e5)
# we create an instance of the logistic regression classifier and fit the data.
logreg.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1, figsize=(4, 3))
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
imochaz/epilepsy-system | seizure detection code/Stacked Autoencoders for Seizure Detection/pylearn2ext/chbmit.py | 1 | 14998 | import numpy as np
import os
import pickle
import pandas as pd
from scipy.io import loadmat
from pylearn2.format.target_format import OneHotFormatter
from scipy.signal import butter, filtfilt
from sklearn import preprocessing
from pylearn2.datasets.dense_design_matrix import DenseDesignMatrix, DefaultViewConverter
class CHBMIT(DenseDesignMatrix):
# Representative channel MATLAB indices (subtract 1 before using them as Python indices)
rep_channel_matlab_idx = {
1: np.asarray([7,8,11,13,14,21,22]),
3: np.asarray([3,4,6,16,19]),
5: np.asarray([4,5,7,8,11,12,17,18]),
8: np.asarray([6,7,8,10,11,17,18]),
10: np.asarray([2,3,19,20,21]),
20: np.asarray([1,2,3,19,20,21,24,25,26,27,28])
}
def __init__(self, patient_id, which_set, preprocessor_path, data_dir, transform, window_size, batch_size,
specified_files=None, leave_one_out_file=None, axes=('b', 0, 1, 'c'), default_seed=0):
"""
The CHBMIT dataset customized for leave-one-file-out cross validation.
Parameters
----------
patient_id : int
Patient ID.
which_set : string
Name used to specify which partition of the dataset to load (e.g., 'train', 'valid', or 'test').
If not specified, all data will be loaded.
preprocessor_path : string
File path to store the scaler for pre-processing the EEG data.
data_dir : string
Directory that stores the source EEG data.
transform : string
Specify how to transform the data. ('multiple_channels' | 'single_channel')
window_size : int
Size of each sample.
batch_size : int
Size of the batch, used for zero-padding to make the number of samples divisible by the batch size.
specified_files : dictionary
Dictionary specifying which files are used for training, validation and testing.
leave_one_out_file : int
Index of the withheld file.
axes : tuple
axes of the DenseDesignMatrix.
default_seed : int, optional
Seed for random.
For preprocessing, see more in
https://github.com/lisa-lab/pylearn2/blob/master/pylearn2/datasets/preprocessing.py
For customizing dataset, see more in
https://github.com/lisa-lab/pylearn2/blob/master/pylearn2/scripts/icml_2013_wrepl/emotions/emotions_dataset.py
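Examples
--------
An illustrative construction (the paths below are placeholders):
    dataset = CHBMIT(patient_id=1, which_set='train',
                     preprocessor_path='models/scaler.pkl',
                     data_dir='/path/to/chbmit',
                     transform='single_channel',
                     window_size=256, batch_size=20)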
"""
self.patient_id = patient_id
self.data_dir = data_dir
self.preprocessor_path = preprocessor_path
self.window_size = window_size
self.n_classes = 2
self.default_seed = default_seed
self.transform = transform
self.specified_files = specified_files
self.leave_one_out_file = leave_one_out_file
self.batch_size = batch_size
raw_X, raw_y = self._load_data(which_set=which_set)
self.raw_X = raw_X
self.raw_y = raw_y
# Filter representative channels
if not(self.rep_channel_matlab_idx.get(patient_id) is None):
# Map the representative MATLAB indices to Python indices.
# The raw data read from the .mat file has already had inactive channels removed,
# so we look up each representative MATLAB index in the list of used channels
# and use its position there as the Python index.
self.rep_channel_python_idx = np.empty(0, dtype=int)
for ch in self.rep_channel_matlab_idx[patient_id]:
if ch in self.used_channel_matlab_idx:
ch_python_idx = np.where(ch == self.used_channel_matlab_idx)[0]
self.rep_channel_python_idx = np.append(self.rep_channel_python_idx, ch_python_idx)
else:
raise Exception('There is no representative channel ' + str(ch) + ' in the input data.')
assert np.all(self.used_channel_matlab_idx[self.rep_channel_python_idx] ==
self.rep_channel_matlab_idx[patient_id])
raw_X = raw_X[self.rep_channel_python_idx, :]
self.n_channels = self.rep_channel_python_idx.size
print 'Used channel MATLAB index:', self.used_channel_matlab_idx
print 'Representative channel MATLAB index:', self.rep_channel_matlab_idx[patient_id]
print 'Representative channel Python index:', self.rep_channel_python_idx
self.sample_shape = [self.window_size, 1, self.n_channels]
self.sample_size = np.prod(self.sample_shape)
# Preprocessing
if which_set == 'train':
scaler = preprocessing.StandardScaler()
scaler = scaler.fit(raw_X.transpose())
with open(self.preprocessor_path, 'w') as f:
pickle.dump(scaler, f)
scaled_X = scaler.transform(raw_X.transpose()).transpose()
else:
with open(self.preprocessor_path) as f:
scaler = pickle.load(f)
scaled_X = scaler.transform(raw_X.transpose()).transpose()
# Transform data into format usable by the network
if self.transform == 'multiple_channels':
X, y, view_converter = self._transform_multi_channel_data(X=scaled_X, y=raw_y)
elif self.transform == 'single_channel':
X, y, view_converter = self._transform_single_channel_data(X=scaled_X, y=raw_y)
else:
raise Exception('Invalid transform mode.')
# Zero-padding if the batch size is not compatible
extra = (batch_size - X.shape[0]) % batch_size
assert (X.shape[0] + extra) % batch_size == 0
if extra > 0:
X = np.concatenate((X, np.zeros((extra, X.shape[1]),
dtype=float)),
axis=0)
y = np.concatenate((y, np.zeros((extra, y.shape[1]),
dtype=int)),
axis=0)
assert X.shape[0] % batch_size == 0
assert y.size % batch_size == 0
# Initialize DenseDesignMatrix
DenseDesignMatrix.__init__(self, X=X, y=y,
view_converter=view_converter,
axes=('b', 0, 1, 'c'))
def _load_data(self, which_set):
# Get seizure files
seizure_files_df = pd.read_table(os.path.join(self.data_dir, 'RECORDS-WITH-SEIZURES.txt'),
sep=' ', names=['filename', 'period'], header=None)
if self.patient_id < 10:
search_str = 'chb0' + str(self.patient_id)
else:
search_str = 'chb' + str(self.patient_id)
seizure_files = seizure_files_df['filename'][seizure_files_df['filename'].str.contains(search_str)]
seizure_files = seizure_files.str.replace('.edf', '_mod.mat', case=False).values
print 'Seizure files\n', seizure_files
# Train, cv and test files
if not (self.specified_files is None):
train_files = seizure_files[self.specified_files['train_files']]
cv_files = seizure_files[self.specified_files['cv_files']]
test_files = seizure_files[self.specified_files['test_files']]
elif not (self.leave_one_out_file is None):
train_files = np.setdiff1d(seizure_files, seizure_files[self.leave_one_out_file])
cv_files = seizure_files[self.leave_one_out_file:self.leave_one_out_file+1]
test_files = seizure_files[self.leave_one_out_file:self.leave_one_out_file+1]
else:
np.random.seed(self.default_seed)
permute_files = np.random.permutation(seizure_files)
train_files = permute_files[:-2]
cv_files = permute_files[-2:-1]
test_files = permute_files[-1:]
print 'Train files\n', train_files
print 'CV files\n', cv_files
print 'Test files\n', test_files
print ''
if which_set == 'train':
print("Loading training data...")
files = train_files
elif which_set == 'valid':
print("Loading validation data...")
files = cv_files
elif which_set == 'test':
print("Loading test data...")
files = test_files
else:
raise ("Invalid set")
print files
sampling_rate = -1
n_channels = -1
X = None
y = np.empty(0, dtype=int)
seizure_seconds = np.empty(0, dtype=int)
total_seconds = 0
channel_labels = None
used_channel_matlab_idx = None
for f in files:
mat = loadmat(self.data_dir + '/' + f)
# Number of channels
if n_channels == -1:
n_channels = mat['X'].shape[0]
assert n_channels == mat['X'].shape[0]
# Channel labels
if channel_labels is None:
channel_labels = np.asarray([lb[0][0] for lb in mat['labels']])
assert np.all(channel_labels == np.asarray([lb[0][0] for lb in mat['labels']]))
# Channel index (MATLAB index, start from 1, not 0) used to filter active channels from the source files
if used_channel_matlab_idx is None:
used_channel_matlab_idx = mat['used_channel_idx'][0]
assert np.all(used_channel_matlab_idx == mat['used_channel_idx'][0])
# Sampling rate
if sampling_rate == -1:
sampling_rate = mat['sampling_rate'][0, 0]
assert sampling_rate == mat['sampling_rate'][0, 0]
# EEG data
if X is None:
X = mat['X']
else:
X = np.concatenate((X, mat['X']), axis=1)
# Seizure labels
y = np.append(y, mat['y'][0, :])
# Store index of seizure seconds
seizure_seconds = np.append(seizure_seconds, mat['seizure_second'][0, :] + total_seconds)
# Collect total seconds
total_seconds = total_seconds + (mat['X'].shape[1] / (sampling_rate * 1.0))
assert total_seconds == X.shape[1] / sampling_rate
# Zero-padding if the window size is not compatible
extra = (self.window_size - X.shape[1]) % self.window_size
assert (X.shape[1] + extra) % self.window_size == 0
if extra > 0:
X = np.concatenate((X, np.zeros((X.shape[0], extra),
dtype=float)),
axis=1)
y = np.append(y, np.zeros(extra, dtype=int))
assert X.shape[1] % self.window_size == 0
assert y.size % self.window_size == 0
# Store metadata
self.sampling_rate = sampling_rate
self.n_channels = n_channels
self.seizure_seconds = seizure_seconds
self.total_seconds = total_seconds
self.channel_labels = channel_labels
self.used_channel_matlab_idx = used_channel_matlab_idx
print 'Seizure seconds:', self.seizure_seconds
return X, y
def _partition_data(self, X, y, partition_size):
partition_size = max(1, partition_size)
X_parts = np.asarray([X[:, i:i + partition_size] for i in range(0, X.shape[1], partition_size)])
y_parts = np.asarray([y[i:i + partition_size] for i in range(0, y.size, partition_size)])
return X_parts, y_parts
def _transform_multi_channel_data(self, X, y):
# Data partitioning
parted_X, parted_y = self._partition_data(X=X, y=y, partition_size=self.window_size)
transposed_X = np.transpose(parted_X, [0, 2, 1])
converted_X = np.reshape(transposed_X, (transposed_X.shape[0],
transposed_X.shape[1],
1,
transposed_X.shape[2]))
# Create view converter
view_converter = DefaultViewConverter(shape=self.sample_shape,
axes=('b', 0, 1, 'c'))
# Convert data into a design matrix
view_converted_X = view_converter.topo_view_to_design_mat(converted_X)
assert np.all(converted_X == view_converter.design_mat_to_topo_view(view_converted_X))
# Format the target into proper format
sum_y = np.sum(parted_y, axis=1)
sum_y[sum_y > 0] = 1
one_hot_formatter = OneHotFormatter(max_labels=self.n_classes)
hot_y = one_hot_formatter.format(sum_y)
return view_converted_X, hot_y, view_converter
def _transform_single_channel_data(self, X, y):
windowed_X = np.reshape(X, (-1, self.window_size))
windowed_y = np.reshape(y, (-1, self.window_size))
# Format the target into proper format
sum_y = np.sum(windowed_y, axis=1)
sum_y[sum_y > 0] = 1
# Duplicate the labels for all channels
dup_y = np.tile(sum_y, self.n_channels)
one_hot_formatter = OneHotFormatter(max_labels=self.n_classes)
hot_y = one_hot_formatter.format(dup_y)
return windowed_X, hot_y, None
if __name__ == '__main__':
dataset = CHBMIT(patient_id=1,
which_set='train',
preprocessor_path='../models/scaler.pkl',
data_dir='/Users/akara/Workspace/data/chbmit',
transform='single_channel',
window_size=256,
batch_size=20)
# dataset = CHBMIT(patient_id=1,
# which_set='train',
# preprocessor_path='../models/scaler.pkl',
# data_dir='/Users/akara/Workspace/data/chbmit',
# transform='single_channel',
# specified_files={
# 'train_files': np.asarray([0,1,2,3,4,5]),
# 'cv_files': np.asarray([6]),
# 'test_files': np.asarray([6])
# },
# window_size=256,
# batch_size=20)
# dataset = CHBMIT(patient_id=1,
# which_set='train',
# preprocessor_path='../models/scaler.pkl',
# data_dir='/Users/akara/Workspace/data/chbmit',
# transform='single_channel',
# leave_one_out_file=4,
# window_size=256,
# batch_size=20)
# from pylearn2ext.chbmit_eeg_dataset import ChbMitDatasetSDAE
# dataset2 = ChbMitDatasetSDAE(patient_id=1,
# which_set='train',
# scaler_path='../models/scaler.pkl',
# data_dir='/Users/akara/Workspace/data/chbmit',
# sample_size_second=1,
# batch_size=20)
#
# assert np.all(dataset.X == dataset2.X) | bsd-3-clause |
chenjun0210/tensorflow | tensorflow/contrib/keras/python/keras/callbacks.py | 6 | 31200 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras callbacks: utilities called at certain points during model training.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import deque
from collections import Iterable
from collections import OrderedDict
import csv
import json
import os
import time
import warnings
import numpy as np
from tensorflow.contrib.keras.python.keras import backend as K
from tensorflow.contrib.keras.python.keras.utils.generic_utils import Progbar
from tensorflow.python.ops import array_ops
from tensorflow.python.summary import summary as tf_summary
# pylint: disable=g-import-not-at-top
try:
import requests
except ImportError:
requests = None
# pylint: enable=g-import-not-at-top
class CallbackList(object):
"""Container abstracting a list of callbacks.
Arguments:
callbacks: List of `Callback` instances.
queue_length: Queue length for keeping
running statistics over callback execution time.
"""
def __init__(self, callbacks=None, queue_length=10):
callbacks = callbacks or []
self.callbacks = [c for c in callbacks]
self.queue_length = queue_length
def append(self, callback):
self.callbacks.append(callback)
def set_params(self, params):
for callback in self.callbacks:
callback.set_params(params)
def set_model(self, model):
for callback in self.callbacks:
callback.set_model(model)
def on_epoch_begin(self, epoch, logs=None):
"""Called at the start of an epoch.
Arguments:
epoch: integer, index of epoch.
logs: dictionary of logs.
"""
logs = logs or {}
for callback in self.callbacks:
callback.on_epoch_begin(epoch, logs)
self._delta_t_batch = 0.
self._delta_ts_batch_begin = deque([], maxlen=self.queue_length)
self._delta_ts_batch_end = deque([], maxlen=self.queue_length)
def on_epoch_end(self, epoch, logs=None):
"""Called at the end of an epoch.
Arguments:
epoch: integer, index of epoch.
logs: dictionary of logs.
"""
logs = logs or {}
for callback in self.callbacks:
callback.on_epoch_end(epoch, logs)
def on_batch_begin(self, batch, logs=None):
"""Called right before processing a batch.
Arguments:
batch: integer, index of batch within the current epoch.
logs: dictionary of logs.
"""
logs = logs or {}
t_before_callbacks = time.time()
for callback in self.callbacks:
callback.on_batch_begin(batch, logs)
self._delta_ts_batch_begin.append(time.time() - t_before_callbacks)
delta_t_median = np.median(self._delta_ts_batch_begin)
if (self._delta_t_batch > 0. and
delta_t_median > 0.95 * self._delta_t_batch and delta_t_median > 0.1):
warnings.warn('Method on_batch_begin() is slow compared '
'to the batch update (%f). Check your callbacks.' %
delta_t_median)
self._t_enter_batch = time.time()
def on_batch_end(self, batch, logs=None):
"""Called at the end of a batch.
Arguments:
batch: integer, index of batch within the current epoch.
logs: dictionary of logs.
"""
logs = logs or {}
if not hasattr(self, '_t_enter_batch'):
self._t_enter_batch = time.time()
self._delta_t_batch = time.time() - self._t_enter_batch
t_before_callbacks = time.time()
for callback in self.callbacks:
callback.on_batch_end(batch, logs)
self._delta_ts_batch_end.append(time.time() - t_before_callbacks)
delta_t_median = np.median(self._delta_ts_batch_end)
if (self._delta_t_batch > 0. and
(delta_t_median > 0.95 * self._delta_t_batch and delta_t_median > 0.1)):
warnings.warn('Method on_batch_end() is slow compared '
'to the batch update (%f). Check your callbacks.' %
delta_t_median)
def on_train_begin(self, logs=None):
"""Called at the beginning of training.
Arguments:
logs: dictionary of logs.
"""
logs = logs or {}
for callback in self.callbacks:
callback.on_train_begin(logs)
def on_train_end(self, logs=None):
"""Called at the end of training.
Arguments:
logs: dictionary of logs.
"""
logs = logs or {}
for callback in self.callbacks:
callback.on_train_end(logs)
def __iter__(self):
return iter(self.callbacks)
class Callback(object):
"""Abstract base class used to build new callbacks.
# Properties
params: dict. Training parameters
(eg. verbosity, batch size, number of epochs...).
model: instance of `keras.models.Model`.
Reference of the model being trained.
The `logs` dictionary that callback methods
take as argument will contain keys for quantities relevant to
the current batch or epoch.
Currently, the `.fit()` method of the `Sequential` model class
will include the following quantities in the `logs` that
it passes to its callbacks:
on_epoch_end: logs include `acc` and `loss`, and
optionally include `val_loss`
(if validation is enabled in `fit`), and `val_acc`
(if validation and accuracy monitoring are enabled).
on_batch_begin: logs include `size`,
the number of samples in the current batch.
on_batch_end: logs include `loss`, and optionally `acc`
(if accuracy monitoring is enabled).
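Example (an illustrative sketch of a minimal custom callback that records
the per-batch training loss; the `LossHistory` name is just an example):
```python
class LossHistory(Callback):
    def on_train_begin(self, logs=None):
        self.losses = []
    def on_batch_end(self, batch, logs=None):
        self.losses.append((logs or {}).get('loss'))
```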
"""
def __init__(self):
self.validation_data = None
def set_params(self, params):
self.params = params
def set_model(self, model):
self.model = model
def on_epoch_begin(self, epoch, logs=None):
pass
def on_epoch_end(self, epoch, logs=None):
pass
def on_batch_begin(self, batch, logs=None):
pass
def on_batch_end(self, batch, logs=None):
pass
def on_train_begin(self, logs=None):
pass
def on_train_end(self, logs=None):
pass
class BaseLogger(Callback):
"""Callback that accumulates epoch averages of metrics.
This callback is automatically applied to every Keras model.
"""
def on_epoch_begin(self, epoch, logs=None):
self.seen = 0
self.totals = {}
def on_batch_end(self, batch, logs=None):
logs = logs or {}
batch_size = logs.get('size', 0)
self.seen += batch_size
for k, v in logs.items():
if k in self.totals:
self.totals[k] += v * batch_size
else:
self.totals[k] = v * batch_size
def on_epoch_end(self, epoch, logs=None):
if logs is not None:
for k in self.params['metrics']:
if k in self.totals:
# Make value available to next callbacks.
logs[k] = self.totals[k] / self.seen
class ProgbarLogger(Callback):
"""Callback that prints metrics to stdout.
Arguments:
count_mode: One of "steps" or "samples".
Whether the progress bar should
count samples seen or steps (batches) seen.
Raises:
ValueError: In case of invalid `count_mode`.
"""
def __init__(self, count_mode='samples'):
super(ProgbarLogger, self).__init__()
if count_mode == 'samples':
self.use_steps = False
elif count_mode == 'steps':
self.use_steps = True
else:
raise ValueError('Unknown `count_mode`: ' + str(count_mode))
def on_train_begin(self, logs=None):
self.verbose = self.params['verbose']
self.epochs = self.params['epochs']
def on_epoch_begin(self, epoch, logs=None):
if self.verbose:
print('Epoch %d/%d' % (epoch + 1, self.epochs))
if self.use_steps:
target = self.params['steps']
else:
target = self.params['samples']
self.target = target
self.progbar = Progbar(target=self.target, verbose=self.verbose)
self.seen = 0
def on_batch_begin(self, batch, logs=None):
if self.seen < self.target:
self.log_values = []
def on_batch_end(self, batch, logs=None):
logs = logs or {}
batch_size = logs.get('size', 0)
if self.use_steps:
self.seen += 1
else:
self.seen += batch_size
for k in self.params['metrics']:
if k in logs:
self.log_values.append((k, logs[k]))
# Skip progbar update for the last batch;
# will be handled by on_epoch_end.
if self.verbose and self.seen < self.target:
self.progbar.update(self.seen, self.log_values)
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
for k in self.params['metrics']:
if k in logs:
self.log_values.append((k, logs[k]))
if self.verbose:
self.progbar.update(self.seen, self.log_values, force=True)
class History(Callback):
"""Callback that records events into a `History` object.
This callback is automatically applied to
every Keras model. The `History` object
gets returned by the `fit` method of models.
"""
def on_train_begin(self, logs=None):
self.epoch = []
self.history = {}
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
self.epoch.append(epoch)
for k, v in logs.items():
self.history.setdefault(k, []).append(v)
class ModelCheckpoint(Callback):
"""Save the model after every epoch.
`filepath` can contain named formatting options,
which will be filled with the value of `epoch` and
keys in `logs` (passed in `on_epoch_end`).
For example: if `filepath` is `weights.{epoch:02d}-{val_loss:.2f}.hdf5`,
then the model checkpoints will be saved with the epoch number and
the validation loss in the filename.
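Example (an illustrative sketch; `model`, `X_train` and `Y_train` are
placeholders for an existing compiled model and its training data, and the
fit call assumes validation data is available so that `val_loss` is logged):
```python
checkpoint = ModelCheckpoint('weights.{epoch:02d}-{val_loss:.2f}.hdf5',
                             monitor='val_loss',
                             save_best_only=True)
model.fit(X_train, Y_train, validation_split=0.2,
          callbacks=[checkpoint])
```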
Arguments:
filepath: string, path to save the model file.
monitor: quantity to monitor.
verbose: verbosity mode, 0 or 1.
save_best_only: if `save_best_only=True`,
the latest best model according to
the quantity monitored will not be overwritten.
mode: one of {auto, min, max}.
If `save_best_only=True`, the decision
to overwrite the current save file is made
based on either the maximization or the
minimization of the monitored quantity. For `val_acc`,
this should be `max`, for `val_loss` this should
be `min`, etc. In `auto` mode, the direction is
automatically inferred from the name of the monitored quantity.
save_weights_only: if True, then only the model's weights will be
saved (`model.save_weights(filepath)`), else the full model
is saved (`model.save(filepath)`).
period: Interval (number of epochs) between checkpoints.
"""
def __init__(self,
filepath,
monitor='val_loss',
verbose=0,
save_best_only=False,
save_weights_only=False,
mode='auto',
period=1):
super(ModelCheckpoint, self).__init__()
self.monitor = monitor
self.verbose = verbose
self.filepath = filepath
self.save_best_only = save_best_only
self.save_weights_only = save_weights_only
self.period = period
self.epochs_since_last_save = 0
if mode not in ['auto', 'min', 'max']:
warnings.warn('ModelCheckpoint mode %s is unknown, '
'fallback to auto mode.' % (mode), RuntimeWarning)
mode = 'auto'
if mode == 'min':
self.monitor_op = np.less
self.best = np.Inf
elif mode == 'max':
self.monitor_op = np.greater
self.best = -np.Inf
else:
if 'acc' in self.monitor or self.monitor.startswith('fmeasure'):
self.monitor_op = np.greater
self.best = -np.Inf
else:
self.monitor_op = np.less
self.best = np.Inf
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
self.epochs_since_last_save += 1
if self.epochs_since_last_save >= self.period:
self.epochs_since_last_save = 0
filepath = self.filepath.format(epoch=epoch, **logs)
if self.save_best_only:
current = logs.get(self.monitor)
if current is None:
warnings.warn('Can save best model only with %s available, '
'skipping.' % (self.monitor), RuntimeWarning)
else:
if self.monitor_op(current, self.best):
if self.verbose > 0:
print('Epoch %05d: %s improved from %0.5f to %0.5f,'
' saving model to %s' % (epoch, self.monitor, self.best,
current, filepath))
self.best = current
if self.save_weights_only:
self.model.save_weights(filepath, overwrite=True)
else:
self.model.save(filepath, overwrite=True)
else:
if self.verbose > 0:
print('Epoch %05d: %s did not improve' % (epoch, self.monitor))
else:
if self.verbose > 0:
print('Epoch %05d: saving model to %s' % (epoch, filepath))
if self.save_weights_only:
self.model.save_weights(filepath, overwrite=True)
else:
self.model.save(filepath, overwrite=True)
class EarlyStopping(Callback):
"""Stop training when a monitored quantity has stopped improving.
Arguments:
monitor: quantity to be monitored.
min_delta: minimum change in the monitored quantity
to qualify as an improvement, i.e. an absolute
change of less than min_delta will count as no
improvement.
patience: number of epochs with no improvement
after which training will be stopped.
verbose: verbosity mode.
mode: one of {auto, min, max}. In `min` mode,
training will stop when the quantity
monitored has stopped decreasing; in `max`
mode it will stop when the quantity
monitored has stopped increasing; in `auto`
mode, the direction is automatically inferred
from the name of the monitored quantity.
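Example (an illustrative sketch; `model`, `X_train` and `Y_train` are
placeholders, and validation data is assumed so that `val_loss` is logged):
```python
early_stopping = EarlyStopping(monitor='val_loss', min_delta=1e-4,
                               patience=5, verbose=1)
model.fit(X_train, Y_train, validation_split=0.2,
          callbacks=[early_stopping])
```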
"""
def __init__(self,
monitor='val_loss',
min_delta=0,
patience=0,
verbose=0,
mode='auto'):
super(EarlyStopping, self).__init__()
self.monitor = monitor
self.patience = patience
self.verbose = verbose
self.min_delta = min_delta
self.wait = 0
self.stopped_epoch = 0
if mode not in ['auto', 'min', 'max']:
warnings.warn('EarlyStopping mode %s is unknown, '
'fallback to auto mode.' % (mode), RuntimeWarning)
mode = 'auto'
if mode == 'min':
self.monitor_op = np.less
elif mode == 'max':
self.monitor_op = np.greater
else:
if 'acc' in self.monitor or self.monitor.startswith('fmeasure'):
self.monitor_op = np.greater
else:
self.monitor_op = np.less
if self.monitor_op == np.greater:
self.min_delta *= 1
else:
self.min_delta *= -1
def on_train_begin(self, logs=None):
self.wait = 0 # Allow instances to be re-used
self.best = np.Inf if self.monitor_op == np.less else -np.Inf
def on_epoch_end(self, epoch, logs=None):
current = logs.get(self.monitor)
if current is None:
  warnings.warn('Early stopping requires %s available!' % (self.monitor),
                RuntimeWarning)
  return
if self.monitor_op(current - self.min_delta, self.best):
self.best = current
self.wait = 0
else:
if self.wait >= self.patience:
self.stopped_epoch = epoch
self.model.stop_training = True
self.wait += 1
def on_train_end(self, logs=None):
if self.stopped_epoch > 0 and self.verbose > 0:
print('Epoch %05d: early stopping' % (self.stopped_epoch))
class RemoteMonitor(Callback):
"""Callback used to stream events to a server.
Requires the `requests` library.
Events are sent to `root + '/publish/epoch/end/'` by default. Calls are
HTTP POST, with a `data` argument which is a
JSON-encoded dictionary of event data.
Arguments:
root: String; root url of the target server.
path: String; path relative to `root` to which the events will be sent.
field: String; JSON field under which the data will be stored.
headers: Dictionary; optional custom HTTP headers.
Defaults to:
`{'Accept': 'application/json',
'Content-Type': 'application/json'}`
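Example (an illustrative sketch; it assumes a listener is already running
at `http://localhost:9000` and that `model`, `X_train`, `Y_train` exist):
```python
monitor = RemoteMonitor(root='http://localhost:9000')
model.fit(X_train, Y_train, callbacks=[monitor])
```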
"""
def __init__(self,
root='http://localhost:9000',
path='/publish/epoch/end/',
field='data',
headers=None):
super(RemoteMonitor, self).__init__()
if headers is None:
headers = {
'Accept': 'application/json',
'Content-Type': 'application/json'
}
self.root = root
self.path = path
self.field = field
self.headers = headers
def on_epoch_end(self, epoch, logs=None):
if requests is None:
raise ImportError('RemoteMonitor requires the `requests` library.')
logs = logs or {}
send = {}
send['epoch'] = epoch
for k, v in logs.items():
send[k] = v
try:
requests.post(
self.root + self.path, {self.field: json.dumps(send)},
headers=self.headers)
except requests.exceptions.RequestException:
warnings.warn('Warning: could not reach RemoteMonitor '
'root server at ' + str(self.root))
class LearningRateScheduler(Callback):
"""Learning rate scheduler.
Arguments:
schedule: a function that takes an epoch index as input
(integer, indexed from 0) and returns a new
learning rate as output (float).
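Example (an illustrative sketch; the decay rule is arbitrary and `model`,
`X_train`, `Y_train` are placeholders):
```python
def schedule(epoch):
    # Halve the initial rate every 10 epochs; must return a float.
    return 0.01 * (0.5 ** (epoch // 10))
lr_scheduler = LearningRateScheduler(schedule)
model.fit(X_train, Y_train, callbacks=[lr_scheduler])
```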
"""
def __init__(self, schedule):
super(LearningRateScheduler, self).__init__()
self.schedule = schedule
def on_epoch_begin(self, epoch, logs=None):
if not hasattr(self.model.optimizer, 'lr'):
raise ValueError('Optimizer must have a "lr" attribute.')
lr = self.schedule(epoch)
if not isinstance(lr, (float, np.float32, np.float64)):
raise ValueError('The output of the "schedule" function '
'should be float.')
K.set_value(self.model.optimizer.lr, lr)
class TensorBoard(Callback):
"""Tensorboard basic visualizations.
This callback writes a log for TensorBoard, which allows
you to visualize dynamic graphs of your training and test
metrics, as well as activation histograms for the different
layers in your model.
Arguments:
log_dir: the path of the directory where to save the log
files to be parsed by Tensorboard.
histogram_freq: frequency (in epochs) at which to compute activation
histograms for the layers of the model. If set to 0,
histograms won't be computed.
write_graph: whether to visualize the graph in Tensorboard.
The log file can become quite large when
write_graph is set to True.
write_images: whether to write model weights to visualize as
image in Tensorboard.
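Example (an illustrative sketch; `model`, `X_train` and `Y_train` are
placeholders, and validation data is assumed since histograms need it):
```python
tensorboard = TensorBoard(log_dir='./logs', histogram_freq=1,
                          write_graph=True)
model.fit(X_train, Y_train, validation_split=0.2,
          callbacks=[tensorboard])
```
The resulting logs can then be inspected with `tensorboard --logdir=./logs`.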
"""
def __init__(self,
log_dir='./logs',
histogram_freq=0,
write_graph=True,
write_images=False):
super(TensorBoard, self).__init__()
self.log_dir = log_dir
self.histogram_freq = histogram_freq
self.merged = None
self.write_graph = write_graph
self.write_images = write_images
def set_model(self, model):
self.model = model
self.sess = K.get_session()
if self.histogram_freq and self.merged is None:
for layer in self.model.layers:
for weight in layer.weights:
tf_summary.histogram(weight.name, weight)
if self.write_images:
w_img = array_ops.squeeze(weight)
shape = w_img.get_shape()
if len(shape) > 1 and shape[0] > shape[1]:
w_img = array_ops.transpose(w_img)
if len(shape) == 1:
w_img = array_ops.expand_dims(w_img, 0)
w_img = array_ops.expand_dims(array_ops.expand_dims(w_img, 0), -1)
tf_summary.image(weight.name, w_img)
if hasattr(layer, 'output'):
tf_summary.histogram('{}_out'.format(layer.name), layer.output)
self.merged = tf_summary.merge_all()
if self.write_graph:
self.writer = tf_summary.FileWriter(self.log_dir, self.sess.graph)
else:
self.writer = tf_summary.FileWriter(self.log_dir)
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
if self.validation_data and self.histogram_freq:
if epoch % self.histogram_freq == 0:
# TODO(fchollet): implement batched calls to sess.run
# (current call will likely go OOM on GPU)
if self.model.uses_learning_phase:
cut_v_data = len(self.model.inputs)
val_data = self.validation_data[:cut_v_data] + [0]
tensors = self.model.inputs + [K.learning_phase()]
else:
val_data = self.validation_data
tensors = self.model.inputs
feed_dict = dict(zip(tensors, val_data))
result = self.sess.run([self.merged], feed_dict=feed_dict)
summary_str = result[0]
self.writer.add_summary(summary_str, epoch)
for name, value in logs.items():
if name in ['batch', 'size']:
continue
summary = tf_summary.Summary()
summary_value = summary.value.add()
summary_value.simple_value = value.item()
summary_value.tag = name
self.writer.add_summary(summary, epoch)
self.writer.flush()
def on_train_end(self, _):
self.writer.close()
class ReduceLROnPlateau(Callback):
"""Reduce learning rate when a metric has stopped improving.
Models often benefit from reducing the learning rate by a factor
of 2-10 once learning stagnates. This callback monitors a
quantity and if no improvement is seen for a 'patience' number
of epochs, the learning rate is reduced.
Example:
```python
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2,
patience=5, min_lr=0.001)
model.fit(X_train, Y_train, callbacks=[reduce_lr])
```
Arguments:
monitor: quantity to be monitored.
factor: factor by which the learning rate will
be reduced. new_lr = lr * factor
patience: number of epochs with no improvement
after which learning rate will be reduced.
verbose: int. 0: quiet, 1: update messages.
mode: one of {auto, min, max}. In `min` mode,
lr will be reduced when the quantity
monitored has stopped decreasing; in `max`
mode it will be reduced when the quantity
monitored has stopped increasing; in `auto`
mode, the direction is automatically inferred
from the name of the monitored quantity.
epsilon: threshold for measuring the new optimum,
to only focus on significant changes.
cooldown: number of epochs to wait before resuming
normal operation after lr has been reduced.
min_lr: lower bound on the learning rate.
"""
def __init__(self,
monitor='val_loss',
factor=0.1,
patience=10,
verbose=0,
mode='auto',
epsilon=1e-4,
cooldown=0,
min_lr=0):
super(ReduceLROnPlateau, self).__init__()
self.monitor = monitor
if factor >= 1.0:
raise ValueError('ReduceLROnPlateau ' 'does not support a factor >= 1.0.')
self.factor = factor
self.min_lr = min_lr
self.epsilon = epsilon
self.patience = patience
self.verbose = verbose
self.cooldown = cooldown
self.cooldown_counter = 0 # Cooldown counter.
self.wait = 0
self.best = 0
self.mode = mode
self.monitor_op = None
self._reset()
def _reset(self):
"""Resets wait counter and cooldown counter.
"""
if self.mode not in ['auto', 'min', 'max']:
warnings.warn('Learning Rate Plateau Reducing mode %s is unknown, '
'fallback to auto mode.' % (self.mode), RuntimeWarning)
self.mode = 'auto'
if (self.mode == 'min' or
(self.mode == 'auto' and 'acc' not in self.monitor)):
self.monitor_op = lambda a, b: np.less(a, b - self.epsilon)
self.best = np.Inf
else:
self.monitor_op = lambda a, b: np.greater(a, b + self.epsilon)
self.best = -np.Inf
self.cooldown_counter = 0
self.wait = 0
self.lr_epsilon = self.min_lr * 1e-4
def on_train_begin(self, logs=None):
self._reset()
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
logs['lr'] = K.get_value(self.model.optimizer.lr)
current = logs.get(self.monitor)
if current is None:
warnings.warn('Learning Rate Plateau Reducing requires %s available!' %
self.monitor, RuntimeWarning)
else:
if self.in_cooldown():
self.cooldown_counter -= 1
self.wait = 0
if self.monitor_op(current, self.best):
self.best = current
self.wait = 0
elif not self.in_cooldown():
if self.wait >= self.patience:
old_lr = float(K.get_value(self.model.optimizer.lr))
if old_lr > self.min_lr + self.lr_epsilon:
new_lr = old_lr * self.factor
new_lr = max(new_lr, self.min_lr)
K.set_value(self.model.optimizer.lr, new_lr)
if self.verbose > 0:
print('\nEpoch %05d: reducing learning rate to %s.' % (epoch,
new_lr))
self.cooldown_counter = self.cooldown
self.wait = 0
self.wait += 1
def in_cooldown(self):
return self.cooldown_counter > 0
class CSVLogger(Callback):
"""Callback that streams epoch results to a csv file.
Supports all values that can be represented as a string,
including 1D iterables such as np.ndarray.
Example:
```python
csv_logger = CSVLogger('training.log')
model.fit(X_train, Y_train, callbacks=[csv_logger])
```
Arguments:
filename: filename of the csv file, e.g. 'run/log.csv'.
separator: string used to separate elements in the csv file.
append: True: append if file exists (useful for continuing
training). False: overwrite existing file.
"""
def __init__(self, filename, separator=',', append=False):
self.sep = separator
self.filename = filename
self.append = append
self.writer = None
self.keys = None
self.append_header = True
super(CSVLogger, self).__init__()
def on_train_begin(self, logs=None):
if self.append:
if os.path.exists(self.filename):
with open(self.filename) as f:
self.append_header = not bool(len(f.readline()))
self.csv_file = open(self.filename, 'a')
else:
self.csv_file = open(self.filename, 'w')
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
def handle_value(k):
is_zero_dim_ndarray = isinstance(k, np.ndarray) and k.ndim == 0
if isinstance(k, Iterable) and not is_zero_dim_ndarray:
return '"[%s]"' % (', '.join(map(str, k)))
else:
return k
if not self.writer:
self.keys = sorted(logs.keys())
class CustomDialect(csv.excel):
delimiter = self.sep
self.writer = csv.DictWriter(
self.csv_file,
fieldnames=['epoch'] + self.keys,
dialect=CustomDialect)
if self.append_header:
self.writer.writeheader()
row_dict = OrderedDict({'epoch': epoch})
row_dict.update((key, handle_value(logs[key])) for key in self.keys)
self.writer.writerow(row_dict)
self.csv_file.flush()
def on_train_end(self, logs=None):
self.csv_file.close()
self.writer = None
class LambdaCallback(Callback):
"""Callback for creating simple, custom callbacks on-the-fly.
This callback is constructed with anonymous functions that will be called
at the appropriate time. Note that the callbacks expect positional
arguments, as:
- `on_epoch_begin` and `on_epoch_end` expect two positional arguments:
`epoch`, `logs`
- `on_batch_begin` and `on_batch_end` expect two positional arguments:
`batch`, `logs`
- `on_train_begin` and `on_train_end` expect one positional argument:
`logs`
Arguments:
on_epoch_begin: called at the beginning of every epoch.
on_epoch_end: called at the end of every epoch.
on_batch_begin: called at the beginning of every batch.
on_batch_end: called at the end of every batch.
on_train_begin: called at the beginning of model training.
on_train_end: called at the end of model training.
Example:
```python
# Print the batch number at the beginning of every batch.
batch_print_callback = LambdaCallback(
on_batch_begin=lambda batch,logs: print(batch))
# Plot the loss after every epoch.
import numpy as np
import matplotlib.pyplot as plt
plot_loss_callback = LambdaCallback(
on_epoch_end=lambda epoch, logs: plt.plot(np.arange(epoch),
logs['loss']))
# Terminate some processes after having finished model training.
processes = ...
cleanup_callback = LambdaCallback(
on_train_end=lambda logs: [
p.terminate() for p in processes if p.is_alive()])
model.fit(...,
callbacks=[batch_print_callback,
plot_loss_callback,
cleanup_callback])
```
"""
def __init__(self,
on_epoch_begin=None,
on_epoch_end=None,
on_batch_begin=None,
on_batch_end=None,
on_train_begin=None,
on_train_end=None,
**kwargs):
super(LambdaCallback, self).__init__()
self.__dict__.update(kwargs)
if on_epoch_begin is not None:
self.on_epoch_begin = on_epoch_begin
else:
self.on_epoch_begin = lambda epoch, logs: None
if on_epoch_end is not None:
self.on_epoch_end = on_epoch_end
else:
self.on_epoch_end = lambda epoch, logs: None
if on_batch_begin is not None:
self.on_batch_begin = on_batch_begin
else:
self.on_batch_begin = lambda batch, logs: None
if on_batch_end is not None:
self.on_batch_end = on_batch_end
else:
self.on_batch_end = lambda batch, logs: None
if on_train_begin is not None:
self.on_train_begin = on_train_begin
else:
self.on_train_begin = lambda logs: None
if on_train_end is not None:
self.on_train_end = on_train_end
else:
self.on_train_end = lambda logs: None
| apache-2.0 |
466152112/scikit-learn | examples/decomposition/plot_pca_vs_lda.py | 182 | 1743 | """
=======================================================
Comparison of LDA and PCA 2D projection of Iris dataset
=======================================================
The Iris dataset represents 3 kinds of Iris flowers (Setosa, Versicolour
and Virginica) with 4 attributes: sepal length, sepal width, petal length
and petal width.
Principal Component Analysis (PCA) applied to this data identifies the
combination of attributes (principal components, or directions in the
feature space) that account for the most variance in the data. Here we
plot the different samples on the first 2 principal components.
Linear Discriminant Analysis (LDA) tries to identify attributes that
account for the most variance *between classes*. In particular,
LDA, in contrast to PCA, is a supervised method, using known class labels.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.lda import LDA
iris = datasets.load_iris()
X = iris.data
y = iris.target
target_names = iris.target_names
pca = PCA(n_components=2)
X_r = pca.fit(X).transform(X)
lda = LDA(n_components=2)
X_r2 = lda.fit(X, y).transform(X)
# Percentage of variance explained for each component
print('explained variance ratio (first two components): %s'
% str(pca.explained_variance_ratio_))
plt.figure()
for c, i, target_name in zip("rgb", [0, 1, 2], target_names):
plt.scatter(X_r[y == i, 0], X_r[y == i, 1], c=c, label=target_name)
plt.legend()
plt.title('PCA of IRIS dataset')
plt.figure()
for c, i, target_name in zip("rgb", [0, 1, 2], target_names):
plt.scatter(X_r2[y == i, 0], X_r2[y == i, 1], c=c, label=target_name)
plt.legend()
plt.title('LDA of IRIS dataset')
plt.show()
| bsd-3-clause |
billzorn/fpunreal | titanfp/bench.py | 1 | 33022 | import random
import math
import itertools
import multiprocessing
import subprocess
import os
import numpy as np
from .fpbench import fpcparser
from .arithmetic import ieee754, sinking
from .arithmetic import posit
from .arithmetic import mpmf
from .arithmetic import core2math
from .arithmetic import evalctx
from .titanic import digital
from .titanic import gmpmath
#from .titanic import wolfmath
fpbench_root = '/home/bill/private/research/origin-FPBench'
fpbench_tools = os.path.join(fpbench_root, 'tools')
fpbench_benchmarks = os.path.join(fpbench_root, 'benchmarks')
def run_tool(toolname, core, *args):
tool = subprocess.Popen(
args=['racket', os.path.join(fpbench_tools, toolname), *args],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout_data, stderr_data = tool.communicate(input=core.sexp.encode('utf-8'))
success = True
retval = tool.wait()
if retval != 0:
success = False
print('subprocess:\n {}\nreturned {:d}'.format(' '.join(tool.args), retval),
file=sys.stderr, flush=True)
if stderr_data:
print(stderr_data, file=sys.stderr, flush=True)
return success, stdout_data.decode('utf-8')
def gen_input(e, p, nbits, negative=False):
# p <= nbits
significand = random.randint(0, (1 << (nbits - 1)) - 1) | (1 << (nbits - 1))
hi = digital.Digital(negative=negative, c=significand, exp=e - nbits + 1)
lo = hi.round_m(p)
return hi, lo
def gen_e(e, c, negative=False):
exp = e - c.bit_length() + 1
return digital.Digital(negative=negative, exp=exp, c=c)
def linear_ulps(x, y):
smaller_n = min(x.n, y.n)
x_offset = x.n - smaller_n
y_offset = y.n - smaller_n
x_c = x.c << x_offset
y_c = y.c << y_offset
return x_c - y_c
def bits_agreement(hi, lo):
bitsim = gmpmath.geo_sim(hi, lo)
if math.isinf(bitsim):
if bitsim > 0:
agreement = max(hi.p, lo.p)
else:
agreement = 1
else:
agreement = int(bitsim) + 4
hi_exact = digital.Digital(hi, inexact=False, rc=0)
lo_exact = digital.Digital(lo, inexact=False, rc=0)
one_ulp_agreement = None
zero_ulp_agreement = None
for p in range(agreement, -1, -1):
hi_rounded = hi_exact.round_m(p)
lo_rounded = lo_exact.round_m(p)
rounded_ulps = linear_ulps(hi_rounded, lo_rounded)
if one_ulp_agreement is None and abs(rounded_ulps) <= 1:
one_ulp_agreement = p
if zero_ulp_agreement is None and rounded_ulps == 0:
zero_ulp_agreement = p
if one_ulp_agreement is not None and zero_ulp_agreement is not None:
break
if one_ulp_agreement == None:
one_ulp_agreement = 0
if zero_ulp_agreement == None:
zero_ulp_agreement = 0
# if agreement > 0 and (agreement <= one_ulp_agreement or agreement <= zero_ulp_agreement):
# print('possibly underestimated agreement:\n {} vs {}\n {}, {}, {}, {}'
# .format(hi, lo, bitsim, agreement, one_ulp_agreement, zero_ulp_agreement))
return bitsim, one_ulp_agreement, zero_ulp_agreement
ctx4096 = evalctx.IEEECtx(es=32, nbits=4128)
ctx128 = evalctx.IEEECtx(es=32, nbits=160)
ctx64 = evalctx.IEEECtx(es=16, nbits=80)
ctx32 = evalctx.IEEECtx(es=16, nbits=48)
ctx_double = evalctx.IEEECtx(es=11, nbits=64)
ctx512 = evalctx.IEEECtx(es=20, nbits=532)
rejections = 100
progress_update = 10000
batchsize = 20000
def gen_core_arguments(core, es, ps, nbits, ctx):
hi_args, lo_args = zip(*[gen_input(es[i], ps[i], nbits) for i in range(len(core.inputs))])
if core.pre is not None:
for reject in range(rejections):
if (ieee754.Interpreter.interpret_pre(core, hi_args, ctx) and
ieee754.Interpreter.interpret_pre(core, lo_args, ctx)):
break
hi_args, lo_args = zip(*[gen_input(es[i], ps[i], nbits) for i in range(len(core.inputs))])
if not (ieee754.Interpreter.interpret_pre(core, hi_args, ctx) and
ieee754.Interpreter.interpret_pre(core, lo_args, ctx)):
raise ValueError('failed to meet precondition of fpcore:\n{}'
.format(str(core)))
return hi_args, lo_args
def bench_core(core, hi_args, lo_args, ctx):
hi_result = ieee754.Interpreter.interpret(core, hi_args, ctx=ctx)
lo_result = ieee754.Interpreter.interpret(core, lo_args, ctx=ctx)
sunk = sinking.Interpreter.interpret(core, lo_args, ctx)
if sunk.inexact:
p = sunk.p
else:
p = float('inf')
return [p, *bits_agreement(hi_result, lo_result)]
def iter_1arg(erange, prange, benches):
for e1 in erange:
for p1 in prange:
for i in range(benches):
yield [e1], [p1]
def iter_2arg(erange, prange, benches):
for e1 in erange:
for e2 in erange:
for p1 in prange:
for p2 in prange:
for i in range(benches):
yield [e1, e2], [p1, p2]
def sweep(core, cases, nbits, ctx):
records = []
for es, ps in cases:
try:
hi_args, lo_args = gen_core_arguments(core, es, ps, nbits, ctx)
except Exception as e:
if progress_update > 0:
print('!', end='', flush=True)
continue
records.append(bench_core(core, hi_args, lo_args, ctx))
if progress_update > 0 and len(records) % progress_update == 0:
print('.', end='', flush=True)
return records
def sweep_single(core, cases, nbits, ctx):
print('{:s}\nrunning with {:d} total bits'.format(str(core), nbits), flush=True)
records = sweep(core, cases, nbits, ctx)
if progress_update > 0:
print('\ngenerated {:d} records'.format(len(records)), flush=True)
else:
print('generated {:d} records'.format(len(records)), flush=True)
return records
# break arguments up into chunks manually.
# thanks to:
# https://stackoverflow.com/questions/8991506/iterate-an-iterator-by-chunks-of-n-in-python
def grouper(n, iterable):
it = iter(iterable)
while True:
chunk = tuple(itertools.islice(it, n))
if not chunk:
return
yield chunk
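# Illustrative behaviour (as a comment, not executed here):
#   list(grouper(2, range(5))) -> [(0, 1), (2, 3), (4,)]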
def sweep_multi(core, cases, nbits, ctx, nprocs=None):
if nprocs is None:
nprocs = max(multiprocessing.cpu_count() // 2, 1)
print('{:s}\nrunning with {:d} total bits on {:d} processes'.format(str(core), nbits, nprocs), flush=True)
pool = multiprocessing.Pool(processes=nprocs)
arg_iter = ([core, chunk, nbits, ctx] for chunk in grouper(batchsize, cases))
all_records = []
map_invocations = 0
for records in pool.starmap(sweep, arg_iter):
map_invocations += 1
if records is not None:
all_records += records
if progress_update > 0:
print('\nstarmap finished with {:d} invocations'.format(map_invocations), flush=True)
else:
print('starmap finished with {:d} invocations'.format(map_invocations), flush=True)
pool.close()
pool.join()
print('generated {:d} records'.format(len(all_records)), flush=True)
return all_records
benchmarks = {
'nop' : '(FPCore (x) x)',
'add' : '(FPCore (x y) (+ x y))',
'sub' : '(FPCore (x y) (- x y))',
'mul' : '(FPCore (x y) (* x y))',
'div' : '(FPCore (x y) (/ x y))',
'sqrt' : '(FPCore (x) (sqrt x))',
# 'floor' : '(FPCore (x) (floor x))',
# 'fmod' : '(FPCore (x y) (fmod x y))',
# 'sin' : '(FPCore (x) (sin x))',
# 'pow' : '(FPCore (x y) (pow x y))',
'quadratic' : """(FPCore (a b c)
:name "NMSE p42, positive"
:cite (hamming-1987 herbie-2015)
:fpbench-domain textbook
:pre (and (>= (* b b) (* 4 (* a c))) (!= a 0))
(/ (+ (- b) (sqrt (- (* b b) (* 4 (* a c))))) (* 2 a)))
""",
'ex3.1' : """(FPCore (x)
:name "NMSE example 3.1"
:cite (hamming-1987 herbie-2015)
:fpbench-domain textbook
:pre (>= x 0)
(- (sqrt (+ x 1)) (sqrt x)))
""",
'ex3.6' : """(FPCore (x)
:name "NMSE example 3.6"
:cite (hamming-1987 herbie-2015)
:fpbench-domain textbook
:pre (>= x 0)
(- (/ 1 (sqrt x)) (/ 1 (sqrt (+ x 1)))))
""",
'ex3.3.1' : """(FPCore
(x)
:name "NMSE problem 3.3.1"
:cite (hamming-1987 herbie-2015)
:fpbench-domain textbook
:pre (!= x 0)
(- (/ 1 (+ x 1)) (/ 1 x)))
""",
'ex3.3.3' : """(FPCore (x)
:name "NMSE problem 3.3.3"
:cite (hamming-1987 herbie-2015)
:fpbench-domain textbook
:pre (!= x 0 1 -1)
(+ (- (/ 1 (+ x 1)) (/ 2 x)) (/ 1 (- x 1))))
""",
'herbified_quadratic': """(FPCore (a b c)
:herbie-status success
:herbie-time 118637.2451171875
:herbie-bits-used 3392
:herbie-error-input ((256 28.609971950677362) (8000 33.90307227594979))
:herbie-error-output ((256 5.078369297841056) (8000 6.594164753178634))
:name "NMSE p42, positive"
:pre (and (>= (* b b) (* 4 (* a c))) (!= a 0))
(if (<= b -3.2964251401560902e+93)
(- (/ b a))
(if (<= b -9.121837495335558e-234)
(/ 1 (/ (* a 2) (- (sqrt (- (* b b) (* c (* a 4)))) b)))
(if (<= b 4.358108025323294e+96)
(/ 1 (* (+ (sqrt (- (* b b) (* c (* a 4)))) b) (/ 2 (/ (- 4) (/ 1 c)))))
(/ (- c) b)))))
""",
'herbified_ex3.1': """(FPCore (x)
:herbie-status success
:herbie-time 24932.283935546875
:herbie-bits-used 1344
:herbie-error-input ((256 30.095582124185807) (8000 29.789458520339096))
:herbie-error-output ((256 0.18978500976844204) (8000 0.16283740625180287))
:name "NMSE example 3.1"
:pre (>= x 0)
(/ 1 (+ (sqrt (+ x 1)) (sqrt x))))
""",
'herbified_ex3.6': """(FPCore (x)
:herbie-status success
:herbie-time 26448.68994140625
:herbie-bits-used 1088
:herbie-error-input ((256 17.200122070308325) (8000 19.44276561824272))
:herbie-error-output ((256 0.38451646722105215) (8000 0.43027832341504624))
:name "NMSE example 3.6"
:pre (>= x 0)
(* (/ (sqrt (/ 1 (+ (sqrt (+ x 1)) (sqrt x)))) (sqrt x)) (/ (sqrt (/ 1 (+ (sqrt (+ x 1)) (sqrt x)))) (sqrt (+ x 1)))))
""",
'herbified_ex3.3.1': """(FPCore (x)
:herbie-status success
:herbie-time 19409.0048828125
:herbie-bits-used 832
:herbie-error-input ((256 12.87691845436457) (8000 14.037291740926072))
:herbie-error-output ((256 0.078125) (8000 0.0705))
:name "NMSE problem 3.3.1"
:pre (!= x 0)
(/ (/ (- 1) (+ x 1)) x))
""",
'herbified_ex3.3.3': """(FPCore (x)
:herbie-status success
:herbie-time 110668.86499023438
:herbie-bits-used 1088
:herbie-error-input ((256 9.663075481047713) (8000 9.645467709556803))
:herbie-error-output ((256 0.06640625) (8000 0.07328308281331129))
:name "NMSE problem 3.3.3"
:pre (!= x 0 1 -1)
(/ (/ 2 (* (+ x 1) x)) (- x 1)))
""",
'accbench': """(FPCore ()
:name "Accuracy on a 32-bit budget"
(pow (/ (- (/ 27 10) E)
(- PI (+ (sqrt 2) (sqrt 3))))
(/ 67 16)))
""",
'sinkbench': """(FPCore ()
:name "Accuracy on a 32-bit budget (simplified)"
(let [(tmp (/ (- (/ 27 10) E)
(- PI (+ (sqrt 2) (sqrt 3)))))]
(sqrt (* (* tmp tmp) tmp))))
""",
}
cores = { k : fpcparser.compile1(v) for k, v in benchmarks.items() }
#maths = { k : core2math.compile(v) for k, v in cores.items() }
def split_records(records, xidx, yidx):
xs = []
ys = []
for record in records:
xs.append(record[xidx])
ys.append(record[yidx])
return xs, ys
def cdf(xs):
mass = len(xs)
xs_sorted = sorted(xs)
ys = [i / mass for i in range(len(xs))]
return xs_sorted, ys
def split_xs(xs, ys):
cdfs = {}
for x, y in zip(xs, ys):
if x not in cdfs:
cdfs[x] = []
cdfs[x].append(y)
return cdfs
# # from ipython
# import matplotlib.pyplot as plt
# import matplotlib.lines as mlines
def do_scatter(xs, ys, title, fname):
fig, ax = plt.subplots()
fig.set_size_inches(8, 5.5)
ax.set_xlim(0, 12)
ax.set_ylim(-3, 15)
ax.set_xlabel('sinking-point reported precision (bits)')
ax.set_ylabel('actual precision (bits accuracy)')
ax.set_title(title)
ax.scatter(xs, ys, alpha=0.002)
xlim = ax.get_xlim()
blackline = mlines.Line2D(xlim, xlim, color='black')
redline = mlines.Line2D(xlim, [y-1 for y in xlim], color='red')
ax.add_line(blackline)
ax.add_line(redline)
fig.savefig(fname)
def do_cdf(xs, ys, title, fname, use_line2=False):
fig, ax = plt.subplots()
fig.set_size_inches(8, 5.5)
ax.set_xlim(-4, 12)
ax.set_ylim(0.0, 1.1)
ax.set_xlabel('excess precision (bits accuracy)')
ax.set_ylabel('fraction of results')
ax.set_title(title)
#ax.set_xlim(-2, 2)
#ax.set_ylim(-0.1, 0.3)
cdfs = split_xs(xs, ys)
for x, ys2 in cdfs.items():
cdf_xs, cdf_ys = cdf([y - x for y in ys2])
ax.plot(cdf_xs, cdf_ys)
x_min, x_max = ax.get_xlim()
ref_x = np.linspace(x_min, x_max, 1000)
ref_y = [1 - (1) * (2**-x) for x in ref_x]
ref_y2 = [1 - (0.5) * (2**-x) for x in ref_x]
line = mlines.Line2D(ref_x, ref_y, color='black', linestyle='--', linewidth=1)
line2 = mlines.Line2D(ref_x, ref_y2, color='black', linestyle='-.', linewidth=1)
ax.add_line(line)
if use_line2:
ax.add_line(line2)
ax.axvline(x=0.0, color='black', linewidth=1)
fig.savefig(fname)
def make_figs():
import matplotlib
matplotlib.rcParams.update({'font.size': 16})
erange = range(-14, 16)
prange = range(1, 12)
nbits = 32
ctx = ctx64
for corename in ['nop', 'add', 'sub', 'mul', 'div', 'sqrt']:
core = cores[corename]
if len(core.inputs) == 1:
argiter = iter_1arg(erange, prange, 3000)
else:
argiter = iter_2arg(erange, prange, 10)
if corename == 'sub':
line2 = True
else:
line2 = False
data = sweep_multi(core, argiter, nbits, ctx)
xs, ys = split_records(data, 0, 1)
do_scatter(xs, ys, 'accuracy for ' + corename, 'fig/' + corename + '_scatter.png')
do_cdf(xs, ys, 'excess precision for ' + corename, 'fig/' + corename + '_cdf.png', line2)
# some random testing for larger programs
import numpy
import sys
_r_f64_max = (1 << 64) - 1
_r_f32_max = (1 << 32) - 1
def random_float64():
bits = random.randint(1, _r_f64_max)
return float(numpy.frombuffer(bits.to_bytes(8, sys.byteorder),dtype=numpy.float64, count=1, offset=0))
def random_float32():
bits = random.randint(1, _r_f32_max)
return float(numpy.frombuffer(bits.to_bytes(4, sys.byteorder),dtype=numpy.float32, count=1, offset=0))
corenames = [
'quadratic',
'ex3.1',
'ex3.6',
'ex3.3.1',
'ex3.3.3'
]
corenames += ['herbified_' + name for name in corenames]
#maths = {corename: run_tool('core2wls.rkt', cores[corename]) for corename in corenames}
#repl = wolfmath.MathRepl()
def get_exact_answer(corename, args):
mathfn = maths[corename][1]
expr = 'Block[{}, ' + mathfn + '; ex0[' + ', '.join([wolfmath.digital_to_math(ieee754.Float(arg)) for arg in args]) + ']]'
text_result = repl.evaluate_to_digits(expr)
return wolfmath.math_to_digital(text_result)
def gen_random_double_arguments(core):
rargs = [random_float64() for arg in core.inputs]
while not ieee754.Interpreter.interpret_pre(core, rargs, ctx=ctx512):
rargs = [random_float64() for arg in core.inputs]
return rargs
def run_example(corename, n):
core = cores[corename]
total_sink_prec = 0.0
total_bits_acc = 0.0
total_sink_acc = 0.0
rejects = 0
for trial in range(n):
rargs = gen_random_double_arguments(core)
exactish_result = ieee754.Interpreter.interpret(core, rargs, ctx=ctx512)
sinking_result = sinking.Interpreter.interpret(core, rargs, ctx=ctx_double)
exact_result = get_exact_answer(corename, rargs)
bits_acc = gmpmath.geo_sim(exactish_result, sinking_result)
exact_acc = gmpmath.geo_sim(exact_result, sinking_result)
print(sinking_result)
print(exactish_result)
print(exact_result)
print('exactish acc: {}'.format(bits_acc))
print('exact acc: {}'.format(exact_acc))
print('sinking precision: {}'.format(sinking_result.p))
print('\n\n')
if math.isnan(bits_acc):
rejects += 1
else:
bits_acc = min(max(0, bits_acc), 53)
total_sink_prec += sinking_result.p
total_bits_acc += bits_acc
total_sink_acc += (bits_acc - sinking_result.p)
print('{}: {:d} trials with {:d} rejections\n\n'.format(corename, n, rejects))
successes = n - rejects
return total_sink_prec / successes, total_bits_acc / successes, total_sink_acc / successes
def make_table():
for name in corenames:
sink_prec, bits_acc, sink_acc = run_example(name, 5)
print(repr(sink_prec), repr(bits_acc), repr(sink_acc))
def accbench(use_posit=False, es=8, nbits=32):
core = cores['accbench']
if use_posit:
ctx = evalctx.PositCtx(es=es, nbits=nbits)
return posit.Interpreter.interpret(core, [], ctx=ctx)
else:
ctx = evalctx.IEEECtx(es=es, nbits=nbits)
return ieee754.Interpreter.interpret(core, [], ctx=ctx)
annotated_accbench = """(FPCore ()
:name "Accuracy on a 32-bit budget"
(! :precision {} (pow
(! :precision {} (/
(! :precision {} (- (! :precision {} (/ (! :precision binary32 27) (! :precision binary32 10))) (! :precision {} E)))
(! :precision {} (- (! :precision {} PI) (! :precision {} (+ (! :precision {} (sqrt 2)) (! :precision {} (sqrt 3))))))
))
(! :precision {} (/ (! :precision binary32 67) (! :precision binary32 16)))
))
)
"""
accbench_pt1 = """(FPCore ()
(- (/ 27 10) E))
"""
accbench_pt2 = """(FPCore ()
(- PI (+ (sqrt 2) (sqrt 3))))
"""
annotated_accbench_2 = """(FPCore ()
:name "Accuracy on a 32-bit budget"
(! :precision {} (pow
(! :precision {} (/
(! :precision {} (- (/ (! :precision binary32 27) (! :precision binary32 10)) (! :precision {} E)))
(! :precision {} (- (! :precision {} PI) (+ (! :precision {} (sqrt 2)) (! :precision {} (sqrt 3)))))
))
(! :precision {} (/ (! :precision binary32 67) (! :precision binary32 16)))
))
)
"""
def runit(text, es, nbits=32):
core = fpcparser.compile1(text)
ctx = evalctx.IEEECtx(es=es, nbits=nbits)
return float(ieee754.Interpreter.interpret(core, [], ctx=ctx))
def accbench2(precs):
annotations = ['(float {:d} 32)'.format(i) for i in precs]
if len(annotations) < 11:
annotations += ['binary32'] * (11 - len(annotations))
text = annotated_accbench.format(*annotations)
core = fpcparser.compile1(text)
ctx = evalctx.IEEECtx(es=8, nbits=32)
return ieee754.Interpreter.interpret(core, [], ctx=ctx)
def accbench3(annotations, ctx=None):
text = annotated_accbench.format(*annotations)
core = fpcparser.compile1(text)
if ctx is None:
ctx = evalctx.IEEECtx(es=8, nbits=32)
return mpmf.Interpreter.interpret(core, [], ctx=ctx)
def accbench4(annotations, r, ctx):
final_annotations = annotations[0:3] + [r] + annotations[3:4] + [r,r,r] + annotations[4:5]
text = annotated_accbench_2.format(*final_annotations)
core = fpcparser.compile1(text)
return mpmf.Interpreter.interpret(core, [], ctx=ctx)
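# Accuracy metric for accbench: agreement with a high-precision reference
# (via gmpmath.geo_sim10), or None for out-of-range / NaN results.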
reference_answer = ieee754.Float(302.88271965546954925)
def accbench_acc(x):
fx = float(x)
if fx <= 0 or fx >= 1000000 or math.isnan(fx):
return None
else:
return gmpmath.geo_sim10(x, reference_answer)
print(str(reference_answer))
print(str(ieee754.Float(float(accbench2([5, 8, 2, 3, 2, 2, 2, 2, 2, 2, 4])), ctx4096)))
print(accbench_acc(accbench2([5, 8, 2, 3, 2, 2, 2, 2, 2, 2, 4])))
# same benchmark, with posits
thing1 = accbench(True, 1, 32)
print(str(ieee754.Float(thing1, ctx4096)))
print(accbench_acc(thing1))
def accbench_uniform_floats():
for i in range(5,23):
annot = '(float {:d} 32)'.format(i)
ctx = evalctx.IEEECtx(es=i, nbits=32)
result = accbench4([annot] * 5, annot, ctx)
print(str(ieee754.Float(float(result), ctx4096)), accbench_acc(result), sep='\t')
def accbench_uniform_posits():
for i in range(0,18):
annot = '(posit {:d} 32)'.format(i)
ctx = evalctx.PositCtx(es=i, nbits=32)
result = accbench4([annot] * 5, annot, ctx)
print(str(ieee754.Float(float(result), ctx4096)), accbench_acc(result), sep='\t')
def float_n_32(i,j):
return ['(float {:d} 32)'.format(n) for n in range(i,j)]
def posit_n_32(i,j):
return ['(posit {:d} 32)'.format(n) for n in range(i,j)]
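# Exhaustively sweep per-operation precision annotations for accbench (floats,
# posits, or a mix) and print every configuration that beats the best accuracy
# seen so far, with occasional progress output.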
def better_accsweep(kind):
if kind == 'mpmf':
ctx = evalctx.IEEECtx(es=8, nbits=32)
choices = float_n_32(2,9) + posit_n_32(0,5)
choices_r = float_n_32(2,4) + posit_n_32(0,2)
elif kind == 'float':
ctx = evalctx.IEEECtx(es=8, nbits=32)
choices = float_n_32(1,12)
choices_r = float_n_32(1,12)
elif kind == 'posit':
ctx = evalctx.PositCtx(es=1, nbits=32)
choices = posit_n_32(0,11)
choices_r = posit_n_32(0,11)
else:
raise ValueError('kind must be in {"mpmf", "float", "posit"}')
count = 0
choices_C = float_n_32(2,13) + posit_n_32(0,11)
total = (len(choices) ** 4) * len(choices_C) * len(choices_r)
onprog = total // 10
print('sweeping {} configurations'.format(total))
best = None
for A in choices:
for B in choices:
for C in choices_C:
for D in choices:
for E in choices:
for R in choices_r:
result = accbench4([A,B,C,D,E], R, ctx)
acc = accbench_acc(result)
if acc is not None and (best is None or acc > best):
best = acc
print([A,B,C,D,E], R, str(result), acc, sep='\t', flush=True)
count += 1
if count % onprog == 0:
print(count, flush=True)
choices = [2,3,4,5,6,7,8,9]
choices2 = [2,3,4]
choices_c = [2]
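# Brute-force sweep of float exponent widths for all 11 annotation holes of
# the accbench template, printing each new best configuration.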
def accsweep_yolo():
best = None
for i1 in choices: # pow (A)
for i2 in choices: # div (B)
for i3 in choices2: # sub 2.7 - E (C)
for i4 in choices2: # div -> 2.7
for i5 in choices_c: # E
for i6 in choices2: # sub PI - sqrt2 - sqrt3 (D)
for i7 in choices_c: # PI
for i8 in choices_c: # sqrt2
for i9 in choices_c: # sqrt3
for i10 in choices2: # add sqrt2 + sqrt3
for i11 in choices: # div -> 67/16 (E)
result = accbench2([i1,i2,i3,i4,i5,i6,i7,i8,i9,i10,i11])
acc = accbench_acc(result)
if acc is not None and (best is None or acc > best):
best = acc
print([i1,i2,i3,i4,i5,i6,i7,i8,i9,i10,i11])
print(str(result))
print(acc)
sinkref = ieee754.Float(7.741315095243402580371)
def sinkbench_acc(x):
fx = float(x)
if fx <= 0 or fx >= 1000000 or math.isnan(fx):
return None
else:
return gmpmath.geo_sim(x, sinkref)
def sinkbench(w, p):
core = cores['sinkbench']
ctx = evalctx.IEEECtx(es=w, nbits=w + p)  # w exponent bits + p significand bits
return sinking.Interpreter.interpret(core, [], ctx=ctx)
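# Sweep 32-bit float-like formats (w exponent bits, p precision bits) through
# the 'sinkbench' core and print the reported precision and measured accuracy.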
def sinksweep():
print('{},{}\t{}'.format('w', 'p', str(sinkref)))
for i in range(2, 31):
w = i
p = 32 - i
sunk = sinkbench(w, p)
bits_acc = sinkbench_acc(sunk)
#print('{:d},{:d}\t{:12s}\t{:d}, {:d}, {}'.format(w, p, str(sunk) + str(sunk.n + 1), sunk.p, sunk.n + 1, bits_acc))
if bits_acc is None:
bits_acc = -1
print('{:d} & {:s} & {:d} & {:0.1f}'.format(w, str(sunk), sunk.p, bits_acc))
def quadratic(a, herbified=False):
if herbified:
core = cores['herbified_quadratic']
else:
core = cores['quadratic']
ctx = evalctx.IEEECtx(es=11, nbits=64)
inputs = [sinking.Sink(a, ctx), sinking.Sink(2.0, ctx), sinking.Sink(3.0, ctx)]
result = sinking.Interpreter.interpret(core, inputs, ctx=ctx)
print(str(result))
print(result.p)
print(result.n + 1)
return result
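# Reproduce the sinking-point demo numbers: the pi +/- 1e16 cancellation
# example, the sinksweep table, and the quadratic formula with and without the
# 'herbified' rewrite.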
def congapaper():
print('pi')
print(str(sinking.Sink(math.pi).add(sinking.Sink('1e16')).sub(sinking.Sink('1e16'))))
print()
print('sinksweep')
sinksweep()
print()
quadargs = ['0.1', '0.001', '1e-9', '1e-15', '1e-16', '1e-17']
print('quadratic')
for a in quadargs:
print(' ' + a)
quadratic(a, herbified=False)
print()
print('herbified quadratic')
for a in quadargs:
print(' ' + a)
quadratic(a, herbified=True)
print()
arclen_fpcore = """(FPCore ((! :precision (fixed 64 -1) n))
:name "arclength"
:cite (precimonious-2013)
:precision {}
:pre (>= n 0)
(let ((dppi (acos -1)))
(let ((h (/ dppi n)))
(while
(<= i n)
((s1
0
(let ((t2
(let ((x (* i h)))
(while
(<= k 5)
((d0
(! :precision binary32 2)
(! :precision binary32 (* 2 d0)))
(t0 x (+ t0 (/ (sin (* d0 x)) d0)))
(k (! :precision binary32 1) (! :precision binary32 (+ k 1))))
t0))))
(let ((s0 (sqrt (+ (* h h) (* (- t2 t1) (- t2 t1))))))
(! :precision {} (+ s1 s0)))))
(t1
0
(let ((t2
(let ((x (* i h)))
(while
(<= k 5)
((d0
(! :precision binary32 2)
(! :precision binary32 (* 2 d0)))
(t0 x (+ t0 (/ (sin (* d0 x)) d0)))
(k (! :precision binary32 1) (! :precision binary32 (+ k 1))))
t0))))
t2))
(i
(! :precision (fixed 64 -1) 1)
(! :precision (fixed 64 -1) (+ i 1))))
s1))))
"""
old_arclen_fpcore = """(FPCore ((! :precision (fixed 64 -1) n))
:name "arclength"
:cite (precimonious-2013)
:precision {}
:pre (>= n 0)
(let ((dppi (acos -1)))
(let ((h (/ dppi n)))
(while
(<= i n)
((s1
0
(let ((t2
(let ((x (* i h)))
(while
(<= k 5)
((d0
(! :precision binary32 2)
(! :precision binary32 (* 2 d0)))
(t0 x (+ t0 (/ (sin (* d0 x)) d0)))
(k (! :precision binary32 1) (! :precision binary32 (+ k 1))))
t0))))
(let ((s0 (sqrt (+ (* h h) (* (- t2 t1) (- t2 t1))))))
(! :precision {} (+ s1 s0)))))
(t1
0
(let ((t2
(let ((x (* i h)))
(while
(<= k 5)
((d0
(! :precision binary32 2)
(! :precision binary32 (* 2 d0)))
(t0 x (+ t0 (/ (sin (* d0 x)) d0)))
(k (! :precision binary32 1) (! :precision binary32 (+ k 1))))
t0))))
t2))
(i
(! :precision (fixed 64 -1) 1)
(! :precision (fixed 64 -1) (+ i 1))))
s1))))
"""
arclen_fpcore = """(FPCore ((! :precision binary64 n))
:name "arclength"
:cite (precimonious-2013)
:precision {}
:pre (>= n 0)
(let* ([dppi PI]
[h (/ dppi n)]
[t1 0])
(while* (<= i n)
([t2 0
(let ([x (* i h)])
(while* (<= k 5)
([d1 (! :precision binary32 1)
(! :precision binary32 (* d1 2))]
[t1 x
(+ t1 (/ (sin (* d1 x)) d1))]
[k (! :precision binary32 1) (! :precision binary32 (+ k 1))])
t1))]
[s1 0
(let ([s0 (sqrt (+ (* h h) (* (- t2 t1) (- t2 t1))))])
(! :precision {} (+ s1 s0)))]
[t1 t1 t2]
[i (! :precision binary64 1) (! :precision binary64 (+ i 1))])
s1)))
"""
arclen_reference = ieee754.Float('5.7957763227371650583')
def arclen_acc(x):
fx = float(x)
if fx <= 0 or fx >= 1000000 or math.isnan(fx):
return None
else:
return gmpmath.geo_sim(x, arclen_reference)
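# Instantiate the arclength core with an overall precision and an accumulator
# precision, run it for a given n, and return the result together with its
# accuracy against the reference value.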
def arclen_bench(overall, accumulate, n, interpreter=mpmf.Interpreter):
text = arclen_fpcore.format(overall, accumulate)
core = fpcparser.compile1(text)
result = interpreter.interpret(core, [n], ctx=None)
return result, arclen_acc(result)
def print_arclen_bench(overall, accumulate, n, interpreter=mpmf.Interpreter):
print('{}\t{}\t{}\t'.format(overall, accumulate, str(n)), end='', flush=True)
value, acc = arclen_bench(overall, accumulate, n, interpreter=interpreter)
print('{}\t{}'.format(str(value), 'None' if acc is None else '{:.2f}'.format(float(acc))), flush=True)
print('')
# repro
def tab1_a():
repro_n = 100000
print_arclen_bench('(float 16 64)', '(float 16 64)', repro_n)
print_arclen_bench('float64', '(float 16 64)', repro_n)
print_arclen_bench('float64', 'float64', repro_n)
print_arclen_bench('float32', '(float 16 64)', repro_n)
def tab1_b():
print_arclen_bench('(float 16 64)', '(float 16 64)', 10000)
print_arclen_bench('(float 16 64)', '(float 16 64)', 1000)
print_arclen_bench('(float 16 64)', '(float 16 64)', 100)
print_arclen_bench('float32', 'float32', 100)
def tab2_a():
repro_n = 100000
# print_arclen_bench('(float 16 64)', '(fixed 128 -126)', repro_n)
# print_arclen_bench('posit64', '(fixed 128 -126)', repro_n)
# print_arclen_bench('posit32', '(fixed 128 -126)', repro_n)
print_arclen_bench('posit32', 'posit32', repro_n)
def tab2_b():
print_arclen_bench('posit16', '(fixed 64 -49)', 10)
print_arclen_bench('posit16', '(fixed 64 -49)', 100)
print_arclen_bench('posit16', '(fixed 64 -49)', 1000)
print_arclen_bench('posit8', '(fixed 64 -49)', 10)
print_arclen_bench('posit8', '(fixed 64 -49)', 100)
print_arclen_bench('posit8', '(fixed 64 -49)', 1000)
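# Print a tab-separated accuracy table: rows are accumulator widths (as
# fixed-point formats), columns are values of n.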
def sweep_accumulate(overall, start, end, n_start, n_end, n_step):
print('prec\t' + '\t'.join(str(n) for n in range(n_start, n_end + n_step, n_step)), flush=True)
for accbits in range(start, end+1):
print(accbits, end='')
for n in range(n_start, n_end+n_step, n_step):
accumulate = '(fixed {} {})'.format(accbits, (-accbits) + 2)
result, acc = arclen_bench(overall, accumulate, n)
if acc is None or acc < 0:
acc = 0
print('\t{:.3f}'.format(acc), end='')
print(flush=True)
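# Same kind of table, but sweeping the overall posit width while holding the
# accumulator format fixed.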
def sweep_overall(accumulate, start, end, n_start, n_end, n_step):
print('prec\t' + '\t'.join(str(n) for n in range(n_start, n_end + n_step, n_step)), flush=True)
for accbits in range(start, end+1):
print(accbits, end='')
for n in range(n_start, n_end+n_step, n_step):
overall = '(posit 1 {})'.format(accbits)
result, acc = arclen_bench(overall, accumulate, n)
if acc is None or acc < 0:
acc = 0
print('\t{:.3f}'.format(acc), end='')
print(flush=True)
#sweep_accumulate('(posit 1 16)', 3, 20, 5, 200, 5)
#sweep_overall('(fixed 64 -49)', 8, 16, 5, 200, 5)
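# More benchmark tables: different overall/accumulator formats (ada_tab1,
# ada_tab1b) and the same runs under the sinking-point interpreter (ada_tab2).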
def ada_tab1():
print_arclen_bench('binary64', '(fixed 128 -120)', 10000)
print_arclen_bench('binary64', '(float 15 64)', 10000)
print_arclen_bench('binary32', 'binary32', 10000)
print_arclen_bench('binary64', '(fixed 128 -120)', 100000)
print_arclen_bench('binary64', '(float 15 64)', 100000)
print_arclen_bench('binary32', 'binary32', 100000)
print_arclen_bench('posit32', 'posit32', 10000)
print_arclen_bench('posit32', 'posit32', 100000)
print_arclen_bench('posit32', '(fixed 128 -120)', 100000)
def ada_tab1b():
print_arclen_bench('posit16', '(fixed 64 -49)', 1000)
print_arclen_bench('(float 5 11)', '(fixed 64 -49)', 1000)
print_arclen_bench('(float 8 8)', '(fixed 64 -49)', 1000)
print_arclen_bench('posit8', '(fixed 64 -49)', 10)
print_arclen_bench('posit8', '(fixed 64 -49)', 100)
print_arclen_bench('posit8', '(fixed 64 -49)', 1000)
def ada_tab2():
print_arclen_bench('binary64', '(float 15 64)', 10000, interpreter=sinking.Interpreter)
print_arclen_bench('binary64', '(float 15 64)', 100000, interpreter=sinking.Interpreter)
print_arclen_bench('binary32', 'binary32', 10000, interpreter=sinking.Interpreter)
print_arclen_bench('binary32', 'binary32', 100000, interpreter=sinking.Interpreter)
print_arclen_bench('binary32', '(float 15 64)', 10000, interpreter=sinking.Interpreter)
print_arclen_bench('binary32', '(float 15 64)', 100000, interpreter=sinking.Interpreter)
| mit |