repo_name | path | copies | size | content | license
---|---|---|---|---|---|
aestrivex/mne-python | examples/inverse/plot_compute_mne_inverse_raw_in_label.py | 19 | 1614 | """
=============================================
Compute sLORETA inverse solution on raw data
=============================================
Compute the sLORETA inverse solution on raw data restricted
to a brain label and store the solution in stc files for
visualisation.
"""
# Author: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne.datasets import sample
from mne.io import Raw
from mne.minimum_norm import apply_inverse_raw, read_inverse_operator
print(__doc__)
data_path = sample.data_path()
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
fname_raw = data_path + '/MEG/sample/sample_audvis_raw.fif'
label_name = 'Aud-lh'
fname_label = data_path + '/MEG/sample/labels/%s.label' % label_name
snr = 1.0 # use smaller SNR for raw data
lambda2 = 1.0 / snr ** 2
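# lambda2 is the regularization parameter passed to apply_inverse_raw below;
# deriving it as 1 / snr**2 from an assumed SNR is the usual MNE convention.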
method = "sLORETA" # use sLORETA method (could also be MNE or dSPM)
# Load data
raw = Raw(fname_raw)
inverse_operator = read_inverse_operator(fname_inv)
label = mne.read_label(fname_label)
start, stop = raw.time_as_index([0, 15]) # read the first 15s of data
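# time_as_index converts times in seconds to integer sample indices; start and
# stop are then passed to apply_inverse_raw so only the first 15 s are inverted.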
# Compute inverse solution
stc = apply_inverse_raw(raw, inverse_operator, lambda2, method, label,
start, stop, pick_ori=None)
# Save result in stc files
stc.save('mne_%s_raw_inverse_%s' % (method, label_name))
###############################################################################
# View activation time-series
plt.plot(1e3 * stc.times, stc.data[::100, :].T)
plt.xlabel('time (ms)')
plt.ylabel('%s value' % method)
plt.show()
| bsd-3-clause |
murali-munna/scikit-learn | examples/ensemble/plot_voting_probas.py | 316 | 2824 | """
===========================================================
Plot class probabilities calculated by the VotingClassifier
===========================================================
Plot the class probabilities of the first sample in a toy dataset
predicted by three different classifiers and averaged by the
`VotingClassifier`.
First, three exemplary classifiers are initialized (`LogisticRegression`,
`GaussianNB`, and `RandomForestClassifier`) and used to initialize a
soft-voting `VotingClassifier` with weights `[1, 1, 5]`, which means that
the predicted probabilities of the `RandomForestClassifier` count 5 times
as much as those of the other classifiers when the averaged probability
is calculated.
To visualize the probability weighting, we fit each classifier on the training
set and plot the predicted class probabilities for the first sample in this
example dataset.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.0, -1.0], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
y = np.array([1, 1, 2, 2])
eclf = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[1, 1, 5])
# predict class probabilities for all classifiers
probas = [c.fit(X, y).predict_proba(X) for c in (clf1, clf2, clf3, eclf)]
# get class probabilities for the first sample in the dataset
class1_1 = [pr[0, 0] for pr in probas]
class2_1 = [pr[0, 1] for pr in probas]
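# Added illustration (not part of the upstream example): with voting='soft',
# the VotingClassifier averages the estimators' predict_proba outputs using
# the supplied weights, taken in the estimator order given above, so the last
# entry of `probas` should match this weighted mean:
manual_avg = np.average(probas[:3], axis=0, weights=[1, 1, 5])
# np.allclose(manual_avg, probas[-1]) is expected to hold here.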
# plotting
N = 4 # number of groups
ind = np.arange(N) # group positions
width = 0.35 # bar width
fig, ax = plt.subplots()
# bars for classifier 1-3
p1 = ax.bar(ind, np.hstack(([class1_1[:-1], [0]])), width, color='green')
p2 = ax.bar(ind + width, np.hstack(([class2_1[:-1], [0]])), width, color='lightgreen')
# bars for VotingClassifier
p3 = ax.bar(ind, [0, 0, 0, class1_1[-1]], width, color='blue')
p4 = ax.bar(ind + width, [0, 0, 0, class2_1[-1]], width, color='steelblue')
# plot annotations
plt.axvline(2.8, color='k', linestyle='dashed')
ax.set_xticks(ind + width)
ax.set_xticklabels(['LogisticRegression\nweight 1',
'GaussianNB\nweight 1',
'RandomForestClassifier\nweight 5',
'VotingClassifier\n(average probabilities)'],
rotation=40,
ha='right')
plt.ylim([0, 1])
plt.title('Class probabilities for sample 1 by different classifiers')
plt.legend([p1[0], p2[0]], ['class 1', 'class 2'], loc='upper left')
plt.show()
| bsd-3-clause |
PatrickChrist/scikit-learn | examples/linear_model/plot_iris_logistic.py | 283 | 1678 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logistic Regression 3-class Classifier
=========================================================
Shown below are the decision boundaries of a logistic-regression classifier on
the `iris <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ dataset. The
data points are colored according to their labels.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
h = .02 # step size in the mesh
logreg = linear_model.LogisticRegression(C=1e5)
# we create an instance of the LogisticRegression classifier and fit the data.
logreg.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()])
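# Possible variation (sketch, not in the original example): a softer decision
# map could be drawn from class probabilities instead of hard labels, e.g.
# probs = logreg.predict_proba(np.c_[xx.ravel(), yy.ravel()]).max(axis=1)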
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1, figsize=(4, 3))
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
ecino/compassion-modules | sbc_compassion/__manifest__.py | 2 | 2915 | # -*- coding: utf-8 -*-
##############################################################################
#
# ______ Releasing children from poverty _
# / ____/___ ____ ___ ____ ____ ___________(_)___ ____
# / / / __ \/ __ `__ \/ __ \/ __ `/ ___/ ___/ / __ \/ __ \
# / /___/ /_/ / / / / / / /_/ / /_/ (__ |__ ) / /_/ / / / /
# \____/\____/_/ /_/ /_/ .___/\__,_/____/____/_/\____/_/ /_/
# /_/
# in Jesus' name
#
# Copyright (C) 2015-2017 Compassion CH (http://www.compassion.ch)
# @author: Emanuel Cino <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# pylint: disable=C8101
{
'name': "Sponsor to beneficiary communication",
'version': '10.0.1.6.0',
'category': 'Other',
'summary': "SBC - Supporter to Beneficiary Communication",
'sequence': 150,
'author': 'Compassion CH',
'license': 'AGPL-3',
'website': 'http://www.compassion.ch',
'depends': ['sponsorship_compassion', 'web_tree_image'],
'external_dependencies': {
'python': ['magic', 'wand', 'numpy', 'pyzbar', 'pdfminer',
'matplotlib', 'pyPdf', 'bs4', 'HTMLParser']
},
'data': [
'security/ir.model.access.csv',
'views/config_view.xml',
'views/contracts_view.xml',
'views/partner_compassion_view.xml',
'views/lang_compassion_view.xml',
'views/correspondence_view.xml',
'views/import_letters_history_view.xml',
'views/correspondence_template_view.xml',
'views/correspondence_template_page_view.xml',
'views/correspondence_template_crosscheck_view.xml',
'views/import_review_view.xml',
'views/download_letters_view.xml',
'views/get_letter_image_wizard_view.xml',
'views/correspondence_s2b_generator_view.xml',
'views/last_writing_report_view.xml',
'data/correspondence_template_data.xml',
'data/correspondence_type.xml',
'data/child_layouts.xml',
'data/gmc_action.xml',
],
'demo': ['demo/correspondence_template.xml',
],
'installable': True,
'auto_install': False,
}
| agpl-3.0 |
reallyasi9/riddlers | boggle/visualization/plot.py | 1 | 1057 | #!/usr/bin/python3
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
df = pd.read_csv('boggle.csv', header=None,
names=['time','score'], usecols=[1,2])
x = df['time'] / 1000 # seconds
y = df['score'] # points
plt.figure(0)
plt.semilogx(x, y, color="#c05131", lw=2)
plt.hlines(4540, xmin=1e-1, xmax=1e17, color="#6c6f70", lw=1, linestyle='dashed')
plt.plot([x.max(), 1e17], [y.max(), 4540], color="#ef8200", lw=2, linestyle='dotted')
plt.ylabel("best score so far")
plt.xlabel("simulation time (s)")
plt.grid(b=True, which='major', axis='x', color="#6c6f70", lw=.5, linestyle='dotted')
boxstyle = {'fc':'white', 'lw':0, 'boxstyle':'round4'}
plt.text(.5, 2000, 'my simulations', color="#c05131", bbox=boxstyle)
plt.text(.5, 4300, 'best score from Sedgewick & Wayne students', color="#6c6f70", bbox=boxstyle)
plt.text(1e10, 2500, 'performance required\nto beat S&W students\nbefore sun goes nova', color="#ef8200", bbox=boxstyle)
plt.title("Plot to convince my wife I need to upgrade my computer")
plt.savefig('score.png')
| gpl-3.0 |
dougalsutherland/pummeler | pummeler/reader.py | 1 | 12549 | from copy import deepcopy
from functools import lru_cache
from pathlib import Path
import pandas as pd
weirds = """
SERIALNO
indp02 indp07 INDP
OCCP occp02 occp10 OCCP10 OCCP12
socp00 socp10 SOCP10 SOCP12 SOCP
naicsp02 naicsp07 NAICSP
""".split()
def read_chunks(
fname,
version,
chunksize=10 ** 5,
voters_only=False,
adj_inc=None,
adj_hsg=None,
housing_source=None, # func from (state, puma) => filename
housing_cache_size=8,
):
info = VERSIONS[version]
dtypes = {}
for k in info["meta_cols"] + info["discrete_feats"] + info["alloc_flags"]:
dtypes[k] = "category"
for k in info["real_feats"]:
dtypes[k] = "float64"
for k in info["weight_cols"]:
dtypes[k] = "Int64"
dtypes["SERIALNO"] = dtypes["serialno"] = "string"
if adj_inc and not info.get("to_adjinc"):
adj_inc = False
if adj_hsg and not info.get("to_adjhsg"):
adj_hsg = False
if housing_source is not None:
def get_housing_files(st_pumas):
return pd.concat(
[
load_file(fn)
for fn in {housing_source(st, puma) for st, puma in st_pumas}
]
)
@lru_cache(maxsize=housing_cache_size)
def load_file(fn):
fn = Path(fn)
if fn.suffix in {".pq", ".parquet"}:
df = pd.read_parquet(fn)
elif fn.suffix in {".h5", ".hdf5"}:
df = pd.read_hdf(fn)
else:
raise ValueError(f"unknown file format {fn.suffix!r}")
df.drop(
columns=["RT", "ST", "PUMA", "ADJINC_orig"],
errors="ignore",
inplace=True,
)
return df
chunks = pd.read_csv(
fname,
skipinitialspace=True,
na_values={k: ["N.A.", "N.A.//", "N.A.////"] for k in weirds},
dtype=dtypes,
chunksize=chunksize,
)
renames = None
for chunk in chunks:
if info.get("puma_subset", False):
puma_key = "PUMA{}".format(info["region_year"])
chunk = chunk[chunk[puma_key] != -9]
if chunk.shape[0] == 0:
continue
chunk["PUMA"] = chunk[puma_key]
del chunk["PUMA00"]
del chunk["PUMA10"]
if voters_only:
chunk = chunk[(chunk.AGEP >= 18) & (chunk.CIT != 5)]
if chunk.shape[0] == 0:
continue
if "drop_feats" in info: # TODO: pass usecols to read_csv instead...
chunk.drop(info["drop_feats"], axis=1, inplace=True)
if "renames" in info:
if renames is None:
renames = [info["renames"].get(k, k) for k in chunk.columns]
chunk.columns = renames
if adj_inc is None:
if "ADJINC" in chunk:
adj_inc = True
elif "ADJINC_orig" in chunk:
adj_inc = False
else:
raise ValueError(
"Unclear whether income has been adjusted, "
"and adj_inc is None; pass either True or "
"False explicitly"
)
if adj_inc:
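# ADJINC is distributed in the ACS PUMS as an integer carrying six implied
# decimal places (per the PUMS data dictionary), which is why it is divided
# by 1e6 here to recover the actual income adjustment factor.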
adj = chunk.ADJINC / 1e6
for k in info["to_adjinc"]:
chunk[k] *= adj
chunk.rename(columns={"ADJINC": "ADJINC_orig"}, inplace=True)
if adj_hsg is None:
if "ADJHSG" in chunk:
adj_hsg = True
elif "ADJHSG_orig" in chunk:
adj_hsg = False
else:
raise ValueError(
"Unclear whether income has been adjusted, "
"and adj_hsg is None; pass either True or "
"False explicitly"
)
if adj_hsg:
adj = chunk.ADJHSG / 1e6
for k in info["to_adjhsg"]:
chunk[k] *= adj
chunk.rename(columns={"ADJHSG": "ADJHSG_orig"}, inplace=True)
if housing_source is not None:
housing = get_housing_files(chunk.groupby(["ST", "PUMA"]).groups)
chunk = chunk.merge(housing, on="SERIALNO", suffixes=(False, False))
yield chunk
def _s(s):
return sorted(s.split())
VERSIONS = {}
VERSIONS["2006-10"] = {
"weight_cols": ["PWGTP"] + ["PWGTP{}".format(i) for i in range(1, 81)],
"meta_cols": "RT SPORDER serialno PUMA ST".split(),
"discrete_feats": _s(
"""
CIT COW ENG FER GCL GCM GCR JWRIP JWTR LANX MAR MIG MIL MLPA MLPB
MLPC MLPD MLPE MLPF MLPG MLPH MLPI MLPJ MLPK NWAB NWAV NWLA NWLK
NWRE RELP SCH SCHG SCHL SEX WKL WKW ANC ANC1P ANC2P DECADE DRIVESP
ESP ESR HISP indp02 indp07 LANP MIGPUMA MIGSP MSP naicsp02 naicsp07
NATIVITY NOP OC occp02 occp10 PAOC POBP POWPUMA POWSP QTRBIR RAC1P
RAC2P RAC3P RACAIAN RACASN RACBLK RACNHPI RACSOR RACWHT RC SFN SFR
socp00 socp10 VPS WAOB"""
),
"alloc_flags": _s(
"""
FAGEP FANCP FCITP FCOWP FENGP FESRP FFERP FGCLP FGCMP FGCRP
FHISP FINDP FINTP FJWDP FJWMNP FJWRIP FJWTRP FLANP FLANXP FMARP
FMIGP FMIGSP FMILPP FMILSP FOCCP FOIP FPAP FPOBP FPOWSP FRACP
FRELP FRETP FSCHGP FSCHLP FSCHP FSEMP FSEXP FSSIP FSSP FWAGP
FWKHP FWKLP FWKWP FYOEP"""
),
"real_feats": _s(
"""
AGEP INTP JWMNP OIP PAP RETP SEMP SSIP SSP WAGP WKHP YOEP JWAP
JWDP PERNP PINCP POVPIP RACNUM"""
),
"to_adjinc": _s("INTP OIP PAP PERNP PINCP RETP SEMP SSIP SSP WAGP"),
"region_year": "00",
}
VERSIONS["2007-11"] = VERSIONS["2006-10"]
VERSIONS["2010-14_12-14"] = {
"weight_cols": ["PWGTP"] + ["PWGTP{}".format(i) for i in range(1, 81)],
"meta_cols": _s("RT SPORDER serialno PUMA ST"),
"alloc_flags": _s(
"""
FAGEP FCITP FCITWP FCOWP FDDRSP FDEARP FDEYEP FDOUTP FDPHYP
FDRATP FDRATXP FDREMP FENGP FESRP FFERP FFODP FGCLP FGCMP FGCRP
FHINS1P FHINS2P FHINS3C FHINS3P FHINS4C FHINS4P FHINS5C FHINS5P
FHINS6P FHINS7P FHISP FINDP FINTP FJWDP FJWMNP FJWRIP FJWTRP
FLANP FLANXP FMARHDP FMARHMP FMARHTP FMARHWP FMARHYP FMARP FMIGP
FMIGSP FMILPP FMILSP FOCCP FOIP FPAP FPOBP FPOWSP FRACP FRELP
FRETP FSCHGP FSCHLP FSCHP FSEMP FSEXP FSSIP FSSP FWAGP FWKHP
FWKLP FWKWP FWRKP FYOEP"""
),
"discrete_feats": _s(
"""
CIT COW DDRS DEAR DEYE DOUT DPHY DRAT DRATX DREM ENG FER GCL GCM
GCR HINS1 HINS2 HINS3 HINS4 HINS5 HINS6 HINS7 JWRIP JWTR LANX
MAR MARHD MARHM MARHT MARHW MARHYP MIG MIL MLPA MLPB MLPCD MLPE
MLPFG MLPH MLPI MLPJ MLPK NWAB NWAV NWLA NWLK NWRE RELP SCH SCHG
SCHL SEX WKL WKW WRK ANC ANC1P ANC2P DECADE DIS DRIVESP ESP ESR
FOD1P FOD2P HICOV HISP INDP LANP MIGPUMA MIGSP MSP NAICSP NATIVITY
NOP OC OCCP PAOC POBP POWPUMA POWSP PRIVCOV PUBCOV QTRBIR RAC1P
RAC2P RAC3P RACAIAN RACASN RACBLK RACNHPI RACSOR RACWHT RC SCIENGP
SCIENGRLP SFN SFR SOCP VPS WAOB"""
),
"real_feats": _s(
"""
AGEP CITWP INTP JWMNP OIP PAP RETP SEMP SSIP SSP WAGP WKHP YOEP
JWAP JWDP PERNP PINCP POVPIP RACNUM"""
),
"to_adjinc": _s("INTP OIP PAP PERNP PINCP RETP SEMP SSIP SSP WAGP"),
"drop_feats": _s(
"""
ANC1P05 ANC2P05 FANCP LANP05 MARHYP05 MIGPUMA00 MIGSP05 POBP05
POWPUMA00 POWSP05 RAC2P05 RAC3P05 SOCP10 YOEP05 CITWP05 OCCP10"""
),
# Always blank for 12-14 data
"renames": {
"ANC1P12": "ANC1P",
"ANC2P12": "ANC2P",
"LANP12": "LANP",
"MARHYP12": "MARHYP",
"MIGPUMA10": "MIGPUMA",
"MIGSP12": "MIGSP",
"OCCP12": "OCCP",
"POBP12": "POBP",
"POWPUMA10": "POWPUMA",
"POWSP12": "POWSP",
"RAC2P12": "RAC2P",
"RAC3P12": "RAC3P",
"SOCP12": "SOCP",
"YOEP12": "YOEP",
"CITWP12": "CITWP",
},
"region_year": "10",
"puma_subset": True,
}
VERSIONS["2011-15_12-15"] = v = deepcopy(VERSIONS["2010-14_12-14"])
v["alloc_flags"] = sorted(
v["alloc_flags"] + "FDISP FPINCP FPUBCOVP FPERNP FPRIVCOVP".split()
)
VERSIONS["2012-16"] = {
"weight_cols": ["PWGTP"] + ["PWGTP{}".format(i) for i in range(1, 81)],
"meta_cols": _s("RT SPORDER SERIALNO PUMA ST"),
"alloc_flags": _s(
"""
FAGEP FANCP FCITP FCITWP FCOWP FDDRSP FDEARP FDEYEP FDISP FDOUTP
FDPHYP FDRATP FDRATXP FDREMP FENGP FESRP FFERP FFODP FGCLP FGCMP
FGCRP FHINS1P FHINS2P FHINS3C FHINS3P FHINS4C FHINS4P
FHINS5C FHINS5P FHINS6P FHINS7P FHISP FINDP FINTP FJWDP FJWMNP
FJWRIP FJWTRP FLANP FLANXP FMARHDP FMARHMP FMARHTP FMARHWP FMARHYP
FMARP FMIGP FMIGSP FMILPP FMILSP FOCCP FOIP FPAP FPERNP FPINCP
FPOBP FPOWSP FPRIVCOVP FPUBCOVP FRACP FRELP FRETP FSCHGP FSCHLP
FSCHP FSEMP FSEXP FSSIP FSSP FWAGP FWKHP FWKLP FWKWP FWRKP
FYOEP
"""
),
"discrete_feats": _s(
"""
CIT COW DDRS DEAR DEYE DOUT DPHY DRAT DRATX DREM ENG FER GCL GCM
GCR HINS1 HINS2 HINS3 HINS4 HINS5 HINS6 HINS7 JWRIP JWTR LANX MAR
MARHD MARHM MARHT MARHW MARHYP MIG MIL MLPA MLPB MLPCD MLPE MLPFG
MLPH MLPI MLPJ MLPK NWAB NWAV NWLA NWLK NWRE RELP SCH SCHG SCHL SEX
WKL WKW WRK ANC ANC1P ANC2P DECADE DIS DRIVESP ESP ESR FOD1P FOD2P
HICOV HISP INDP LANP MIGPUMA MIGSP MSP NAICSP NATIVITY NOP OC OCCP
PAOC POBP POWPUMA POWSP PRIVCOV PUBCOV QTRBIR RAC1P RAC2P RAC3P
RACAIAN RACASN RACBLK RACNH RACPI RACSOR RACWHT RC SCIENGP SCIENGRLP
SFN SFR SOCP VPS WAOB
"""
),
"real_feats": _s(
"""
AGEP CITWP INTP JWMNP OIP PAP RETP SEMP SSIP SSP WAGP WKHP YOEP JWAP
JWDP PERNP PINCP POVPIP RACNUM
"""
),
"to_adjinc": _s("INTP OIP PAP PERNP PINCP RETP SEMP SSIP SSP WAGP"),
"renames": {"pwgtp{}".format(i): "PWGTP{}".format(i) for i in range(1, 81)},
"region_year": "10",
}
VERSIONS["2013-17"] = v = deepcopy(VERSIONS["2012-16"])
v["alloc_flags"] = sorted(v["alloc_flags"] + ["FHICOVP"])
v["drop_feats"] = sorted(v.get("drop_feats", []) + "REGION DIVISION".split())
VERSIONS["2014-18"] = deepcopy(VERSIONS["2013-17"])
VERSIONS["2015"] = deepcopy(VERSIONS["2013-17"])
VERSIONS["housing_2014-18"] = v = {
"weight_cols": ["WGTP"] + [f"WGTP{i}" for i in range(1, 81)],
"meta_cols": _s("RT SERIALNO PUMA ST"),
"alloc_flags": _s(
"""
FACCESSP FACRP FAGSP FBATHP FBDSP FBLDP FBROADBNDP FCOMPOTHXP
FBUSP FCONP FDIALUPP FELEP FFINCP FFSP FFULP FGASP FGRNTP
FHFLP FHINCP FHISPEEDP FHOTWATP FINSP FKITP FLAPTOPP FMHP FMRGIP
FMRGP FMRGTP FMRGXP FMVP FOTHSVCEXP FPLMP FPLMPRP FREFRP FRMSP
FRNTMP FRNTP FRWATP FRWATPRP FSATELLITEP FSINKP FSMARTPHONP
FSMOCP FSMP FSMXHP FSMXSP FSTOVP FTABLETP FTAXP FTELP FTENP
FTOILP FVACSP FVALP FVEHP FWATP FYBLP
"""
),
"discrete_feats": _s(
"""
NP TYPE
ACCESS ACR AGS BATH BDSP BLD BUS BROADBND COMPOTHX DIALUP
FS HFL HISPEED HOTWAT LAPTOP
MRGI MRGT MRGX OTHSVCEX REFR RMSP RNTM RWAT RWATPR
SATELLITE SINK SMARTPHONE STOV TABLET TEL TEN TOIL VACS
VEH YBL FES
FPARC
HHL HHT HUGCL HUPAC HUPAOC HUPARC KIT LNGI MULTG
MV NOC NPF NPP NR NRC PARTNER PLM PLMPRP PSF R18 R60 R65
RESMODE SMX SRNT SSMC SVAL WIF WKEXREL WORKSTAT
ELEFP FULFP GASFP WATFP
"""
),
"real_feats": _s("VALP GRPIP OCPIP"),
"to_adjhsg": _s(
"""
CONP ELEP FULP GASP INSP MHP MRGP RNTP SMP WATP
GRNTP SMOCP TAXAMT
"""
),
"to_adjinc": _s("FINCP HINCP"),
"drop_feats": _s("DIVISION REGION"),
"region_year": "10",
}
v["real_feats"] = sorted(set(v["real_feats"]) | set(v["to_adjhsg"]))
v["real_feats"] = sorted(set(v["real_feats"]) | set(v["to_adjinc"]))
def version_info_with_housing(name, housing_name=None):
if housing_name is None:
housing_name = f"housing_{name}"
h = VERSIONS[housing_name]
v = deepcopy(VERSIONS[name])
v["real_feats"] += h["real_feats"]
v["discrete_feats"] += h["discrete_feats"]
v["alloc_flags"] += h["alloc_flags"]
v["weight_cols"] += h["weight_cols"]
return v
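# Usage sketch (hypothetical call; both keys are defined above): for example,
# version_info_with_housing("2014-18") merges VERSIONS["2014-18"] with
# VERSIONS["housing_2014-18"] into a single schema dict.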
| mit |
sergiopasra/pyemir | setup.py | 3 | 4552 | #!/usr/bin/env python
from setuptools import setup, find_packages
setup(name='pyemir',
version='0.16.dev0',
author='Sergio Pascual',
author_email='[email protected]',
url='https://github.com/guaix-ucm/pyemir',
license='GPLv3',
description='EMIR Data Processing Pipeline',
packages=find_packages(),
package_data={
'emirdrp.simulation': ['*.dat'],
'emirdrp.instrument.configs': [
'bars_nominal_positions_test.txt',
'component-*.json',
'instrument-*.json',
'lines_argon_neon_xenon_empirical.dat',
'lines_argon_neon_xenon_empirical_LR.dat',
'Oliva_etal_2013.dat',
'setup-*.json'
],
'emirdrp': ['drp.yaml'],
},
test_suite="emirdrp.tests",
install_requires=[
'setuptools>=36.2.1', 'numpy', 'scipy',
'numina>=0.22', 'astropy>=2',
'enum34;python_version<"3.4"',
'matplotlib', 'six', 'photutils>=0.2',
'sep>0.5', 'scikit-image>=0.11', 'scikit-learn>=0.19', 'lmfit'
],
extras_require={
'tools': ["PyQt5"],
'docs': ["sphinx"],
'test': ['pytest', 'pytest-remotedata']
},
zip_safe=False,
entry_points={
'numina.pipeline.1': [
'EMIR = emirdrp.loader:load_drp',
],
'console_scripts': [
'pyemir-apply_rectification_only = ' +
'emirdrp.tools.apply_rectification_only:main',
'pyemir-apply_rectwv_coeff = ' +
'emirdrp.processing.wavecal.apply_rectwv_coeff:main',
'pyemir-continuum_flatfield = ' +
'emirdrp.tools.continuum_flatfield:main',
'pyemir-convert_refined_multislit_param = ' +
'emirdrp.tools.convert_refined_multislit_bound_param:main',
'pyemir-display_slitlet_arrangement = ' +
'emirdrp.tools.display_slitlet_arrangement:main',
'pyemir-fit_boundaries = ' +
'emirdrp.tools.fit_boundaries:main',
'pyemir-generate_yaml_for_abba = ' +
'emirdrp.tools.generate_yaml_for_abba:main',
'pyemir-generate_yaml_for_dithered_image = ' +
'emirdrp.tools.generate_yaml_for_dithered_image:main',
'pyemir-median_slitlets_rectified = ' +
'emirdrp.processing.wavecal.median_slitlets_rectified:main',
'pyemir-merge_bounddict_files = ' +
'emirdrp.tools.merge_bounddict_files:main',
'pyemir-merge2images = ' +
'emirdrp.tools.merge2images:main',
'pyemir-rectwv_coeff_add_longslit_model = ' +
'emirdrp.processing.wavecal.rectwv_coeff_add_longslit_model:main',
'pyemir-rect_wpoly_for_mos = ' +
'emirdrp.tools.rect_wpoly_for_mos:main',
'pyemir-rectwv_coeff_from_arc_image = ' +
'emirdrp.processing.wavecal.rectwv_coeff_from_arc_image:main',
'pyemir-rectwv_coeff_from_mos_library = ' +
'emirdrp.processing.wavecal.rectwv_coeff_from_mos_library:main',
'pyemir-rectwv_coeff_to_ds9 = ' +
'emirdrp.processing.wavecal.rectwv_coeff_to_ds9:main',
'pyemir-select_unrectified_slitlets = ' +
'emirdrp.tools.select_unrectified_slitlets:main',
'pyemir-slitlet_boundaries_from_continuum = ' +
'emirdrp.tools.slitlet_boundaries_from_continuum:main',
'pyemir-overplot_boundary_model = ' +
'emirdrp.processing.wavecal.overplot_boundary_model:main',
'pyemir-overplot_bounddict = ' +
'emirdrp.tools.overplot_bounddict:main',
],
},
classifiers=[
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
'Development Status :: 3 - Alpha',
"Environment :: Console",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: GNU General Public License (GPL)",
"Operating System :: OS Independent",
"Topic :: Scientific/Engineering :: Astronomy",
],
long_description=open('README.rst').read()
)
| gpl-3.0 |
aflaxman/scikit-learn | examples/neural_networks/plot_rbm_logistic_classification.py | 37 | 4608 | """
==============================================================
Restricted Boltzmann Machine features for digit classification
==============================================================
For greyscale image data where pixel values can be interpreted as degrees of
blackness on a white background, like handwritten digit recognition, the
Bernoulli Restricted Boltzmann machine model (:class:`BernoulliRBM
<sklearn.neural_network.BernoulliRBM>`) can perform effective non-linear
feature extraction.
In order to learn good latent representations from a small dataset, we
artificially generate more labeled data by perturbing the training data with
linear shifts of 1 pixel in each direction.
This example shows how to build a classification pipeline with a BernoulliRBM
feature extractor and a :class:`LogisticRegression
<sklearn.linear_model.LogisticRegression>` classifier. The hyperparameters
of the entire model (learning rate, hidden layer size, regularization)
were optimized by grid search, but the search is not reproduced here because
of runtime constraints.
Logistic regression on raw pixel values is presented for comparison. The
example shows that the features extracted by the BernoulliRBM help improve the
classification accuracy.
"""
from __future__ import print_function
print(__doc__)
# Authors: Yann N. Dauphin, Vlad Niculae, Gabriel Synnaeve
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import convolve
from sklearn import linear_model, datasets, metrics
from sklearn.model_selection import train_test_split
from sklearn.neural_network import BernoulliRBM
from sklearn.pipeline import Pipeline
# #############################################################################
# Setting up
def nudge_dataset(X, Y):
"""
This produces a dataset 5 times bigger than the original one,
by moving the 8x8 images in X around by 1px to left, right, down, up
"""
direction_vectors = [
[[0, 1, 0],
[0, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[1, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 1],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 0],
[0, 1, 0]]]
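# Each 3x3 kernel above, applied as convolution weights below, moves the
# 8x8 image by exactly one pixel in one of the four directions.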
shift = lambda x, w: convolve(x.reshape((8, 8)), mode='constant',
weights=w).ravel()
X = np.concatenate([X] +
[np.apply_along_axis(shift, 1, X, vector)
for vector in direction_vectors])
Y = np.concatenate([Y for _ in range(5)], axis=0)
return X, Y
# Load Data
digits = datasets.load_digits()
X = np.asarray(digits.data, 'float32')
X, Y = nudge_dataset(X, digits.target)
X = (X - np.min(X, 0)) / (np.max(X, 0) + 0.0001) # 0-1 scaling
X_train, X_test, Y_train, Y_test = train_test_split(X, Y,
test_size=0.2,
random_state=0)
# Models we will use
logistic = linear_model.LogisticRegression()
rbm = BernoulliRBM(random_state=0, verbose=True)
classifier = Pipeline(steps=[('rbm', rbm), ('logistic', logistic)])
# #############################################################################
# Training
# Hyper-parameters. These were set by cross-validation,
# using a GridSearchCV. Here we are not performing cross-validation to
# save time.
rbm.learning_rate = 0.06
rbm.n_iter = 20
# More components tend to give better prediction performance, but larger
# fitting time
rbm.n_components = 100
logistic.C = 6000.0
# Training RBM-Logistic Pipeline
classifier.fit(X_train, Y_train)
# Training Logistic regression
logistic_classifier = linear_model.LogisticRegression(C=100.0)
logistic_classifier.fit(X_train, Y_train)
# #############################################################################
# Evaluation
print()
print("Logistic regression using RBM features:\n%s\n" % (
metrics.classification_report(
Y_test,
classifier.predict(X_test))))
print("Logistic regression using raw pixel features:\n%s\n" % (
metrics.classification_report(
Y_test,
logistic_classifier.predict(X_test))))
# #############################################################################
# Plotting
plt.figure(figsize=(4.2, 4))
for i, comp in enumerate(rbm.components_):
plt.subplot(10, 10, i + 1)
plt.imshow(comp.reshape((8, 8)), cmap=plt.cm.gray_r,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('100 components extracted by RBM', fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
| bsd-3-clause |
Gordonei/BlackBoxDataVisualiser | BlackBoxDataVisualiser.py | 1 | 11512 | #!/usr/bin/env python
import sys,copy
import numpy, matplotlib, matplotlib.pyplot as plt
from matplotlib.widgets import Slider,RadioButtons,CheckButtons
def headerSnoop(raw_data,col_separator):
"""
Helper function for trying to detect where the data file header ends and the data begins.
"""
header_length_guess = 0
for i,rd in enumerate(numpy.array(raw_data)):
#This probably isn't a very Pythonic way to do this...
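# The index of the first row whose columns all parse as floats is taken as
# the number of header rows, since that row is assumed to be the first data
# row.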
try:
numpy.array(rd.split(col_separator)[:-1]).astype(numpy.float)
if not(header_length_guess): header_length_guess = i
except(ValueError): pass
return header_length_guess
def Read(filename,strip_whitespace=True,row_separator='\n',col_separator=',',header_rows=0,title_row=0):
"""
High level function for reading in data from a flat file.
"""
#Reading data in from file
datafile = open(filename,"r")
raw_data = datafile.read().split(row_separator)
if(strip_whitespace): raw_data = filter(None,raw_data)
#Assuming title is first line and column Headers are the last line of the header
title_row = 0
header_row = header_rows-1
#If header rows aren't specified, use the snooper helper function to try find them
if not(header_rows):
header_rows = headerSnoop(raw_data,col_separator)
title_row = header_rows-2
title = raw_data[title_row].strip(col_separator)
headers = raw_data[header_rows-1].split(col_separator)
#Turning data in numpy array
if(strip_whitespace): data = [tuple(filter(None,d.split(col_separator))) for d in raw_data[header_rows:]]
else: data = [tuple(d.split(col_separator)) for d in raw_data[header_rows:]]
#Adding the headers to the datafile
number_formats = []
for col in numpy.array(data).transpose():
try:
col.astype(numpy.float)
number_formats.append("d")
except: number_formats.append("a")
datatypes = []
for dt,nf in zip(headers,number_formats): datatypes.append((dt,nf))
data = numpy.array(data,dtype=datatypes)
return {"title":title,"data":data}
def drawViewFigure(viewer_fig,data_dicts,x_label,y_label,parameter_selection,plot_type=matplotlib.lines.Line2D,colour_seed=1234):
"""
Helper function for drawing the view window for the x and y labels initially
"""
#viewer_fig.clear()
plt.figure(viewer_fig.number)
viewer_ax = viewer_fig.add_subplot(111)
numpy.random.seed(colour_seed)
for dd in data_dicts:
title = dd["title"]
data = numpy.copy(dd["data"])
for parameter in parameter_selection.keys(): data = data[data[parameter]==parameter_selection[parameter]]
colour_value = (numpy.random.random(),numpy.random.random(),numpy.random.random())
if(x_label in data.dtype.names and y_label in data.dtype.names and plot_type==matplotlib.lines.Line2D):
viewer_ax.plot(data[x_label],data[y_label],"-o",label=title,color=colour_value)
elif(x_label in data.dtype.names and y_label in data.dtype.names and plot_type==matplotlib.collections.PathCollection):
viewer_ax.scatter(data[x_label],data[y_label],label=title,color=colour_value)
viewer_ax.set_xlabel(x_label)
viewer_ax.set_ylabel(y_label)
viewer_fig.canvas.draw()
plt.legend(loc='best')
def redrawViewFigure(viewer_fig,data_dicts,x_label,y_label,parameter_selection,plot_type=matplotlib.lines.Line2D):
"""
Helper function for redrawing the view window for the x and y labels.
"""
global old_x_label,old_y_label
plt.figure(viewer_fig.number)
viewer_ax = plt.gca()
min_x = None
min_y = None
max_x = None
max_y = None
count = 0
for vac in viewer_ax.get_children():
if(isinstance(vac,plot_type)):
title = data_dicts[count]["title"]
data = numpy.copy(data_dicts[count]["data"])
for parameter in parameter_selection.keys():
data = data[data[parameter]==parameter_selection[parameter]]
if(x_label in data.dtype.names and y_label in data.dtype.names and plot_type==matplotlib.lines.Line2D):
vac.set_xdata(data[x_label])
vac.set_ydata(data[y_label])
elif(x_label in data.dtype.names and y_label in data.dtype.names and plot_type==matplotlib.collections.PathCollection):
vac.set_offsets([data[x_label],data[y_label]])
if(data.size>0):
if(not min_x or min_x>min(data[x_label])): min_x = min(data[x_label])
if(not max_x or max_x<max(data[x_label])): max_x = max(data[x_label])
if(not min_y or min_y>min(data[y_label])): min_y = min(data[y_label])
if(not max_y or max_y<max(data[y_label])): max_y = max(data[y_label])
old_x_label = x_label
old_y_label = y_label
count += 1
viewer_ax.set_xlabel(x_label)
viewer_ax.set_ylabel(y_label)
if(plot_type==matplotlib.lines.Line2D):
viewer_ax.relim()
viewer_ax.autoscale_view(True,True,True)
elif(plot_type==matplotlib.collections.PathCollection):
viewer_ax.set_xlim(min_x,max_x)
viewer_ax.set_ylim(min_y,max_y)
viewer_ax.autoscale_view(True,True,True)
viewer_fig.canvas.draw()
def drawAxesControls(controls_fig,viewer_fig,columns):
"""
Helper function for adding the buttons for controlling the axes to the control view window
"""
axcolor = 'w'
char_width = max(map(lambda x:len(x),columns)) #finding the widest column title
#X offset, y offset, x length, y length
rax = plt.axes([0.05, 0.1 + 0.03*len(columns) + 0.1, 0.015*char_width, 0.03*len(columns)], axisbg=axcolor,title="X Axis")
radio = RadioButtons(rax, tuple(columns))
def x_axesChange(label):
global x_label,y_label,parameter_selection,data_dicts #Should probably do this in a more OOP way
x_label = label
redrawViewFigure(viewer_fig,data_dicts,x_label,y_label,parameter_selection)
radio.on_clicked(x_axesChange)
rax2 = plt.axes([0.05, 0.1, 0.015*char_width, 0.03*len(columns)], axisbg=axcolor,title="Y Axis")
radio2 = RadioButtons(rax2, tuple(columns))
def y_axesChange(label):
global x_label,y_label,parameter_selection,data_dicts
y_label = label
redrawViewFigure(viewer_fig,data_dicts,x_label,y_label,parameter_selection)
radio2.on_clicked(y_axesChange)
return [radio,radio2]
def drawParameterListControls(control_fig,viewer_fig,columns,default_values):
"""
Helper function for adding the check boxes for controlling the parameter list
"""
axcolor = 'w'
char_width = max(map(lambda x:len(x),columns)) #finding the widest column title
rax = plt.axes([0.05, 0.2 + 0.03*len(columns)*2 + 0.1, 0.015*char_width, 0.03*len(columns)], axisbg=axcolor,title="Parameter List")
check = CheckButtons(rax, tuple(columns), ([True]*len(columns)))
def parameterlistChange(label):
global x_label,y_label,parameter_selection,data_dicts
if(label in parameter_selection.keys()): del parameter_selection[label]
else: parameter_selection[label] = default_values[columns.index(label)]
redrawViewFigure(viewer_fig,data_dicts,x_label,y_label,parameter_selection)
check.on_clicked(parameterlistChange)
return check
def drawParameterControls(control_fig,viewer_fig,columns,default_values,min_values,max_values):
"""
Helper function for adding the sliders for controlling the values of paramers
"""
axcolor = 'w'
char_width = max(map(lambda x:len(x),columns)) #finding the widest column title
saxes = []
sliders = {}
#Generating the Sliders
for i,c in enumerate(columns):
saxes.append(plt.axes([0.05 + 0.015*char_width + 0.01*char_width + 0.05, (0.01 + 0.03)*i + 0.1, 0.02*char_width, 0.03], axisbg=axcolor))
sliders[c] = Slider(saxes[-1], c, min_values[i], max_values[i], valinit=default_values[i])
#The callback function for the sliders
def parameterChange(value,column):
global x_label,y_label,parameter_selection,data_dicts
changed = False
if(column in parameter_selection.keys()):
if(parameter_selection[column]!=value):
for dd in data_dicts:
nearest_value = dd["data"][column][numpy.argmin(numpy.abs(dd["data"][column]-value))]
parameter_selection[column] = nearest_value
sliders[column].set_val(nearest_value)
changed = True
if(changed): redrawViewFigure(viewer_fig,data_dicts,x_label,y_label,parameter_selection)
#Creating and bind a unique function call for each slider
functions = []
for column in columns:
functions.append(lambda value,col=column: parameterChange(value,col))
sliders[column].on_changed(functions[-1])
return sliders
#Global variables used during plotting
x_label = 0
y_label = 1
parameter_selection = {}
data_dicts = []
old_x_label = 0
old_y_label = 0
def Plot(dd):
"""
High level function for plotting the data returned from the Read function
"""
viewer_fig = plt.figure()
global x_label,y_label,parameter_selection,data_dicts,old_x_label,old_y_label
data_dicts = dd
#Finding the column names
columns = []
default_values = []
min_values = []
max_values = []
for dd in data_dicts:
data = dd["data"]
for datatype_name in data.dtype.names:
if(datatype_name not in columns):
columns.append(datatype_name)
default_values.append(data[datatype_name][0])
min_values.append(min(data[datatype_name]))
max_values.append(max(data[datatype_name]))
#Setting Default Values
x_label = columns[0]
y_label = columns[1]
old_x_label = columns[0]
old_y_label = columns[1]
#Adding the other parameter types to the dictionary
for c in columns:
if(c is not x_label or c is not y_label): parameter_selection[c] = default_values[columns.index(c)]
plot_type = matplotlib.collections.PathCollection
#Draw viewer for the 1st time
drawViewFigure(viewer_fig,data_dicts,x_label,y_label,parameter_selection)
#Creating the controls
controls_fig = plt.figure()
#Drawing the radio buttons that control the X and Y Axes
axes_radio_buttons = drawAxesControls(controls_fig,viewer_fig,columns)
#Drawing the checklist used to control the paramters
parameter_list_checkboxes = drawParameterListControls(controls_fig,viewer_fig,columns,default_values)
#Drawing the sliders that can be used to set paramter values
parameter_sliders = drawParameterControls(controls_fig,viewer_fig,columns,default_values,min_values,max_values)
plt.show()
if __name__ == '__main__':
if(len(sys.argv)>1):
data_dicts = []
for filename in sys.argv[1:]: data_dicts.append(Read(filename))
Plot(data_dicts)
else:
print "usage: BlackBoxDataVisualiser [data file 1] [data file 2] ... [data file n]" | gpl-2.0 |
kevin-intel/scikit-learn | sklearn/metrics/pairwise.py | 2 | 69283 | # -*- coding: utf-8 -*-
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Robert Layton <[email protected]>
# Andreas Mueller <[email protected]>
# Philippe Gervais <[email protected]>
# Lars Buitinck
# Joel Nothman <[email protected]>
# License: BSD 3 clause
import itertools
from functools import partial
import warnings
import numpy as np
from scipy.spatial import distance
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from joblib import Parallel, effective_n_jobs
from ..utils.validation import _num_samples
from ..utils.validation import check_non_negative
from ..utils import check_array
from ..utils import gen_even_slices
from ..utils import gen_batches, get_chunk_n_rows
from ..utils import is_scalar_nan
from ..utils.extmath import row_norms, safe_sparse_dot
from ..preprocessing import normalize
from ..utils._mask import _get_mask
from ..utils.fixes import delayed
from ..utils.fixes import sp_version, parse_version
from ._pairwise_fast import _chi2_kernel_fast, _sparse_manhattan
from ..exceptions import DataConversionWarning
# Utility Functions
def _return_float_dtype(X, Y):
"""
1. If dtype of X and Y is float32, then dtype float32 is returned.
2. Else dtype float is returned.
"""
if not issparse(X) and not isinstance(X, np.ndarray):
X = np.asarray(X)
if Y is None:
Y_dtype = X.dtype
elif not issparse(Y) and not isinstance(Y, np.ndarray):
Y = np.asarray(Y)
Y_dtype = Y.dtype
else:
Y_dtype = Y.dtype
if X.dtype == Y_dtype == np.float32:
dtype = np.float32
else:
dtype = float
return X, Y, dtype
def check_pairwise_arrays(X, Y, *, precomputed=False, dtype=None,
accept_sparse='csr', force_all_finite=True,
copy=False):
"""Set X and Y appropriately and checks inputs.
If Y is None, it is set as a pointer to X (i.e. not a copy).
If Y is given, this does not happen.
All distance metrics should use this function first to assert that the
given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats (or dtype if provided). Finally, the function
checks that the size of the second dimension of the two arrays is equal, or
the equivalent check for a precomputed distance matrix.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples_X, n_features)
Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features)
precomputed : bool, default=False
True if X is to be treated as precomputed distances to the samples in
Y.
dtype : str, type, list of type, default=None
Data type required for X and Y. If None, the dtype will be an
appropriate float type selected by _return_float_dtype.
.. versionadded:: 0.18
accept_sparse : str, bool or list/tuple of str, default='csr'
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. If the input is sparse but not in the allowed format,
it will be converted to the first listed format. True allows the input
to be any format. False means that a sparse matrix input will
raise an error.
force_all_finite : bool or 'allow-nan', default=True
Whether to raise an error on np.inf, np.nan, pd.NA in array. The
possibilities are:
- True: Force all values of array to be finite.
- False: accepts np.inf, np.nan, pd.NA in array.
- 'allow-nan': accepts only np.nan and pd.NA values in array. Values
cannot be infinite.
.. versionadded:: 0.22
``force_all_finite`` accepts the string ``'allow-nan'``.
.. versionchanged:: 0.23
Accepts `pd.NA` and converts it into `np.nan`.
copy : bool, default=False
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
.. versionadded:: 0.22
Returns
-------
safe_X : {array-like, sparse matrix} of shape (n_samples_X, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y, dtype_float = _return_float_dtype(X, Y)
estimator = 'check_pairwise_arrays'
if dtype is None:
dtype = dtype_float
if Y is X or Y is None:
X = Y = check_array(X, accept_sparse=accept_sparse, dtype=dtype,
copy=copy, force_all_finite=force_all_finite,
estimator=estimator)
else:
X = check_array(X, accept_sparse=accept_sparse, dtype=dtype,
copy=copy, force_all_finite=force_all_finite,
estimator=estimator)
Y = check_array(Y, accept_sparse=accept_sparse, dtype=dtype,
copy=copy, force_all_finite=force_all_finite,
estimator=estimator)
if precomputed:
if X.shape[1] != Y.shape[0]:
raise ValueError("Precomputed metric requires shape "
"(n_queries, n_indexed). Got (%d, %d) "
"for %d indexed." %
(X.shape[0], X.shape[1], Y.shape[0]))
elif X.shape[1] != Y.shape[1]:
raise ValueError("Incompatible dimension for X and Y matrices: "
"X.shape[1] == %d while Y.shape[1] == %d" % (
X.shape[1], Y.shape[1]))
return X, Y
def check_paired_arrays(X, Y):
"""Set X and Y appropriately and checks inputs for paired distances.
All paired distance metrics should use this function first to assert that
the given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the dimensions of the two arrays are equal.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples_X, n_features)
Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features)
Returns
-------
safe_X : {array-like, sparse matrix} of shape (n_samples_X, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y = check_pairwise_arrays(X, Y)
if X.shape != Y.shape:
raise ValueError("X and Y should be of same shape. They were "
"respectively %r and %r long." % (X.shape, Y.shape))
return X, Y
# Pairwise distances
def euclidean_distances(X, Y=None, *, Y_norm_squared=None, squared=False,
X_norm_squared=None):
"""
Considering the rows of X (and Y=X) as vectors, compute the
distance matrix between each pair of vectors.
For efficiency reasons, the euclidean distance between a pair of row
vector x and y is computed as::
dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))
This formulation has two advantages over other ways of computing distances.
First, it is computationally efficient when dealing with sparse data.
Second, if one argument varies but the other remains unchanged, then
`dot(x, x)` and/or `dot(y, y)` can be pre-computed.
However, this is not the most precise way of doing this computation,
because this equation potentially suffers from "catastrophic cancellation".
Also, the distance matrix returned by this function may not be exactly
symmetric as required by, e.g., ``scipy.spatial.distance`` functions.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples_X, n_features)
Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features), \
default=None
Y_norm_squared : array-like of shape (n_samples_Y,) or (n_samples_Y, 1) \
or (1, n_samples_Y), default=None
Pre-computed dot-products of vectors in Y (e.g.,
``(Y**2).sum(axis=1)``)
May be ignored in some cases, see the note below.
squared : bool, default=False
Return squared Euclidean distances.
X_norm_squared : array-like of shape (n_samples_X,) or (n_samples_X, 1) \
or (1, n_samples_X), default=None
Pre-computed dot-products of vectors in X (e.g.,
``(X**2).sum(axis=1)``)
May be ignored in some cases, see the note below.
Notes
-----
To achieve better accuracy, `X_norm_squared` and `Y_norm_squared` may be
unused if they are passed as ``float32``.
Returns
-------
distances : ndarray of shape (n_samples_X, n_samples_Y)
See Also
--------
paired_distances : Distances betweens pairs of elements of X and Y.
Examples
--------
>>> from sklearn.metrics.pairwise import euclidean_distances
>>> X = [[0, 1], [1, 1]]
>>> # distance between rows of X
>>> euclidean_distances(X, X)
array([[0., 1.],
[1., 0.]])
>>> # get distance to origin
>>> euclidean_distances(X, [[0, 0]])
array([[1. ],
[1.41421356]])
"""
X, Y = check_pairwise_arrays(X, Y)
if X_norm_squared is not None:
X_norm_squared = check_array(X_norm_squared, ensure_2d=False)
original_shape = X_norm_squared.shape
if X_norm_squared.shape == (X.shape[0],):
X_norm_squared = X_norm_squared.reshape(-1, 1)
if X_norm_squared.shape == (1, X.shape[0]):
X_norm_squared = X_norm_squared.T
if X_norm_squared.shape != (X.shape[0], 1):
raise ValueError(
f"Incompatible dimensions for X of shape {X.shape} and "
f"X_norm_squared of shape {original_shape}.")
if Y_norm_squared is not None:
Y_norm_squared = check_array(Y_norm_squared, ensure_2d=False)
original_shape = Y_norm_squared.shape
if Y_norm_squared.shape == (Y.shape[0],):
Y_norm_squared = Y_norm_squared.reshape(1, -1)
if Y_norm_squared.shape == (Y.shape[0], 1):
Y_norm_squared = Y_norm_squared.T
if Y_norm_squared.shape != (1, Y.shape[0]):
raise ValueError(
f"Incompatible dimensions for Y of shape {Y.shape} and "
f"Y_norm_squared of shape {original_shape}.")
return _euclidean_distances(X, Y, X_norm_squared, Y_norm_squared, squared)
def _euclidean_distances(X, Y, X_norm_squared=None, Y_norm_squared=None,
squared=False):
"""Computational part of euclidean_distances
Assumes inputs are already checked.
If norms are passed as float32, they are unused. If arrays are passed as
float32, norms needs to be recomputed on upcast chunks.
TODO: use a float64 accumulator in row_norms to avoid the latter.
"""
if X_norm_squared is not None:
if X_norm_squared.dtype == np.float32:
XX = None
else:
XX = X_norm_squared.reshape(-1, 1)
elif X.dtype == np.float32:
XX = None
else:
XX = row_norms(X, squared=True)[:, np.newaxis]
if Y is X:
YY = None if XX is None else XX.T
else:
if Y_norm_squared is not None:
if Y_norm_squared.dtype == np.float32:
YY = None
else:
YY = Y_norm_squared.reshape(1, -1)
elif Y.dtype == np.float32:
YY = None
else:
YY = row_norms(Y, squared=True)[np.newaxis, :]
if X.dtype == np.float32:
# To minimize precision issues with float32, we compute the distance
# matrix on chunks of X and Y upcast to float64
distances = _euclidean_distances_upcast(X, XX, Y, YY)
else:
# if dtype is already float64, no need to chunk and upcast
distances = - 2 * safe_sparse_dot(X, Y.T, dense_output=True)
distances += XX
distances += YY
np.maximum(distances, 0, out=distances)
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
if X is Y:
np.fill_diagonal(distances, 0)
return distances if squared else np.sqrt(distances, out=distances)
def nan_euclidean_distances(X, Y=None, *, squared=False,
missing_values=np.nan, copy=True):
"""Calculate the euclidean distances in the presence of missing values.
Compute the euclidean distance between each pair of samples in X and Y,
where Y=X is assumed if Y=None. When calculating the distance between a
pair of samples, this formulation ignores feature coordinates with a
missing value in either sample and scales up the weight of the remaining
coordinates:
dist(x,y) = sqrt(weight * sq. distance from present coordinates)
where,
weight = Total # of coordinates / # of present coordinates
For example, the distance between ``[3, na, na, 6]`` and ``[1, na, 4, 5]``
is:
.. math::
\\sqrt{\\frac{4}{2}((3-1)^2 + (6-5)^2)}
If all the coordinates are missing or if there are no common present
coordinates then NaN is returned for that pair.
Read more in the :ref:`User Guide <metrics>`.
.. versionadded:: 0.22
Parameters
----------
X : array-like of shape=(n_samples_X, n_features)
Y : array-like of shape=(n_samples_Y, n_features), default=None
squared : bool, default=False
Return squared Euclidean distances.
missing_values : np.nan or int, default=np.nan
Representation of missing value.
copy : bool, default=True
Make and use a deep copy of X and Y (if Y exists).
Returns
-------
distances : ndarray of shape (n_samples_X, n_samples_Y)
See Also
--------
paired_distances : Distances between pairs of elements of X and Y.
Examples
--------
>>> from sklearn.metrics.pairwise import nan_euclidean_distances
>>> nan = float("NaN")
>>> X = [[0, 1], [1, nan]]
>>> nan_euclidean_distances(X, X) # distance between rows of X
array([[0. , 1.41421356],
[1.41421356, 0. ]])
>>> # get distance to origin
>>> nan_euclidean_distances(X, [[0, 0]])
array([[1. ],
[1.41421356]])
References
----------
* John K. Dixon, "Pattern Recognition with Partly Missing Data",
IEEE Transactions on Systems, Man, and Cybernetics, Volume: 9, Issue:
10, pp. 617 - 621, Oct. 1979.
http://ieeexplore.ieee.org/abstract/document/4310090/
"""
force_all_finite = 'allow-nan' if is_scalar_nan(missing_values) else True
X, Y = check_pairwise_arrays(X, Y, accept_sparse=False,
force_all_finite=force_all_finite, copy=copy)
# Get missing mask for X
missing_X = _get_mask(X, missing_values)
# Get missing mask for Y
missing_Y = missing_X if Y is X else _get_mask(Y, missing_values)
# set missing values to zero
X[missing_X] = 0
Y[missing_Y] = 0
distances = euclidean_distances(X, Y, squared=True)
# Adjust distances for missing values
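# Since missing entries were zeroed above, the squared distances wrongly
# include x_i**2 terms where y_i is missing and y_i**2 terms where x_i is
# missing; the two dot products below subtract those contributions.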
XX = X * X
YY = Y * Y
distances -= np.dot(XX, missing_Y.T)
distances -= np.dot(missing_X, YY.T)
np.clip(distances, 0, None, out=distances)
if X is Y:
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
np.fill_diagonal(distances, 0.0)
present_X = 1 - missing_X
present_Y = present_X if Y is X else ~missing_Y
present_count = np.dot(present_X, present_Y.T)
distances[present_count == 0] = np.nan
# avoid divide by zero
np.maximum(1, present_count, out=present_count)
distances /= present_count
distances *= X.shape[1]
if not squared:
np.sqrt(distances, out=distances)
return distances
def _euclidean_distances_upcast(X, XX=None, Y=None, YY=None, batch_size=None):
"""Euclidean distances between X and Y.
Assumes X and Y have float32 dtype.
Assumes XX and YY have float64 dtype or are None.
X and Y are upcast to float64 by chunks, which size is chosen to limit
memory increase by approximately 10% (at least 10MiB).
"""
n_samples_X = X.shape[0]
n_samples_Y = Y.shape[0]
n_features = X.shape[1]
distances = np.empty((n_samples_X, n_samples_Y), dtype=np.float32)
if batch_size is None:
x_density = X.nnz / np.prod(X.shape) if issparse(X) else 1
y_density = Y.nnz / np.prod(Y.shape) if issparse(Y) else 1
# Allow 10% more memory than X, Y and the distance matrix take (at
# least 10MiB)
maxmem = max(
((x_density * n_samples_X + y_density * n_samples_Y) * n_features
+ (x_density * n_samples_X * y_density * n_samples_Y)) / 10,
10 * 2 ** 17)
# The increase amount of memory in 8-byte blocks is:
# - x_density * batch_size * n_features (copy of chunk of X)
# - y_density * batch_size * n_features (copy of chunk of Y)
# - batch_size * batch_size (chunk of distance matrix)
# Hence x² + (xd+yd)kx = M, where x=batch_size, k=n_features, M=maxmem
# xd=x_density and yd=y_density
tmp = (x_density + y_density) * n_features
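# batch_size is the positive root of x**2 + tmp * x - maxmem = 0,
# i.e. the quadratic described in the comment above.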
batch_size = (-tmp + np.sqrt(tmp ** 2 + 4 * maxmem)) / 2
batch_size = max(int(batch_size), 1)
x_batches = gen_batches(n_samples_X, batch_size)
for i, x_slice in enumerate(x_batches):
X_chunk = X[x_slice].astype(np.float64)
if XX is None:
XX_chunk = row_norms(X_chunk, squared=True)[:, np.newaxis]
else:
XX_chunk = XX[x_slice]
y_batches = gen_batches(n_samples_Y, batch_size)
for j, y_slice in enumerate(y_batches):
if X is Y and j < i:
# when X is Y the distance matrix is symmetric so we only need
# to compute half of it.
d = distances[y_slice, x_slice].T
else:
Y_chunk = Y[y_slice].astype(np.float64)
if YY is None:
YY_chunk = row_norms(Y_chunk, squared=True)[np.newaxis, :]
else:
YY_chunk = YY[:, y_slice]
d = -2 * safe_sparse_dot(X_chunk, Y_chunk.T, dense_output=True)
d += XX_chunk
d += YY_chunk
distances[x_slice, y_slice] = d.astype(np.float32, copy=False)
return distances
def _argmin_min_reduce(dist, start):
indices = dist.argmin(axis=1)
values = dist[np.arange(dist.shape[0]), indices]
return indices, values
def pairwise_distances_argmin_min(X, Y, *, axis=1, metric="euclidean",
metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance). The minimal distances are
also returned.
This is mostly equivalent to calling:
(pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis),
pairwise_distances(X, Y=Y, metric=metric).min(axis=axis))
but uses much less memory, and is faster for large arrays.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples_X, n_features)
Array containing points.
Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features)
Array containing points.
axis : int, default=1
Axis along which the argmin and distances are to be computed.
metric : str or callable, default='euclidean'
Metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'minkowski', 'rogerstanimoto', 'russellrao',
'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean',
'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict, default=None
Keyword arguments to pass to specified metric function.
Returns
-------
argmin : ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
distances : ndarray
distances[i] is the distance between the i-th row in X and the
argmin[i]-th row in Y.
See Also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin
"""
X, Y = check_pairwise_arrays(X, Y)
if metric_kwargs is None:
metric_kwargs = {}
if axis == 0:
X, Y = Y, X
indices, values = zip(*pairwise_distances_chunked(
X, Y, reduce_func=_argmin_min_reduce, metric=metric,
**metric_kwargs))
indices = np.concatenate(indices)
values = np.concatenate(values)
return indices, values
def pairwise_distances_argmin(X, Y, *, axis=1, metric="euclidean",
metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance).
This is mostly equivalent to calling:
pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis)
but uses much less memory, and is faster for large arrays.
This function works with dense 2D arrays only.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Array containing points.
Y : array-like of shape (n_samples_Y, n_features)
Array containing points.
axis : int, default=1
Axis along which the argmin and distances are to be computed.
metric : str or callable, default="euclidean"
Metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'minkowski', 'rogerstanimoto', 'russellrao',
'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean',
'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict, default=None
Keyword arguments to pass to specified metric function.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
See Also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin_min
"""
if metric_kwargs is None:
metric_kwargs = {}
return pairwise_distances_argmin_min(X, Y, axis=axis, metric=metric,
metric_kwargs=metric_kwargs)[0]
def haversine_distances(X, Y=None):
"""Compute the Haversine distance between samples in X and Y.
The Haversine (or great circle) distance is the angular distance between
two points on the surface of a sphere. The first coordinate of each point
is assumed to be the latitude, the second is the longitude, given
in radians. The dimension of the data must be 2.
.. math::
D(x, y) = 2\\arcsin[\\sqrt{\\sin^2((x1 - y1) / 2)
+ \\cos(x1)\\cos(y1)\\sin^2((x2 - y2) / 2)}]
Parameters
----------
X : array-like of shape (n_samples_X, 2)
Y : array-like of shape (n_samples_Y, 2), default=None
Returns
-------
distance : ndarray of shape (n_samples_X, n_samples_Y)
Notes
-----
As the Earth is nearly spherical, the haversine formula provides a good
approximation of the distance between two points of the Earth surface, with
a less than 1% error on average.
Examples
--------
We want to calculate the distance between the Ezeiza Airport
(Buenos Aires, Argentina) and the Charles de Gaulle Airport (Paris,
France).
>>> from sklearn.metrics.pairwise import haversine_distances
>>> from math import radians
>>> bsas = [-34.83333, -58.5166646]
>>> paris = [49.0083899664, 2.53844117956]
>>> bsas_in_radians = [radians(_) for _ in bsas]
>>> paris_in_radians = [radians(_) for _ in paris]
>>> result = haversine_distances([bsas_in_radians, paris_in_radians])
>>> result * 6371000/1000 # multiply by Earth radius to get kilometers
array([[ 0. , 11099.54035582],
[11099.54035582, 0. ]])
"""
from ..neighbors import DistanceMetric
return DistanceMetric.get_metric('haversine').pairwise(X, Y)
def manhattan_distances(X, Y=None, *, sum_over_features=True):
"""Compute the L1 distances between the vectors in X and Y.
With sum_over_features equal to False it returns the componentwise
distances.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array-like of shape (n_samples_Y, n_features), default=None
sum_over_features : bool, default=True
If True, the function returns the pairwise distance matrix,
else it returns the componentwise L1 pairwise-distances.
Not supported for sparse matrix inputs.
Returns
-------
D : ndarray of shape (n_samples_X * n_samples_Y, n_features) or \
(n_samples_X, n_samples_Y)
If sum_over_features is False shape is
(n_samples_X * n_samples_Y, n_features) and D contains the
componentwise L1 pairwise-distances (ie. absolute difference),
else shape is (n_samples_X, n_samples_Y) and D contains
the pairwise L1 distances.
Notes
-----
When X and/or Y are CSR sparse matrices and they are not already
in canonical format, this function modifies them in-place to
make them canonical.
Examples
--------
>>> from sklearn.metrics.pairwise import manhattan_distances
>>> manhattan_distances([[3]], [[3]])
array([[0.]])
>>> manhattan_distances([[3]], [[2]])
array([[1.]])
>>> manhattan_distances([[2]], [[3]])
array([[1.]])
>>> manhattan_distances([[1, 2], [3, 4]],\
[[1, 2], [0, 3]])
array([[0., 2.],
[4., 4.]])
>>> import numpy as np
>>> X = np.ones((1, 2))
>>> y = np.full((2, 2), 2.)
>>> manhattan_distances(X, y, sum_over_features=False)
array([[1., 1.],
[1., 1.]])
"""
X, Y = check_pairwise_arrays(X, Y)
if issparse(X) or issparse(Y):
if not sum_over_features:
raise TypeError("sum_over_features=%r not supported"
" for sparse matrices" % sum_over_features)
X = csr_matrix(X, copy=False)
Y = csr_matrix(Y, copy=False)
X.sum_duplicates() # this also sorts indices in-place
Y.sum_duplicates()
D = np.zeros((X.shape[0], Y.shape[0]))
_sparse_manhattan(X.data, X.indices, X.indptr,
Y.data, Y.indices, Y.indptr,
D)
return D
if sum_over_features:
return distance.cdist(X, Y, 'cityblock')
D = X[:, np.newaxis, :] - Y[np.newaxis, :, :]
D = np.abs(D, D)
return D.reshape((-1, X.shape[1]))
def cosine_distances(X, Y=None):
"""Compute cosine distance between samples in X and Y.
Cosine distance is defined as 1.0 minus the cosine similarity.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples_X, n_features)
Matrix `X`.
Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features), \
default=None
Matrix `Y`.
Returns
-------
distance matrix : ndarray of shape (n_samples_X, n_samples_Y)
See Also
--------
cosine_similarity
scipy.spatial.distance.cosine : Dense matrices only.
"""
# 1.0 - cosine_similarity(X, Y) without copy
S = cosine_similarity(X, Y)
S *= -1
S += 1
np.clip(S, 0, 2, out=S)
if X is Y or Y is None:
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
S[np.diag_indices_from(S)] = 0.0
return S
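# Illustrative sketch (added example; not part of the original module):
# orthogonal rows are at cosine distance 1, and the diagonal is forced to 0.
def _example_cosine_distances():
    import numpy as np
    X = np.array([[1., 0.], [0., 1.]])
    D = cosine_distances(X)
    # D == [[0., 1.], [1., 0.]]
    return D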
# Paired distances
def paired_euclidean_distances(X, Y):
"""
Computes the paired euclidean distances between X and Y.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Y : array-like of shape (n_samples, n_features)
Returns
-------
distances : ndarray of shape (n_samples,)
"""
X, Y = check_paired_arrays(X, Y)
return row_norms(X - Y)
def paired_manhattan_distances(X, Y):
"""Compute the L1 distances between the vectors in X and Y.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Y : array-like of shape (n_samples, n_features)
Returns
-------
distances : ndarray of shape (n_samples,)
"""
X, Y = check_paired_arrays(X, Y)
diff = X - Y
if issparse(diff):
diff.data = np.abs(diff.data)
return np.squeeze(np.array(diff.sum(axis=1)))
else:
return np.abs(diff).sum(axis=-1)
def paired_cosine_distances(X, Y):
"""
Computes the paired cosine distances between X and Y.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Y : array-like of shape (n_samples, n_features)
Returns
-------
distances : ndarray of shape (n_samples,)
Notes
-----
The cosine distance is equivalent to half the squared
euclidean distance if each sample is normalized to unit norm.
"""
X, Y = check_paired_arrays(X, Y)
return .5 * row_norms(normalize(X) - normalize(Y), squared=True)
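# Illustrative sketch (added example; not part of the original module): paired
# distances compare X[i] with Y[i] only, so the result has length n_samples.
def _example_paired_cosine_distances():
    import numpy as np
    X = np.array([[1., 0.], [1., 1.]])
    Y = np.array([[0., 1.], [2., 2.]])
    d = paired_cosine_distances(X, Y)
    # d == array([1., 0.]): orthogonal rows give 1, parallel rows give 0.
    return d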
PAIRED_DISTANCES = {
'cosine': paired_cosine_distances,
'euclidean': paired_euclidean_distances,
'l2': paired_euclidean_distances,
'l1': paired_manhattan_distances,
'manhattan': paired_manhattan_distances,
'cityblock': paired_manhattan_distances}
def paired_distances(X, Y, *, metric="euclidean", **kwds):
"""
Computes the paired distances between X and Y.
Computes the distances between (X[0], Y[0]), (X[1], Y[1]), etc...
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Array 1 for distance computation.
Y : ndarray of shape (n_samples, n_features)
Array 2 for distance computation.
metric : str or callable, default="euclidean"
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
specified in PAIRED_DISTANCES, including "euclidean",
"manhattan", or "cosine".
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
Returns
-------
distances : ndarray of shape (n_samples,)
See Also
--------
pairwise_distances : Computes the distance between every pair of samples.
Examples
--------
>>> from sklearn.metrics.pairwise import paired_distances
>>> X = [[0, 1], [1, 1]]
>>> Y = [[0, 1], [2, 1]]
>>> paired_distances(X, Y)
array([0., 1.])
"""
if metric in PAIRED_DISTANCES:
func = PAIRED_DISTANCES[metric]
return func(X, Y)
elif callable(metric):
# Check the matrix first (it is usually done by the metric)
X, Y = check_paired_arrays(X, Y)
distances = np.zeros(len(X))
for i in range(len(X)):
distances[i] = metric(X[i], Y[i])
return distances
else:
raise ValueError('Unknown distance %s' % metric)
# Kernels
def linear_kernel(X, Y=None, dense_output=True):
"""
Compute the linear kernel between X and Y.
Read more in the :ref:`User Guide <linear_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
Y : ndarray of shape (n_samples_Y, n_features), default=None
dense_output : bool, default=True
Whether to return dense output even when the input is sparse. If
``False``, the output is sparse if both input arrays are sparse.
.. versionadded:: 0.20
Returns
-------
Gram matrix : ndarray of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
return safe_sparse_dot(X, Y.T, dense_output=dense_output)
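# Illustrative sketch (added example; not part of the original module): the
# linear kernel of X with itself is just the Gram matrix np.dot(X, X.T).
def _example_linear_kernel():
    import numpy as np
    X = np.array([[1., 2.], [3., 4.]])
    K = linear_kernel(X)
    # K == [[ 5., 11.], [11., 25.]]
    return K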
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):
"""
Compute the polynomial kernel between X and Y::
K(X, Y) = (gamma <X, Y> + coef0)^degree
Read more in the :ref:`User Guide <polynomial_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
Y : ndarray of shape (n_samples_Y, n_features), default=None
degree : int, default=3
gamma : float, default=None
If None, defaults to 1.0 / n_features.
coef0 : float, default=1
Returns
-------
Gram matrix : ndarray of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
K **= degree
return K
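# Illustrative sketch (added example; not part of the original module): with
# gamma=1 and coef0=1, each entry is (<x, y> + 1) ** degree.
def _example_polynomial_kernel():
    import numpy as np
    X = np.array([[1., 0.], [0., 1.]])
    K = polynomial_kernel(X, degree=2, gamma=1., coef0=1.)
    # K == [[4., 1.], [1., 4.]] because <x, x> == 1 and <x, y> == 0 here.
    return K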
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
"""
Compute the sigmoid kernel between X and Y::
K(X, Y) = tanh(gamma <X, Y> + coef0)
Read more in the :ref:`User Guide <sigmoid_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
Y : ndarray of shape (n_samples_Y, n_features), default=None
gamma : float, default=None
If None, defaults to 1.0 / n_features.
coef0 : float, default=1
Returns
-------
Gram matrix : ndarray of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
np.tanh(K, K) # compute tanh in-place
return K
def rbf_kernel(X, Y=None, gamma=None):
"""
Compute the rbf (gaussian) kernel between X and Y::
K(x, y) = exp(-gamma ||x-y||^2)
for each pair of rows x in X and y in Y.
Read more in the :ref:`User Guide <rbf_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
Y : ndarray of shape (n_samples_Y, n_features), default=None
gamma : float, default=None
If None, defaults to 1.0 / n_features.
Returns
-------
kernel_matrix : ndarray of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = euclidean_distances(X, Y, squared=True)
K *= -gamma
np.exp(K, K) # exponentiate K in-place
return K
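# Illustrative sketch (added example; not part of the original module): two
# points at squared euclidean distance 1 give a kernel value of exp(-gamma).
def _example_rbf_kernel():
    import numpy as np
    X = np.array([[0., 0.], [1., 0.]])
    K = rbf_kernel(X, gamma=0.5)
    # K[0, 1] == np.exp(-0.5) (about 0.6065); the diagonal entries are 1.
    return K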
def laplacian_kernel(X, Y=None, gamma=None):
"""Compute the laplacian kernel between X and Y.
The laplacian kernel is defined as::
K(x, y) = exp(-gamma ||x-y||_1)
for each pair of rows x in X and y in Y.
Read more in the :ref:`User Guide <laplacian_kernel>`.
.. versionadded:: 0.17
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
Y : ndarray of shape (n_samples_Y, n_features), default=None
gamma : float, default=None
If None, defaults to 1.0 / n_features.
Returns
-------
kernel_matrix : ndarray of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = -gamma * manhattan_distances(X, Y)
np.exp(K, K) # exponentiate K in-place
return K
def cosine_similarity(X, Y=None, dense_output=True):
"""Compute cosine similarity between samples in X and Y.
Cosine similarity, or the cosine kernel, computes similarity as the
normalized dot product of X and Y:
K(X, Y) = <X, Y> / (||X||*||Y||)
On L2-normalized data, this function is equivalent to linear_kernel.
Read more in the :ref:`User Guide <cosine_similarity>`.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples_X, n_features)
Input data.
Y : {ndarray, sparse matrix} of shape (n_samples_Y, n_features), \
default=None
Input data. If ``None``, the output will be the pairwise
similarities between all samples in ``X``.
dense_output : bool, default=True
Whether to return dense output even when the input is sparse. If
``False``, the output is sparse if both input arrays are sparse.
.. versionadded:: 0.17
parameter ``dense_output`` for dense output.
Returns
-------
kernel matrix : ndarray of shape (n_samples_X, n_samples_Y)
"""
# to avoid recursive import
X, Y = check_pairwise_arrays(X, Y)
X_normalized = normalize(X, copy=True)
if X is Y:
Y_normalized = X_normalized
else:
Y_normalized = normalize(Y, copy=True)
K = safe_sparse_dot(X_normalized, Y_normalized.T,
dense_output=dense_output)
return K
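# Illustrative sketch (added example; not part of the original module): after
# L2 normalization, cosine_similarity coincides with linear_kernel.
def _example_cosine_similarity():
    import numpy as np
    X = np.array([[3., 0.], [1., 1.]])
    S = cosine_similarity(X)
    # S[0, 1] == 1 / np.sqrt(2), the cosine of the 45 degree angle between rows.
    return S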
def additive_chi2_kernel(X, Y=None):
"""Computes the additive chi-squared kernel between observations in X and
Y.
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = -Sum [(x - y)^2 / (x + y)]
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Notes
-----
As the negative of a distance, this kernel is only conditionally positive
definite.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : ndarray of shape (n_samples_Y, n_features), default=None
Returns
-------
kernel_matrix : ndarray of shape (n_samples_X, n_samples_Y)
See Also
--------
chi2_kernel : The exponentiated version of the kernel, which is usually
preferable.
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to this kernel.
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
https://research.microsoft.com/en-us/um/people/manik/projects/trade-off/papers/ZhangIJCV06.pdf
"""
if issparse(X) or issparse(Y):
raise ValueError("additive_chi2 does not support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if (X < 0).any():
raise ValueError("X contains negative values.")
if Y is not X and (Y < 0).any():
raise ValueError("Y contains negative values.")
result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype)
_chi2_kernel_fast(X, Y, result)
return result
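# Illustrative sketch (added example; not part of the original module):
# k(x, y) = -sum((x - y)^2 / (x + y)) computed over the features.
def _example_additive_chi2_kernel():
    import numpy as np
    X = np.array([[1., 1.]])
    Y = np.array([[3., 1.]])
    K = additive_chi2_kernel(X, Y)
    # K == [[-1.]] since (1 - 3)**2 / (1 + 3) == 1 and the second term is 0.
    return K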
def chi2_kernel(X, Y=None, gamma=1.):
"""Computes the exponential chi-squared kernel X and Y.
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)])
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : ndarray of shape (n_samples_Y, n_features), default=None
gamma : float, default=1.
Scaling parameter of the chi2 kernel.
Returns
-------
kernel_matrix : ndarray of shape (n_samples_X, n_samples_Y)
See Also
--------
additive_chi2_kernel : The additive version of this kernel.
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to the additive version of this kernel.
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
https://research.microsoft.com/en-us/um/people/manik/projects/trade-off/papers/ZhangIJCV06.pdf
"""
K = additive_chi2_kernel(X, Y)
K *= gamma
return np.exp(K, K)
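# Illustrative sketch (added example; not part of the original module):
# chi2_kernel is exp(gamma * additive_chi2_kernel(X, Y)) computed in-place.
def _example_chi2_kernel():
    import numpy as np
    X = np.array([[1., 1.]])
    Y = np.array([[3., 1.]])
    K = chi2_kernel(X, Y, gamma=1.)
    # K == [[np.exp(-1.)]] (about 0.3679), matching the additive value of -1.
    return K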
# Helper functions - distance
PAIRWISE_DISTANCE_FUNCTIONS = {
# If updating this dictionary, update the doc in both distance_metrics()
# and also in pairwise_distances()!
'cityblock': manhattan_distances,
'cosine': cosine_distances,
'euclidean': euclidean_distances,
'haversine': haversine_distances,
'l2': euclidean_distances,
'l1': manhattan_distances,
'manhattan': manhattan_distances,
'precomputed': None, # HACK: precomputed is always allowed, never called
'nan_euclidean': nan_euclidean_distances,
}
def distance_metrics():
"""Valid metrics for pairwise_distances.
This function simply returns the valid pairwise distance metrics.
It exists to allow for a description of the mapping for
each of the valid strings.
The valid distance metrics, and the function they map to, are:
=============== ========================================
metric Function
=============== ========================================
'cityblock' metrics.pairwise.manhattan_distances
'cosine' metrics.pairwise.cosine_distances
'euclidean' metrics.pairwise.euclidean_distances
'haversine' metrics.pairwise.haversine_distances
'l1' metrics.pairwise.manhattan_distances
'l2' metrics.pairwise.euclidean_distances
'manhattan' metrics.pairwise.manhattan_distances
'nan_euclidean' metrics.pairwise.nan_euclidean_distances
=============== ========================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_DISTANCE_FUNCTIONS
def _dist_wrapper(dist_func, dist_matrix, slice_, *args, **kwargs):
"""Write in-place to a slice of a distance matrix."""
dist_matrix[:, slice_] = dist_func(*args, **kwargs)
def _parallel_pairwise(X, Y, func, n_jobs, **kwds):
"""Break the pairwise matrix in n_jobs even slices
and compute them in parallel."""
if Y is None:
Y = X
X, Y, dtype = _return_float_dtype(X, Y)
if effective_n_jobs(n_jobs) == 1:
return func(X, Y, **kwds)
# enforce a threading backend to prevent data communication overhead
fd = delayed(_dist_wrapper)
ret = np.empty((X.shape[0], Y.shape[0]), dtype=dtype, order='F')
Parallel(backend="threading", n_jobs=n_jobs)(
fd(func, ret, s, X, Y[s], **kwds)
for s in gen_even_slices(_num_samples(Y), effective_n_jobs(n_jobs)))
if (X is Y or Y is None) and func is euclidean_distances:
# zeroing diagonal for euclidean norm.
# TODO: do it also for other norms.
np.fill_diagonal(ret, 0)
return ret
def _pairwise_callable(X, Y, metric, force_all_finite=True, **kwds):
"""Handle the callable case for pairwise_{distances,kernels}.
"""
X, Y = check_pairwise_arrays(X, Y, force_all_finite=force_all_finite)
if X is Y:
# Only calculate metric for upper triangle
out = np.zeros((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.combinations(range(X.shape[0]), 2)
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
# Make symmetric
# NB: out += out.T will produce incorrect results
out = out + out.T
# Calculate diagonal
# NB: nonzero diagonals are allowed for both metrics and kernels
for i in range(X.shape[0]):
x = X[i]
out[i, i] = metric(x, x, **kwds)
else:
# Calculate all cells
out = np.empty((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.product(range(X.shape[0]), range(Y.shape[0]))
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
return out
_VALID_METRICS = ['euclidean', 'l2', 'l1', 'manhattan', 'cityblock',
'braycurtis', 'canberra', 'chebyshev', 'correlation',
'cosine', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean', 'yule', "wminkowski",
'nan_euclidean', 'haversine']
_NAN_METRICS = ['nan_euclidean']
def _check_chunk_size(reduced, chunk_size):
"""Checks chunk is a sequence of expected size or a tuple of same.
"""
if reduced is None:
return
is_tuple = isinstance(reduced, tuple)
if not is_tuple:
reduced = (reduced,)
if any(isinstance(r, tuple) or not hasattr(r, '__iter__')
for r in reduced):
raise TypeError('reduce_func returned %r. '
'Expected sequence(s) of length %d.' %
(reduced if is_tuple else reduced[0], chunk_size))
if any(_num_samples(r) != chunk_size for r in reduced):
actual_size = tuple(_num_samples(r) for r in reduced)
raise ValueError('reduce_func returned object of length %s. '
'Expected same length as input: %d.' %
(actual_size if is_tuple else actual_size[0],
chunk_size))
def _precompute_metric_params(X, Y, metric=None, **kwds):
"""Precompute data-derived metric parameters if not provided.
"""
if metric == "seuclidean" and 'V' not in kwds:
# There is a bug in scipy < 1.5 that will cause a crash if
# X.dtype != np.double (float64). See PR #15730
dtype = np.float64 if sp_version < parse_version('1.5') else None
if X is Y:
V = np.var(X, axis=0, ddof=1, dtype=dtype)
else:
raise ValueError(
"The 'V' parameter is required for the seuclidean metric "
"when Y is passed.")
return {'V': V}
if metric == "mahalanobis" and 'VI' not in kwds:
if X is Y:
VI = np.linalg.inv(np.cov(X.T)).T
else:
raise ValueError(
"The 'VI' parameter is required for the mahalanobis metric "
"when Y is passed.")
return {'VI': VI}
return {}
def pairwise_distances_chunked(X, Y=None, *, reduce_func=None,
metric='euclidean', n_jobs=None,
working_memory=None, **kwds):
"""Generate a distance matrix chunk by chunk with optional reduction.
In cases where not all of a pairwise distance matrix needs to be stored at
once, this is used to calculate pairwise distances in
``working_memory``-sized chunks. If ``reduce_func`` is given, it is run
on each chunk and its return values are concatenated into lists, arrays
or sparse matrices.
Parameters
----------
X : ndarray of shape (n_samples_X, n_samples_X) or \
(n_samples_X, n_features)
Array of pairwise distances between samples, or a feature array.
The shape of the array should be (n_samples_X, n_samples_X) if
metric='precomputed' and (n_samples_X, n_features) otherwise.
Y : ndarray of shape (n_samples_Y, n_features), default=None
An optional second feature array. Only allowed if
metric != "precomputed".
reduce_func : callable, default=None
The function which is applied on each chunk of the distance matrix,
reducing it to needed values. ``reduce_func(D_chunk, start)``
is called repeatedly, where ``D_chunk`` is a contiguous vertical
slice of the pairwise distance matrix, starting at row ``start``.
It should return one of: None; an array, a list, or a sparse matrix
of length ``D_chunk.shape[0]``; or a tuple of such objects. Returning
None is useful for in-place operations, rather than reductions.
If None, pairwise_distances_chunked returns a generator of vertical
chunks of the distance matrix.
metric : str or callable, default='euclidean'
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int, default=None
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
working_memory : int, default=None
The sought maximum memory for temporary distance matrix chunks.
When None (default), the value of
``sklearn.get_config()['working_memory']`` is used.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Yields
------
D_chunk : {ndarray, sparse matrix}
A contiguous slice of distance matrix, optionally processed by
``reduce_func``.
Examples
--------
Without reduce_func:
>>> import numpy as np
>>> from sklearn.metrics import pairwise_distances_chunked
>>> X = np.random.RandomState(0).rand(5, 3)
>>> D_chunk = next(pairwise_distances_chunked(X))
>>> D_chunk
array([[0. ..., 0.29..., 0.41..., 0.19..., 0.57...],
[0.29..., 0. ..., 0.57..., 0.41..., 0.76...],
[0.41..., 0.57..., 0. ..., 0.44..., 0.90...],
[0.19..., 0.41..., 0.44..., 0. ..., 0.51...],
[0.57..., 0.76..., 0.90..., 0.51..., 0. ...]])
Retrieve all neighbors and average distance within radius r:
>>> r = .2
>>> def reduce_func(D_chunk, start):
... neigh = [np.flatnonzero(d < r) for d in D_chunk]
... avg_dist = (D_chunk * (D_chunk < r)).mean(axis=1)
... return neigh, avg_dist
>>> gen = pairwise_distances_chunked(X, reduce_func=reduce_func)
>>> neigh, avg_dist = next(gen)
>>> neigh
[array([0, 3]), array([1]), array([2]), array([0, 3]), array([4])]
>>> avg_dist
array([0.039..., 0. , 0. , 0.039..., 0. ])
Where r is defined per sample, we need to make use of ``start``:
>>> r = [.2, .4, .4, .3, .1]
>>> def reduce_func(D_chunk, start):
... neigh = [np.flatnonzero(d < r[i])
... for i, d in enumerate(D_chunk, start)]
... return neigh
>>> neigh = next(pairwise_distances_chunked(X, reduce_func=reduce_func))
>>> neigh
[array([0, 3]), array([0, 1]), array([2]), array([0, 3]), array([4])]
Force row-by-row generation by reducing ``working_memory``:
>>> gen = pairwise_distances_chunked(X, reduce_func=reduce_func,
... working_memory=0)
>>> next(gen)
[array([0, 3])]
>>> next(gen)
[array([0, 1])]
"""
n_samples_X = _num_samples(X)
if metric == 'precomputed':
slices = (slice(0, n_samples_X),)
else:
if Y is None:
Y = X
# We get as many rows as possible within our working_memory budget to
# store len(Y) distances in each row of output.
#
# Note:
# - this will get at least 1 row, even if 1 row of distances will
# exceed working_memory.
# - this does not account for any temporary memory usage while
# calculating distances (e.g. difference of vectors in manhattan
# distance).
chunk_n_rows = get_chunk_n_rows(row_bytes=8 * _num_samples(Y),
max_n_rows=n_samples_X,
working_memory=working_memory)
slices = gen_batches(n_samples_X, chunk_n_rows)
# precompute data-derived metric params
params = _precompute_metric_params(X, Y, metric=metric, **kwds)
kwds.update(**params)
for sl in slices:
if sl.start == 0 and sl.stop == n_samples_X:
X_chunk = X # enable optimised paths for X is Y
else:
X_chunk = X[sl]
D_chunk = pairwise_distances(X_chunk, Y, metric=metric,
n_jobs=n_jobs, **kwds)
if ((X is Y or Y is None)
and PAIRWISE_DISTANCE_FUNCTIONS.get(metric, None)
is euclidean_distances):
# zeroing diagonal, taking care of aliases of "euclidean",
# i.e. "l2"
D_chunk.flat[sl.start::_num_samples(X) + 1] = 0
if reduce_func is not None:
chunk_size = D_chunk.shape[0]
D_chunk = reduce_func(D_chunk, sl.start)
_check_chunk_size(D_chunk, chunk_size)
yield D_chunk
def pairwise_distances(X, Y=None, metric="euclidean", *, n_jobs=None,
force_all_finite=True, **kwds):
"""Compute the distance matrix from a vector array X and optional Y.
This method takes either a vector array or a distance matrix, and returns
a distance matrix. If the input is a vector array, the distances are
computed. If the input is a distances matrix, it is returned instead.
This method provides a safe way to take a distance matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
distance between the arrays from both X and Y.
Valid values for metric are:
- From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']. These metrics support sparse matrix
inputs.
The 'nan_euclidean' metric is also available from scikit-learn, but it
does not yet support sparse matrices.
- From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics. These metrics do not support sparse matrix inputs.
Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are
valid scipy.spatial.distance metrics), the scikit-learn implementation
will be used, which is faster and has support for sparse matrices (except
for 'cityblock'). For a verbose description of the metrics from
scikit-learn, see the __doc__ of the sklearn.pairwise.distance_metrics
function.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : ndarray of shape (n_samples_X, n_samples_X) or \
(n_samples_X, n_features)
Array of pairwise distances between samples, or a feature array.
The shape of the array should be (n_samples_X, n_samples_X) if
metric == "precomputed" and (n_samples_X, n_features) otherwise.
Y : ndarray of shape (n_samples_Y, n_features), default=None
An optional second feature array. Only allowed if
metric != "precomputed".
metric : str or callable, default='euclidean'
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in ``pairwise.PAIRWISE_DISTANCE_FUNCTIONS``.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int, default=None
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
force_all_finite : bool or 'allow-nan', default=True
Whether to raise an error on np.inf, np.nan, pd.NA in array. Ignored
for a metric listed in ``pairwise.PAIRWISE_DISTANCE_FUNCTIONS``. The
possibilities are:
- True: Force all values of array to be finite.
- False: accepts np.inf, np.nan, pd.NA in array.
- 'allow-nan': accepts only np.nan and pd.NA values in array. Values
cannot be infinite.
.. versionadded:: 0.22
``force_all_finite`` accepts the string ``'allow-nan'``.
.. versionchanged:: 0.23
Accepts `pd.NA` and converts it into `np.nan`.
**kwds : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
D : ndarray of shape (n_samples_X, n_samples_X) or \
(n_samples_X, n_samples_Y)
A distance matrix D such that D_{i, j} is the distance between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then D_{i, j} is the distance between the ith array
from X and the jth array from Y.
See Also
--------
pairwise_distances_chunked : Performs the same calculation as this
function, but returns a generator of chunks of the distance matrix, in
order to limit memory usage.
paired_distances : Computes the distances between corresponding elements
of two arrays.
"""
if (metric not in _VALID_METRICS and
not callable(metric) and metric != "precomputed"):
raise ValueError("Unknown metric %s. "
"Valid metrics are %s, or 'precomputed', or a "
"callable" % (metric, _VALID_METRICS))
if metric == "precomputed":
X, _ = check_pairwise_arrays(X, Y, precomputed=True,
force_all_finite=force_all_finite)
whom = ("`pairwise_distances`. Precomputed distance "
"matrices need to have non-negative values.")
check_non_negative(X, whom=whom)
return X
elif metric in PAIRWISE_DISTANCE_FUNCTIONS:
func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric,
force_all_finite=force_all_finite, **kwds)
else:
if issparse(X) or issparse(Y):
raise TypeError("scipy distance metrics do not"
" support sparse matrices.")
dtype = bool if metric in PAIRWISE_BOOLEAN_FUNCTIONS else None
if (dtype == bool and
(X.dtype != bool or (Y is not None and Y.dtype != bool))):
msg = "Data was converted to boolean for metric %s" % metric
warnings.warn(msg, DataConversionWarning)
X, Y = check_pairwise_arrays(X, Y, dtype=dtype,
force_all_finite=force_all_finite)
# precompute data-derived metric params
params = _precompute_metric_params(X, Y, metric=metric, **kwds)
kwds.update(**params)
if effective_n_jobs(n_jobs) == 1 and X is Y:
return distance.squareform(distance.pdist(X, metric=metric,
**kwds))
func = partial(distance.cdist, metric=metric, **kwds)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
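# Illustrative sketch (added example; not part of the original module): a
# string metric dispatches to the scikit-learn or scipy implementation.
def _example_pairwise_distances():
    import numpy as np
    X = np.array([[0., 0.], [0., 3.], [4., 3.]])
    D = pairwise_distances(X, metric='euclidean')
    # D is symmetric with a zero diagonal; D[0, 2] == 5. (a 3-4-5 triangle).
    return D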
# These distances require boolean arrays, when using scipy.spatial.distance
PAIRWISE_BOOLEAN_FUNCTIONS = [
'dice',
'jaccard',
'kulsinski',
'matching',
'rogerstanimoto',
'russellrao',
'sokalmichener',
'sokalsneath',
'yule',
]
# Helper functions - kernels
PAIRWISE_KERNEL_FUNCTIONS = {
# If updating this dictionary, update the doc in both kernel_metrics()
# and also in pairwise_kernels()!
'additive_chi2': additive_chi2_kernel,
'chi2': chi2_kernel,
'linear': linear_kernel,
'polynomial': polynomial_kernel,
'poly': polynomial_kernel,
'rbf': rbf_kernel,
'laplacian': laplacian_kernel,
'sigmoid': sigmoid_kernel,
'cosine': cosine_similarity, }
def kernel_metrics():
"""Valid metrics for pairwise_kernels.
This function simply returns the valid pairwise kernel metrics.
It exists, however, to allow for a verbose description of the mapping for
each of the valid strings.
The valid kernel metrics, and the function they map to, are:
=============== ========================================
metric Function
=============== ========================================
'additive_chi2' sklearn.pairwise.additive_chi2_kernel
'chi2' sklearn.pairwise.chi2_kernel
'linear' sklearn.pairwise.linear_kernel
'poly' sklearn.pairwise.polynomial_kernel
'polynomial' sklearn.pairwise.polynomial_kernel
'rbf' sklearn.pairwise.rbf_kernel
'laplacian' sklearn.pairwise.laplacian_kernel
'sigmoid' sklearn.pairwise.sigmoid_kernel
'cosine' sklearn.pairwise.cosine_similarity
=============== ========================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_KERNEL_FUNCTIONS
KERNEL_PARAMS = {
"additive_chi2": (),
"chi2": frozenset(["gamma"]),
"cosine": (),
"linear": (),
"poly": frozenset(["gamma", "degree", "coef0"]),
"polynomial": frozenset(["gamma", "degree", "coef0"]),
"rbf": frozenset(["gamma"]),
"laplacian": frozenset(["gamma"]),
"sigmoid": frozenset(["gamma", "coef0"]),
}
def pairwise_kernels(X, Y=None, metric="linear", *, filter_params=False,
n_jobs=None, **kwds):
"""Compute the kernel between arrays X and optional array Y.
This method takes either a vector array or a kernel matrix, and returns
a kernel matrix. If the input is a vector array, the kernels are
computed. If the input is a kernel matrix, it is returned instead.
This method provides a safe way to take a kernel matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
kernel between the arrays from both X and Y.
Valid values for metric are:
['additive_chi2', 'chi2', 'linear', 'poly', 'polynomial', 'rbf',
'laplacian', 'sigmoid', 'cosine']
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : ndarray of shape (n_samples_X, n_samples_X) or \
(n_samples_X, n_features)
Array of pairwise kernels between samples, or a feature array.
The shape of the array should be (n_samples_X, n_samples_X) if
metric == "precomputed" and (n_samples_X, n_features) otherwise.
Y : ndarray of shape (n_samples_Y, n_features), default=None
A second feature array only if X has shape (n_samples_X, n_features).
metric : str or callable, default="linear"
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two rows from X as input and return the corresponding
kernel value as a single number. This means that callables from
:mod:`sklearn.metrics.pairwise` are not allowed, as they operate on
matrices, not single samples. Use the string identifying the kernel
instead.
filter_params : bool, default=False
Whether to filter invalid parameters or not.
n_jobs : int, default=None
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
**kwds : optional keyword parameters
Any further parameters are passed directly to the kernel function.
Returns
-------
K : ndarray of shape (n_samples_X, n_samples_X) or \
(n_samples_X, n_samples_Y)
A kernel matrix K such that K_{i, j} is the kernel between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then K_{i, j} is the kernel between the ith array
from X and the jth array from Y.
Notes
-----
If metric is 'precomputed', Y is ignored and X is returned.
"""
# import GPKernel locally to prevent circular imports
from ..gaussian_process.kernels import Kernel as GPKernel
if metric == "precomputed":
X, _ = check_pairwise_arrays(X, Y, precomputed=True)
return X
elif isinstance(metric, GPKernel):
func = metric.__call__
elif metric in PAIRWISE_KERNEL_FUNCTIONS:
if filter_params:
kwds = {k: kwds[k] for k in kwds
if k in KERNEL_PARAMS[metric]}
func = PAIRWISE_KERNEL_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
raise ValueError("Unknown kernel %r" % metric)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
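# Illustrative sketch (added example; not part of the original module):
# pairwise_kernels with metric='linear' is equivalent to linear_kernel(X, Y).
def _example_pairwise_kernels():
    import numpy as np
    X = np.array([[1., 0.], [0., 1.]])
    K = pairwise_kernels(X, metric='linear')
    # K == [[1., 0.], [0., 1.]] for this toy X.
    return K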
| bsd-3-clause |
chrisburr/scikit-learn | sklearn/tests/test_kernel_approximation.py | 244 | 7588 | import numpy as np
from scipy.sparse import csr_matrix
from sklearn.utils.testing import assert_array_equal, assert_equal, assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal, assert_raises
from sklearn.utils.testing import assert_less_equal
from sklearn.metrics.pairwise import kernel_metrics
from sklearn.kernel_approximation import RBFSampler
from sklearn.kernel_approximation import AdditiveChi2Sampler
from sklearn.kernel_approximation import SkewedChi2Sampler
from sklearn.kernel_approximation import Nystroem
from sklearn.metrics.pairwise import polynomial_kernel, rbf_kernel
# generate data
rng = np.random.RandomState(0)
X = rng.random_sample(size=(300, 50))
Y = rng.random_sample(size=(300, 50))
X /= X.sum(axis=1)[:, np.newaxis]
Y /= Y.sum(axis=1)[:, np.newaxis]
def test_additive_chi2_sampler():
# test that AdditiveChi2Sampler approximates kernel on random data
# compute exact kernel
# abbreviations to keep the formula readable
X_ = X[:, np.newaxis, :]
Y_ = Y[np.newaxis, :, :]
large_kernel = 2 * X_ * Y_ / (X_ + Y_)
# reduce to n_samples_x x n_samples_y by summing over features
kernel = (large_kernel.sum(axis=2))
# approximate kernel mapping
transform = AdditiveChi2Sampler(sample_steps=3)
X_trans = transform.fit_transform(X)
Y_trans = transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
assert_array_almost_equal(kernel, kernel_approx, 1)
X_sp_trans = transform.fit_transform(csr_matrix(X))
Y_sp_trans = transform.transform(csr_matrix(Y))
assert_array_equal(X_trans, X_sp_trans.A)
assert_array_equal(Y_trans, Y_sp_trans.A)
# test error is raised on negative input
Y_neg = Y.copy()
Y_neg[0, 0] = -1
assert_raises(ValueError, transform.transform, Y_neg)
# test error on invalid sample_steps
transform = AdditiveChi2Sampler(sample_steps=4)
assert_raises(ValueError, transform.fit, X)
# test that the sample interval is set correctly
sample_steps_available = [1, 2, 3]
for sample_steps in sample_steps_available:
# test that the sample_interval is initialized correctly
transform = AdditiveChi2Sampler(sample_steps=sample_steps)
assert_equal(transform.sample_interval, None)
# test that the sample_interval is changed in the fit method
transform.fit(X)
assert_not_equal(transform.sample_interval_, None)
# test that the sample_interval is set correctly
sample_interval = 0.3
transform = AdditiveChi2Sampler(sample_steps=4,
sample_interval=sample_interval)
assert_equal(transform.sample_interval, sample_interval)
transform.fit(X)
assert_equal(transform.sample_interval_, sample_interval)
def test_skewed_chi2_sampler():
# test that SkewedChi2Sampler approximates kernel on random data
# compute exact kernel
c = 0.03
# abbreviations to keep the formula readable
X_c = (X + c)[:, np.newaxis, :]
Y_c = (Y + c)[np.newaxis, :, :]
# we do it in log-space in the hope that it's more stable
# this array has shape (n_samples_x, n_samples_y, n_features)
log_kernel = ((np.log(X_c) / 2.) + (np.log(Y_c) / 2.) + np.log(2.) -
np.log(X_c + Y_c))
# reduce to n_samples_x x n_samples_y by summing over features in log-space
kernel = np.exp(log_kernel.sum(axis=2))
# approximate kernel mapping
transform = SkewedChi2Sampler(skewedness=c, n_components=1000,
random_state=42)
X_trans = transform.fit_transform(X)
Y_trans = transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
assert_array_almost_equal(kernel, kernel_approx, 1)
# test error is raised on negative input
Y_neg = Y.copy()
Y_neg[0, 0] = -1
assert_raises(ValueError, transform.transform, Y_neg)
def test_rbf_sampler():
# test that RBFSampler approximates kernel on random data
# compute exact kernel
gamma = 10.
kernel = rbf_kernel(X, Y, gamma=gamma)
# approximate kernel mapping
rbf_transform = RBFSampler(gamma=gamma, n_components=1000, random_state=42)
X_trans = rbf_transform.fit_transform(X)
Y_trans = rbf_transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
error = kernel - kernel_approx
assert_less_equal(np.abs(np.mean(error)), 0.01) # close to unbiased
np.abs(error, out=error)
assert_less_equal(np.max(error), 0.1) # nothing too far off
assert_less_equal(np.mean(error), 0.05) # mean is fairly close
def test_input_validation():
# Regression test: kernel approx. transformers should work on lists
# No assertions; the old versions would simply crash
X = [[1, 2], [3, 4], [5, 6]]
AdditiveChi2Sampler().fit(X).transform(X)
SkewedChi2Sampler().fit(X).transform(X)
RBFSampler().fit(X).transform(X)
X = csr_matrix(X)
RBFSampler().fit(X).transform(X)
def test_nystroem_approximation():
# some basic tests
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 4))
# With n_components = n_samples this is exact
X_transformed = Nystroem(n_components=X.shape[0]).fit_transform(X)
K = rbf_kernel(X)
assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
trans = Nystroem(n_components=2, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
# test callable kernel
linear_kernel = lambda X, Y: np.dot(X, Y.T)
trans = Nystroem(n_components=2, kernel=linear_kernel, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
# test that available kernels fit and transform
kernels_available = kernel_metrics()
for kern in kernels_available:
trans = Nystroem(n_components=2, kernel=kern, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
def test_nystroem_singular_kernel():
# test that nystroem works with singular kernel matrix
rng = np.random.RandomState(0)
X = rng.rand(10, 20)
X = np.vstack([X] * 2) # duplicate samples
gamma = 100
N = Nystroem(gamma=gamma, n_components=X.shape[0]).fit(X)
X_transformed = N.transform(X)
K = rbf_kernel(X, gamma=gamma)
assert_array_almost_equal(K, np.dot(X_transformed, X_transformed.T))
assert_true(np.all(np.isfinite(X_transformed)))
def test_nystroem_poly_kernel_params():
# Non-regression: Nystroem should pass other parameters beside gamma.
rnd = np.random.RandomState(37)
X = rnd.uniform(size=(10, 4))
K = polynomial_kernel(X, degree=3.1, coef0=.1)
nystroem = Nystroem(kernel="polynomial", n_components=X.shape[0],
degree=3.1, coef0=.1)
X_transformed = nystroem.fit_transform(X)
assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
def test_nystroem_callable():
# Test Nystroem on a callable.
rnd = np.random.RandomState(42)
n_samples = 10
X = rnd.uniform(size=(n_samples, 4))
def logging_histogram_kernel(x, y, log):
"""Histogram kernel that writes to a log."""
log.append(1)
return np.minimum(x, y).sum()
kernel_log = []
X = list(X) # test input validation
Nystroem(kernel=logging_histogram_kernel,
n_components=(n_samples - 1),
kernel_params={'log': kernel_log}).fit(X)
assert_equal(len(kernel_log), n_samples * (n_samples - 1) / 2)
| bsd-3-clause |
teonlamont/mne-python | mne/preprocessing/maxwell.py | 3 | 81794 | # -*- coding: utf-8 -*-
# Authors: Mark Wronkiewicz <[email protected]>
# Eric Larson <[email protected]>
# Jussi Nurminen <[email protected]>
# License: BSD (3-clause)
from functools import partial
from math import factorial
from os import path as op
import numpy as np
from scipy import linalg
from .. import __version__
from ..bem import _check_origin
from ..chpi import quat_to_rot, rot_to_quat
from ..transforms import (_str_to_frame, _get_trans, Transform, apply_trans,
_find_vector_rotation, _cart_to_sph, _get_n_moments,
_sph_to_cart_partials, _deg_ord_idx, _average_quats,
_sh_complex_to_real, _sh_real_to_complex, _sh_negate)
from ..forward import _concatenate_coils, _prep_meg_channels, _create_meg_coils
from ..surface import _normalize_vectors
from ..io.constants import FIFF
from ..io.meas_info import _simplify_info
from ..io.proc_history import _read_ctc
from ..io.write import _generate_meas_id, DATE_NONE
from ..io import _loc_to_coil_trans, BaseRaw
from ..io.pick import pick_types, pick_info
from ..utils import verbose, logger, _clean_names, warn, _time_mask, _pl
from ..fixes import _get_args, _safe_svd, _get_sph_harm, einsum
from ..externals.six import string_types
from ..channels.channels import _get_T1T2_mag_inds
# Note: Elekta uses single precision and some algorithms might use
# truncated versions of constants (e.g., μ0), which could lead to small
# differences between algorithms
@verbose
def maxwell_filter(raw, origin='auto', int_order=8, ext_order=3,
calibration=None, cross_talk=None, st_duration=None,
st_correlation=0.98, coord_frame='head', destination=None,
regularize='in', ignore_ref=False, bad_condition='error',
head_pos=None, st_fixed=True, st_only=False, mag_scale=100.,
verbose=None):
u"""Apply Maxwell filter to data using multipole moments.
.. warning:: Automatic bad channel detection is not currently implemented.
It is critical to mark bad channels before running Maxwell
filtering to prevent artifact spreading.
.. warning:: Maxwell filtering in MNE is not designed or certified
for clinical use.
Parameters
----------
raw : instance of mne.io.Raw
Data to be filtered
origin : array-like, shape (3,) | str
Origin of internal and external multipolar moment space in meters.
The default is ``'auto'``, which means a head-digitization-based
origin fit when ``coord_frame='head'``, and ``(0., 0., 0.)`` when
``coord_frame='meg'``.
int_order : int
Order of internal component of spherical expansion.
ext_order : int
Order of external component of spherical expansion.
calibration : str | None
Path to the ``'.dat'`` file with fine calibration coefficients.
File can have 1D or 3D gradiometer imbalance correction.
This file is machine/site-specific.
cross_talk : str | None
Path to the FIF file with cross-talk correction information.
st_duration : float | None
If not None, apply spatiotemporal SSS with specified buffer duration
(in seconds). Elekta's default is 10.0 seconds in MaxFilter™ v2.2.
Spatiotemporal SSS acts as implicitly as a high-pass filter where the
cut-off frequency is 1/st_dur Hz. For this (and other) reasons, longer
buffers are generally better as long as your system can handle the
higher memory usage. To ensure that each window is processed
identically, choose a buffer length that divides evenly into your data.
Any data at the trailing edge that doesn't fit evenly into a whole
buffer window will be lumped into the previous buffer.
st_correlation : float
Correlation limit between inner and outer subspaces used to reject
overlapping inner/outer signals during spatiotemporal SSS.
coord_frame : str
The coordinate frame that the ``origin`` is specified in, either
``'meg'`` or ``'head'``. For empty-room recordings that do not have
a head<->meg transform ``info['dev_head_t']``, the MEG coordinate
frame should be used.
destination : str | array-like, shape (3,) | None
The destination location for the head. Can be ``None``, which
will not change the head position, or a string path to a FIF file
containing a MEG device<->head transformation, or a 3-element array
giving the coordinates to translate to (with no rotations).
For example, ``destination=(0, 0, 0.04)`` would translate the bases
as ``--trans default`` would in MaxFilter™ (i.e., to the default
head location).
regularize : str | None
Basis regularization type, must be "in" or None.
"in" is the same algorithm as the "-regularize in" option in
MaxFilter™.
ignore_ref : bool
If True, do not include reference channels in compensation. This
option should be True for KIT files, since Maxwell filtering
with reference channels is not currently supported.
bad_condition : str
How to deal with ill-conditioned SSS matrices. Can be "error"
(default), "warning", or "ignore".
head_pos : array | None
If array, movement compensation will be performed.
The array should be of shape (N, 10), holding the position
parameters as returned by e.g. `read_head_pos`.
.. versionadded:: 0.12
st_fixed : bool
If True (default), do tSSS using the median head position during the
``st_duration`` window. This is the default behavior of MaxFilter
and has been most extensively tested.
.. versionadded:: 0.12
st_only : bool
If True, only tSSS (temporal) projection of MEG data will be
performed on the output data. The non-tSSS parameters (e.g.,
``int_order``, ``calibration``, ``head_pos``, etc.) will still be
used to form the SSS bases used to calculate temporal projectors,
but the output MEG data will *only* have temporal projections
performed. Noise reduction from SSS basis multiplication,
cross-talk cancellation, movement compensation, and so forth
will not be applied to the data. This is useful, for example, when
evoked movement compensation will be performed with
:func:`mne.epochs.average_movements`.
.. versionadded:: 0.12
mag_scale : float | str
The magnetometer scale-factor used to bring the magnetometers
to approximately the same order of magnitude as the gradiometers
(default 100.), as they have different units (T vs T/m).
Can be ``'auto'`` to use the reciprocal of the physical distance
between the gradiometer pickup loops (e.g., 0.0168 m yields
59.5 for VectorView).
.. versionadded:: 0.13
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
raw_sss : instance of mne.io.Raw
The raw data with Maxwell filtering applied.
See Also
--------
mne.chpi.filter_chpi
mne.chpi.read_head_pos
mne.epochs.average_movements
Notes
-----
.. versionadded:: 0.11
Some of this code was adapted and relicensed (with BSD form) with
permission from Jussi Nurminen. These algorithms are based on work
from [1]_ and [2]_.
.. note:: This code may use multiple CPU cores, see the
:ref:`FAQ <faq_cpu>` for more information.
Compared to Elekta's MaxFilter™ software, the MNE Maxwell filtering
routines currently provide the following features:
+-----------------------------------------------------------------------------+-----+-----------+
| Feature | MNE | MaxFilter |
+=============================================================================+=====+===========+
| Maxwell filtering software shielding | X | X |
+-----------------------------------------------------------------------------+-----+-----------+
| Bad channel reconstruction | X | X |
+-----------------------------------------------------------------------------+-----+-----------+
| Cross-talk cancellation | X | X |
+-----------------------------------------------------------------------------+-----+-----------+
| Fine calibration correction (1D) | X | X |
+-----------------------------------------------------------------------------+-----+-----------+
| Fine calibration correction (3D) | X | |
+-----------------------------------------------------------------------------+-----+-----------+
| Spatio-temporal SSS (tSSS) | X | X |
+-----------------------------------------------------------------------------+-----+-----------+
| Coordinate frame translation | X | X |
+-----------------------------------------------------------------------------+-----+-----------+
| Regularization using information theory | X | X |
+-----------------------------------------------------------------------------+-----+-----------+
| Movement compensation (raw) | X | X |
+-----------------------------------------------------------------------------+-----+-----------+
| Movement compensation (:func:`epochs <mne.epochs.average_movements>`) | X | |
+-----------------------------------------------------------------------------+-----+-----------+
| :func:`cHPI subtraction <mne.chpi.filter_chpi>` | X | X |
+-----------------------------------------------------------------------------+-----+-----------+
| Double floating point precision | X | |
+-----------------------------------------------------------------------------+-----+-----------+
| Seamless processing of split (``-1.fif``) and concatenated files | X | |
+-----------------------------------------------------------------------------+-----+-----------+
| Certified for clinical use | | X |
+-----------------------------------------------------------------------------+-----+-----------+
| Automatic bad channel detection | | X |
+-----------------------------------------------------------------------------+-----+-----------+
| Head position estimation | | X |
+-----------------------------------------------------------------------------+-----+-----------+
Epoch-based movement compensation is described in [1]_.
Use of Maxwell filtering routines with non-Elekta systems is currently
**experimental**. Worse results for non-Elekta systems are expected due
to (at least):
* Missing fine-calibration and cross-talk cancellation data for
other systems.
* Processing with reference sensors has not been vetted.
* Regularization of components may not work well for all systems.
* Coil integration has not been optimized using Abramowitz/Stegun
definitions.
.. note:: Various Maxwell filtering algorithm components are covered by
patents owned by Elekta Oy, Helsinki, Finland.
These patents include, but may not be limited to:
- US2006031038 (Signal Space Separation)
- US6876196 (Head position determination)
- WO2005067789 (DC fields)
- WO2005078467 (MaxShield)
- WO2006114473 (Temporal Signal Space Separation)
These patents likely preclude the use of Maxwell filtering code
in commercial applications. Consult a lawyer if necessary.
Currently, in order to perform Maxwell filtering, the raw data must not
have any projectors applied. During Maxwell filtering, the spatial
structure of the data is modified, so projectors are discarded (unless
in ``st_only=True`` mode).
References
----------
.. [1] Taulu S. and Kajola M. "Presentation of electromagnetic
multichannel data: The signal space separation method,"
Journal of Applied Physics, vol. 97, pp. 124905 1-10, 2005.
http://lib.tkk.fi/Diss/2008/isbn9789512295654/article2.pdf
.. [2] Taulu S. and Simola J. "Spatiotemporal signal space separation
method for rejecting nearby interference in MEG measurements,"
Physics in Medicine and Biology, vol. 51, pp. 1759-1768, 2006.
http://lib.tkk.fi/Diss/2008/isbn9789512295654/article3.pdf
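    Examples
    --------
    A minimal usage sketch, assuming ``raw`` is an ``mne.io.Raw`` instance
    with no projectors applied and that all other parameters keep their
    defaults::

        raw_sss = maxwell_filter(raw)                    # plain SSS
        raw_tsss = maxwell_filter(raw, st_duration=10.)  # tSSS, 10 s windows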
""" # noqa: E501
# There are an absurd number of different possible notations for spherical
# coordinates, which confounds the notation for spherical harmonics. Here,
# we purposefully stay away from shorthand notation in both and use
# explicit terms (like 'azimuth' and 'polar') to avoid confusion.
# See mathworld.wolfram.com/SphericalHarmonic.html for more discussion.
# Our code follows the same standard that ``scipy`` uses for ``sph_harm``.
# triage inputs ASAP to avoid late-thrown errors
if not isinstance(raw, BaseRaw):
raise TypeError('raw must be Raw, not %s' % type(raw))
_check_usable(raw)
_check_regularize(regularize)
st_correlation = float(st_correlation)
if st_correlation <= 0. or st_correlation > 1.:
raise ValueError('Need 0 < st_correlation <= 1., got %s'
% st_correlation)
if coord_frame not in ('head', 'meg'):
raise ValueError('coord_frame must be either "head" or "meg", not "%s"'
% coord_frame)
head_frame = True if coord_frame == 'head' else False
recon_trans = _check_destination(destination, raw.info, head_frame)
if st_duration is not None:
st_duration = float(st_duration)
if not 0. < st_duration <= raw.times[-1] + 1. / raw.info['sfreq']:
raise ValueError('st_duration (%0.1fs) must be between 0 and the '
'duration of the data (%0.1fs).'
% (st_duration, raw.times[-1]))
st_correlation = float(st_correlation)
st_duration = int(round(st_duration * raw.info['sfreq']))
if not 0. < st_correlation <= 1:
raise ValueError('st_correlation must be between 0. and 1.')
if not isinstance(bad_condition, string_types) or \
bad_condition not in ['error', 'warning', 'ignore']:
raise ValueError('bad_condition must be "error", "warning", or '
'"ignore", not %s' % bad_condition)
if raw.info['dev_head_t'] is None and coord_frame == 'head':
raise RuntimeError('coord_frame cannot be "head" because '
'info["dev_head_t"] is None; if this is an '
'empty room recording, consider using '
'coord_frame="meg"')
if st_only and st_duration is None:
raise ValueError('st_duration must not be None if st_only is True')
head_pos = _check_pos(head_pos, head_frame, raw, st_fixed,
raw.info['sfreq'])
_check_info(raw.info, sss=not st_only, tsss=st_duration is not None,
calibration=not st_only and calibration is not None,
ctc=not st_only and cross_talk is not None)
# Now we can actually get moving
logger.info('Maxwell filtering raw data')
add_channels = (head_pos[0] is not None) and not st_only
raw_sss, pos_picks = _copy_preload_add_channels(
raw, add_channels=add_channels)
del raw
if not st_only:
# remove MEG projectors, they won't apply now
_remove_meg_projs(raw_sss)
info = raw_sss.info
meg_picks, mag_picks, grad_picks, good_picks, mag_or_fine = \
_get_mf_picks(info, int_order, ext_order, ignore_ref)
# Magnetometers are scaled to improve numerical stability
coil_scale, mag_scale = _get_coil_scale(
meg_picks, mag_picks, grad_picks, mag_scale, info)
#
# Fine calibration processing (load fine cal and overwrite sensor geometry)
#
sss_cal = dict()
if calibration is not None:
calibration, sss_cal = _update_sensor_geometry(info, calibration,
ignore_ref)
mag_or_fine.fill(True) # all channels now have some mag-type data
# Determine/check the origin of the expansion
origin = _check_origin(origin, info, coord_frame, disp=True)
# Convert to the head frame
if coord_frame == 'meg' and info['dev_head_t'] is not None:
origin_head = apply_trans(info['dev_head_t'], origin)
else:
origin_head = origin
orig_origin, orig_coord_frame = origin, coord_frame
del origin, coord_frame
origin_head.setflags(write=False)
n_in, n_out = _get_n_moments([int_order, ext_order])
#
# Cross-talk processing
#
if cross_talk is not None:
sss_ctc = _read_ctc(cross_talk)
ctc_chs = sss_ctc['proj_items_chs']
meg_ch_names = [info['ch_names'][p] for p in meg_picks]
# checking for extra space ambiguity in channel names
# between old and new fif files
if meg_ch_names[0] not in ctc_chs:
ctc_chs = _clean_names(ctc_chs, remove_whitespace=True)
missing = sorted(list(set(meg_ch_names) - set(ctc_chs)))
if len(missing) != 0:
raise RuntimeError('Missing MEG channels in cross-talk matrix:\n%s'
% missing)
missing = sorted(list(set(ctc_chs) - set(meg_ch_names)))
if len(missing) > 0:
warn('Not all cross-talk channels in raw:\n%s' % missing)
ctc_picks = [ctc_chs.index(info['ch_names'][c])
for c in meg_picks[good_picks]]
assert len(ctc_picks) == len(good_picks) # otherwise we errored
ctc = sss_ctc['decoupler'][ctc_picks][:, ctc_picks]
        # MaxFilter stores the decoupler transposed (reason unknown), so match
        # that convention here
sss_ctc['decoupler'] = sss_ctc['decoupler'].T.tocsc()
else:
sss_ctc = dict()
#
# Translate to destination frame (always use non-fine-cal bases)
#
exp = dict(origin=origin_head, int_order=int_order, ext_order=0)
all_coils = _prep_mf_coils(info, ignore_ref)
S_recon = _trans_sss_basis(exp, all_coils, recon_trans, coil_scale)
exp['ext_order'] = ext_order
# Reconstruct data from internal space only (Eq. 38), and rescale S_recon
S_recon /= coil_scale
if recon_trans is not None:
# warn if we have translated too far
diff = 1000 * (info['dev_head_t']['trans'][:3, 3] -
recon_trans['trans'][:3, 3])
dist = np.sqrt(np.sum(_sq(diff)))
if dist > 25.:
warn('Head position change is over 25 mm (%s) = %0.1f mm'
% (', '.join('%0.1f' % x for x in diff), dist))
# Reconstruct raw file object with spatiotemporal processed data
max_st = dict()
if st_duration is not None:
max_st.update(job=10, subspcorr=st_correlation,
buflen=st_duration / info['sfreq'])
logger.info(' Processing data using tSSS with st_duration=%s'
% max_st['buflen'])
st_when = 'before' if st_fixed else 'after' # relative to movecomp
else:
# st_duration from here on will act like the chunk size
st_duration = max(int(round(10. * info['sfreq'])), 1)
st_correlation = None
st_when = 'never'
st_duration = min(len(raw_sss.times), st_duration)
del st_fixed
# Generate time points to break up data into equal-length windows
read_lims = np.arange(0, len(raw_sss.times) + 1, st_duration)
if len(read_lims) == 1:
read_lims = np.concatenate([read_lims, [len(raw_sss.times)]])
if read_lims[-1] != len(raw_sss.times):
read_lims[-1] = len(raw_sss.times)
# len_last_buf < st_dur so fold it into the previous buffer
if st_correlation is not None and len(read_lims) > 2:
logger.info(' Spatiotemporal window did not fit evenly into '
'raw object. The final %0.2f seconds were lumped '
'onto the previous window.'
% ((read_lims[-1] - read_lims[-2] - st_duration) /
info['sfreq'],))
assert len(read_lims) >= 2
assert read_lims[0] == 0 and read_lims[-1] == len(raw_sss.times)
#
# Do the heavy lifting
#
# Figure out which transforms we need for each tSSS block
# (and transform pos[1] to times)
head_pos[1] = raw_sss.time_as_index(head_pos[1], use_rounding=True)
# Compute the first bit of pos_data for cHPI reporting
if info['dev_head_t'] is not None and head_pos[0] is not None:
this_pos_quat = np.concatenate([
rot_to_quat(info['dev_head_t']['trans'][:3, :3]),
info['dev_head_t']['trans'][:3, 3],
np.zeros(3)])
else:
this_pos_quat = None
_get_this_decomp_trans = partial(
_get_decomp, all_coils=all_coils,
cal=calibration, regularize=regularize,
exp=exp, ignore_ref=ignore_ref, coil_scale=coil_scale,
grad_picks=grad_picks, mag_picks=mag_picks, good_picks=good_picks,
mag_or_fine=mag_or_fine, bad_condition=bad_condition,
mag_scale=mag_scale)
S_decomp, pS_decomp, reg_moments, n_use_in = _get_this_decomp_trans(
info['dev_head_t'], t=0.)
reg_moments_0 = reg_moments.copy()
# Loop through buffer windows of data
n_sig = int(np.floor(np.log10(max(len(read_lims), 0)))) + 1
logger.info(' Processing %s data chunk%s of (at least) %0.1f sec'
% (len(read_lims) - 1, _pl(read_lims),
st_duration / info['sfreq']))
for ii, (start, stop) in enumerate(zip(read_lims[:-1], read_lims[1:])):
rel_times = raw_sss.times[start:stop]
t_str = '%8.3f - %8.3f sec' % tuple(rel_times[[0, -1]])
t_str += ('(#%d/%d)'
% (ii + 1, len(read_lims) - 1)).rjust(2 * n_sig + 5)
# Get original data
orig_data = raw_sss._data[meg_picks[good_picks], start:stop]
        # This could be np.empty when st_only is False, but copying the
        # original data here is cheap, so always take it
out_meg_data = raw_sss._data[meg_picks, start:stop]
# Apply cross-talk correction
if cross_talk is not None:
orig_data = ctc.dot(orig_data)
out_pos_data = np.empty((len(pos_picks), stop - start))
# Figure out which positions to use
t_s_s_q_a = _trans_starts_stops_quats(head_pos, start, stop,
this_pos_quat)
n_positions = len(t_s_s_q_a[0])
# Set up post-tSSS or do pre-tSSS
if st_correlation is not None:
# If doing tSSS before movecomp...
resid = orig_data.copy() # to be safe let's operate on a copy
if st_when == 'after':
orig_in_data = np.empty((len(meg_picks), stop - start))
else: # 'before'
avg_trans = t_s_s_q_a[-1]
if avg_trans is not None:
# if doing movecomp
S_decomp_st, pS_decomp_st, _, n_use_in_st = \
_get_this_decomp_trans(avg_trans, t=rel_times[0])
else:
S_decomp_st, pS_decomp_st = S_decomp, pS_decomp
n_use_in_st = n_use_in
orig_in_data = np.dot(np.dot(S_decomp_st[:, :n_use_in_st],
pS_decomp_st[:n_use_in_st]),
resid)
resid -= np.dot(np.dot(S_decomp_st[:, n_use_in_st:],
pS_decomp_st[n_use_in_st:]), resid)
resid -= orig_in_data
# Here we operate on our actual data
proc = out_meg_data if st_only else orig_data
_do_tSSS(proc, orig_in_data, resid, st_correlation,
n_positions, t_str)
if not st_only or st_when == 'after':
# Do movement compensation on the data
for trans, rel_start, rel_stop, this_pos_quat in \
zip(*t_s_s_q_a[:4]):
# Recalculate bases if necessary (trans will be None iff the
# first position in this interval is the same as last of the
# previous interval)
if trans is not None:
S_decomp, pS_decomp, reg_moments, n_use_in = \
_get_this_decomp_trans(trans, t=rel_times[rel_start])
# Determine multipole moments for this interval
mm_in = np.dot(pS_decomp[:n_use_in],
orig_data[:, rel_start:rel_stop])
# Our output data
if not st_only:
out_meg_data[:, rel_start:rel_stop] = \
np.dot(S_recon.take(reg_moments[:n_use_in], axis=1),
mm_in)
if len(pos_picks) > 0:
out_pos_data[:, rel_start:rel_stop] = \
this_pos_quat[:, np.newaxis]
# Transform orig_data to store just the residual
if st_when == 'after':
# Reconstruct data using original location from external
# and internal spaces and compute residual
rel_resid_data = resid[:, rel_start:rel_stop]
orig_in_data[:, rel_start:rel_stop] = \
np.dot(S_decomp[:, :n_use_in], mm_in)
rel_resid_data -= np.dot(np.dot(S_decomp[:, n_use_in:],
pS_decomp[n_use_in:]),
rel_resid_data)
rel_resid_data -= orig_in_data[:, rel_start:rel_stop]
# If doing tSSS at the end
if st_when == 'after':
_do_tSSS(out_meg_data, orig_in_data, resid, st_correlation,
n_positions, t_str)
elif st_when == 'never' and head_pos[0] is not None:
logger.info(' Used % 2d head position%s for %s'
% (n_positions, _pl(n_positions), t_str))
raw_sss._data[meg_picks, start:stop] = out_meg_data
raw_sss._data[pos_picks, start:stop] = out_pos_data
# Update info
if not st_only:
info['dev_head_t'] = recon_trans # set the reconstruction transform
_update_sss_info(raw_sss, orig_origin, int_order, ext_order,
len(good_picks), orig_coord_frame, sss_ctc, sss_cal,
max_st, reg_moments_0, st_only)
logger.info('[done]')
return raw_sss
def _get_coil_scale(meg_picks, mag_picks, grad_picks, mag_scale, info):
"""Get the magnetometer scale factor."""
if isinstance(mag_scale, string_types):
if mag_scale != 'auto':
raise ValueError('mag_scale must be a float or "auto", got "%s"'
% mag_scale)
if len(mag_picks) in (0, len(meg_picks)):
mag_scale = 100. # only one coil type, doesn't matter
logger.info(' Setting mag_scale=%0.2f because only one '
'coil type is present' % mag_scale)
else:
# Find our physical distance between gradiometer pickup loops
# ("base line")
coils = _create_meg_coils([info['chs'][pick]
for pick in meg_picks], 'accurate')
grad_base = set(coils[pick]['base'] for pick in grad_picks)
if len(grad_base) != 1 or list(grad_base)[0] <= 0:
raise RuntimeError('Could not automatically determine '
'mag_scale, could not find one '
'proper gradiometer distance from: %s'
% list(grad_base))
grad_base = list(grad_base)[0]
mag_scale = 1. / grad_base
logger.info(' Setting mag_scale=%0.2f based on gradiometer '
'distance %0.2f mm' % (mag_scale, 1000 * grad_base))
mag_scale = float(mag_scale)
coil_scale = np.ones((len(meg_picks), 1))
coil_scale[mag_picks] = mag_scale
return coil_scale, mag_scale
def _remove_meg_projs(inst):
"""Remove inplace existing MEG projectors (assumes inactive)."""
meg_picks = pick_types(inst.info, meg=True, exclude=[])
meg_channels = [inst.ch_names[pi] for pi in meg_picks]
non_meg_proj = list()
for proj in inst.info['projs']:
if not any(c in meg_channels for c in proj['data']['col_names']):
non_meg_proj.append(proj)
inst.add_proj(non_meg_proj, remove_existing=True, verbose=False)
def _check_destination(destination, info, head_frame):
"""Triage our reconstruction trans."""
if destination is None:
return info['dev_head_t']
if not head_frame:
raise RuntimeError('destination can only be set if using the '
'head coordinate frame')
if isinstance(destination, string_types):
recon_trans = _get_trans(destination, 'meg', 'head')[0]
elif isinstance(destination, Transform):
recon_trans = destination
else:
destination = np.array(destination, float)
if destination.shape != (3,):
raise ValueError('destination must be a 3-element vector, '
'str, or None')
recon_trans = np.eye(4)
recon_trans[:3, 3] = destination
recon_trans = Transform('meg', 'head', recon_trans)
if recon_trans.to_str != 'head' or recon_trans.from_str != 'MEG device':
raise RuntimeError('Destination transform is not MEG device -> head, '
'got %s -> %s' % (recon_trans.from_str,
recon_trans.to_str))
return recon_trans
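# A hedged illustration of the three ``destination`` forms triaged above
# (``info`` is assumed to come from a loaded raw file; the filename is
# hypothetical):
#
#     _check_destination(None, info, head_frame=True)    # keep dev_head_t
#     _check_destination('other_raw.fif', info, True)    # trans read from file
#     _check_destination([0., 0., 0.04], info, True)     # translation-only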
def _prep_mf_coils(info, ignore_ref=True):
"""Get all coil integration information loaded and sorted."""
coils, comp_coils = _prep_meg_channels(
info, accurate=True, elekta_defs=True, head_frame=False,
ignore_ref=ignore_ref, do_picking=False, verbose=False)[:2]
mag_mask = _get_mag_mask(coils)
if len(comp_coils) > 0:
meg_picks = pick_types(info, meg=True, ref_meg=False, exclude=[])
ref_picks = pick_types(info, meg=False, ref_meg=True, exclude=[])
inserts = np.searchsorted(meg_picks, ref_picks)
# len(inserts) == len(comp_coils)
for idx, comp_coil in zip(inserts[::-1], comp_coils[::-1]):
coils.insert(idx, comp_coil)
# Now we have:
# [c['chname'] for c in coils] ==
# [info['ch_names'][ii]
# for ii in pick_types(info, meg=True, ref_meg=True)]
# Now coils is a sorted list of coils. Time to do some vectorization.
n_coils = len(coils)
rmags = np.concatenate([coil['rmag'] for coil in coils])
cosmags = np.concatenate([coil['cosmag'] for coil in coils])
ws = np.concatenate([coil['w'] for coil in coils])
cosmags *= ws[:, np.newaxis]
del ws
n_int = np.array([len(coil['rmag']) for coil in coils])
bins = np.repeat(np.arange(len(n_int)), n_int)
bd = np.concatenate(([0], np.cumsum(n_int)))
slice_map = dict((ii, slice(start, stop))
for ii, (start, stop) in enumerate(zip(bd[:-1], bd[1:])))
return rmags, cosmags, bins, n_coils, mag_mask, slice_map
def _trans_starts_stops_quats(pos, start, stop, this_pos_data):
"""Get all trans and limits we need."""
pos_idx = np.arange(*np.searchsorted(pos[1], [start, stop]))
used = np.zeros(stop - start, bool)
trans = list()
rel_starts = list()
rel_stops = list()
quats = list()
weights = list()
for ti in range(-1, len(pos_idx)):
# first iteration for this block of data
if ti < 0:
rel_start = 0
rel_stop = pos[1][pos_idx[0]] if len(pos_idx) > 0 else stop
rel_stop = rel_stop - start
if rel_start == rel_stop:
continue # our first pos occurs on first time sample
# Don't calculate S_decomp here, use the last one
trans.append(None) # meaning: use previous
quats.append(this_pos_data)
else:
rel_start = pos[1][pos_idx[ti]] - start
if ti == len(pos_idx) - 1:
rel_stop = stop - start
else:
rel_stop = pos[1][pos_idx[ti + 1]] - start
trans.append(pos[0][pos_idx[ti]])
quats.append(pos[2][pos_idx[ti]])
assert 0 <= rel_start
assert rel_start < rel_stop
assert rel_stop <= stop - start
assert not used[rel_start:rel_stop].any()
used[rel_start:rel_stop] = True
rel_starts.append(rel_start)
rel_stops.append(rel_stop)
weights.append(rel_stop - rel_start)
assert used.all()
# Use weighted average for average trans over the window
if this_pos_data is None:
avg_trans = None
else:
weights = np.array(weights)
quats = np.array(quats)
weights = weights / weights.sum().astype(float) # int -> float
avg_quat = _average_quats(quats[:, :3], weights)
avg_t = np.dot(weights, quats[:, 3:6])
avg_trans = np.vstack([
np.hstack([quat_to_rot(avg_quat), avg_t[:, np.newaxis]]),
[[0., 0., 0., 1.]]])
return trans, rel_starts, rel_stops, quats, avg_trans
def _do_tSSS(clean_data, orig_in_data, resid, st_correlation,
n_positions, t_str):
"""Compute and apply SSP-like projection vectors based on min corr."""
np.asarray_chkfinite(resid)
t_proj = _overlap_projector(orig_in_data, resid, st_correlation)
# Apply projector according to Eq. 12 in [2]_
msg = (' Projecting %2d intersecting tSSS component%s '
'for %s' % (t_proj.shape[1], _pl(t_proj.shape[1], ' '), t_str))
if n_positions > 1:
msg += ' (across %2d position%s)' % (n_positions,
_pl(n_positions, ' '))
logger.info(msg)
clean_data -= np.dot(np.dot(clean_data, t_proj), t_proj.T)
def _copy_preload_add_channels(raw, add_channels):
"""Load data for processing and (maybe) add cHPI pos channels."""
raw = raw.copy()
if add_channels:
kinds = [FIFF.FIFFV_QUAT_1, FIFF.FIFFV_QUAT_2, FIFF.FIFFV_QUAT_3,
FIFF.FIFFV_QUAT_4, FIFF.FIFFV_QUAT_5, FIFF.FIFFV_QUAT_6,
FIFF.FIFFV_HPI_G, FIFF.FIFFV_HPI_ERR, FIFF.FIFFV_HPI_MOV]
out_shape = (len(raw.ch_names) + len(kinds), len(raw.times))
out_data = np.zeros(out_shape, np.float64)
msg = ' Appending head position result channels and '
if raw.preload:
logger.info(msg + 'copying original raw data')
out_data[:len(raw.ch_names)] = raw._data
raw._data = out_data
else:
logger.info(msg + 'loading raw data from disk')
raw._preload_data(out_data[:len(raw.ch_names)], verbose=False)
raw._data = out_data
assert raw.preload is True
off = len(raw.ch_names)
chpi_chs = [
dict(ch_name='CHPI%03d' % (ii + 1), logno=ii + 1,
scanno=off + ii + 1, unit_mul=-1, range=1., unit=-1,
kind=kinds[ii], coord_frame=FIFF.FIFFV_COORD_UNKNOWN,
cal=1e-4, coil_type=FIFF.FWD_COIL_UNKNOWN, loc=np.zeros(12))
for ii in range(len(kinds))]
raw.info['chs'].extend(chpi_chs)
raw.info._update_redundant()
raw.info._check_consistency()
assert raw._data.shape == (raw.info['nchan'], len(raw.times))
# Return the pos picks
pos_picks = np.arange(len(raw.ch_names) - len(chpi_chs),
len(raw.ch_names))
return raw, pos_picks
else:
if not raw.preload:
logger.info(' Loading raw data from disk')
raw.load_data(verbose=False)
else:
logger.info(' Using loaded raw data')
return raw, np.array([], int)
def _check_pos(pos, head_frame, raw, st_fixed, sfreq):
"""Check for a valid pos array and transform it to a more usable form."""
if pos is None:
return [None, np.array([-1])]
if not head_frame:
raise ValueError('positions can only be used if coord_frame="head"')
if not st_fixed:
warn('st_fixed=False is untested, use with caution!')
if not isinstance(pos, np.ndarray):
raise TypeError('pos must be an ndarray')
if pos.ndim != 2 or pos.shape[1] != 10:
raise ValueError('pos must be an array of shape (N, 10)')
t = pos[:, 0]
t_off = raw.first_samp / raw.info['sfreq']
if not np.array_equal(t, np.unique(t)):
        raise ValueError('Time points must be unique and in ascending order')
# We need an extra 1e-3 (1 ms) here because MaxFilter outputs values
# only out to 3 decimal places
if not _time_mask(t, tmin=t_off - 1e-3, tmax=None, sfreq=sfreq).all():
raise ValueError('Head position time points must be greater than '
'first sample offset, but found %0.4f < %0.4f'
% (t[0], t_off))
max_dist = np.sqrt(np.sum(pos[:, 4:7] ** 2, axis=1)).max()
if max_dist > 1.:
warn('Found a distance greater than 1 m (%0.3g m) from the device '
'origin, positions may be invalid and Maxwell filtering could '
'fail' % (max_dist,))
dev_head_ts = np.zeros((len(t), 4, 4))
dev_head_ts[:, 3, 3] = 1.
dev_head_ts[:, :3, 3] = pos[:, 4:7]
dev_head_ts[:, :3, :3] = quat_to_rot(pos[:, 1:4])
pos = [dev_head_ts, t - t_off, pos[:, 1:]]
return pos
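# Hedged illustration of the ``pos`` layout validated above: one row per head
# position estimate with columns [t, q1, q2, q3, x, y, z, g, err, v], the last
# three matching the HPI goodness/error/movement channels added in
# _copy_preload_add_channels. ``raw`` is assumed to be a loaded Raw instance.
#
#     head_pos = np.array([
#         [1.0, 0., 0., 0., 0.00, 0.00, 0.04, 0.98, 1e-3, 1e-3],
#         [2.0, 0., 0., 0., 0.00, 0.01, 0.04, 0.98, 1e-3, 1e-3],
#     ])
#     pos = _check_pos(head_pos, head_frame=True, raw=raw, st_fixed=True,
#                      sfreq=raw.info['sfreq'])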
def _get_decomp(trans, all_coils, cal, regularize, exp, ignore_ref,
coil_scale, grad_picks, mag_picks, good_picks, mag_or_fine,
bad_condition, t, mag_scale):
"""Get a decomposition matrix and pseudoinverse matrices."""
#
# Fine calibration processing (point-like magnetometers and calib. coeffs)
#
S_decomp = _get_s_decomp(exp, all_coils, trans, coil_scale, cal,
ignore_ref, grad_picks, mag_picks, good_picks,
mag_scale)
#
# Regularization
#
S_decomp, pS_decomp, sing, reg_moments, n_use_in = _regularize(
regularize, exp, S_decomp, mag_or_fine, t=t)
# Pseudo-inverse of total multipolar moment basis set (Part of Eq. 37)
cond = sing[0] / sing[-1]
logger.debug(' Decomposition matrix condition: %0.1f' % cond)
if bad_condition != 'ignore' and cond >= 1000.:
msg = 'Matrix is badly conditioned: %0.0f >= 1000' % cond
if bad_condition == 'error':
raise RuntimeError(msg)
else: # condition == 'warning':
warn(msg)
# Build in our data scaling here
pS_decomp *= coil_scale[good_picks].T
S_decomp /= coil_scale[good_picks]
return S_decomp, pS_decomp, reg_moments, n_use_in
def _get_s_decomp(exp, all_coils, trans, coil_scale, cal, ignore_ref,
grad_picks, mag_picks, good_picks, mag_scale):
"""Get S_decomp."""
S_decomp = _trans_sss_basis(exp, all_coils, trans, coil_scale)
if cal is not None:
# Compute point-like mags to incorporate gradiometer imbalance
grad_cals = _sss_basis_point(exp, trans, cal, ignore_ref, mag_scale)
# Add point like magnetometer data to bases.
S_decomp[grad_picks, :] += grad_cals
# Scale magnetometers by calibration coefficient
S_decomp[mag_picks, :] /= cal['mag_cals']
# We need to be careful about KIT gradiometers
S_decomp = S_decomp[good_picks]
return S_decomp
@verbose
def _regularize(regularize, exp, S_decomp, mag_or_fine, t, verbose=None):
"""Regularize a decomposition matrix."""
# ALWAYS regularize the out components according to norm, since
# gradiometer-only setups (e.g., KIT) can have zero first-order
# (homogeneous field) components
int_order, ext_order = exp['int_order'], exp['ext_order']
n_in, n_out = _get_n_moments([int_order, ext_order])
t_str = '%8.3f' % t
if regularize is not None: # regularize='in'
in_removes, out_removes = _regularize_in(
int_order, ext_order, S_decomp, mag_or_fine)
else:
in_removes = []
out_removes = _regularize_out(int_order, ext_order, mag_or_fine)
reg_in_moments = np.setdiff1d(np.arange(n_in), in_removes)
reg_out_moments = np.setdiff1d(np.arange(n_in, n_in + n_out),
out_removes)
n_use_in = len(reg_in_moments)
n_use_out = len(reg_out_moments)
reg_moments = np.concatenate((reg_in_moments, reg_out_moments))
S_decomp = S_decomp.take(reg_moments, axis=1)
pS_decomp, sing = _col_norm_pinv(S_decomp.copy())
if regularize is not None or n_use_out != n_out:
logger.info(' Using %s/%s harmonic components for %s '
'(%s/%s in, %s/%s out)'
% (n_use_in + n_use_out, n_in + n_out, t_str,
n_use_in, n_in, n_use_out, n_out))
return S_decomp, pS_decomp, sing, reg_moments, n_use_in
def _get_mf_picks(info, int_order, ext_order, ignore_ref=False):
"""Pick types for Maxwell filtering."""
# Check for T1/T2 mag types
mag_inds_T1T2 = _get_T1T2_mag_inds(info)
if len(mag_inds_T1T2) > 0:
warn('%d T1/T2 magnetometer channel types found. If using SSS, it is '
'advised to replace coil types using "fix_mag_coil_types".'
% len(mag_inds_T1T2))
# Get indices of channels to use in multipolar moment calculation
ref = not ignore_ref
meg_picks = pick_types(info, meg=True, ref_meg=ref, exclude=[])
meg_info = pick_info(_simplify_info(info), meg_picks)
del info
good_picks = pick_types(meg_info, meg=True, ref_meg=ref, exclude='bads')
n_bases = _get_n_moments([int_order, ext_order]).sum()
if n_bases > len(good_picks):
raise ValueError('Number of requested bases (%s) exceeds number of '
'good sensors (%s)' % (str(n_bases), len(good_picks)))
recons = [ch for ch in meg_info['bads']]
if len(recons) > 0:
logger.info(' Bad MEG channels being reconstructed: %s' % recons)
else:
logger.info(' No bad MEG channels')
ref_meg = False if ignore_ref else 'mag'
mag_picks = pick_types(meg_info, meg='mag', ref_meg=ref_meg, exclude=[])
ref_meg = False if ignore_ref else 'grad'
grad_picks = pick_types(meg_info, meg='grad', ref_meg=ref_meg, exclude=[])
assert len(mag_picks) + len(grad_picks) == len(meg_info['ch_names'])
# Determine which are magnetometers for external basis purposes
mag_or_fine = np.zeros(len(meg_picks), bool)
mag_or_fine[mag_picks] = True
# KIT gradiometers are marked as having units T, not T/M (argh)
# We need a separate variable for this because KIT grads should be
# treated mostly like magnetometers (e.g., scaled by 100) for reg
coil_types = np.array([ch['coil_type'] for ch in meg_info['chs']])
mag_or_fine[(coil_types & 0xFFFF) == FIFF.FIFFV_COIL_KIT_GRAD] = False
# The same thing goes for CTF gradiometers...
ctf_grads = [FIFF.FIFFV_COIL_CTF_GRAD,
FIFF.FIFFV_COIL_CTF_REF_GRAD,
FIFF.FIFFV_COIL_CTF_OFFDIAG_REF_GRAD]
mag_or_fine[np.in1d(coil_types, ctf_grads)] = False
msg = (' Processing %s gradiometers and %s magnetometers'
% (len(grad_picks), len(mag_picks)))
n_kit = len(mag_picks) - mag_or_fine.sum()
if n_kit > 0:
msg += ' (of which %s are actually KIT gradiometers)' % n_kit
logger.info(msg)
return meg_picks, mag_picks, grad_picks, good_picks, mag_or_fine
def _check_regularize(regularize):
"""Ensure regularize is valid."""
if not (regularize is None or (isinstance(regularize, string_types) and
regularize in ('in',))):
raise ValueError('regularize must be None or "in"')
def _check_usable(inst):
"""Ensure our data are clean."""
if inst.proj:
raise RuntimeError('Projectors cannot be applied to data during '
'Maxwell filtering.')
current_comp = inst.compensation_grade
if current_comp not in (0, None):
raise RuntimeError('Maxwell filter cannot be done on compensated '
'channels, but data have been compensated with '
'grade %s.' % current_comp)
def _col_norm_pinv(x):
"""Compute the pinv with column-normalization to stabilize calculation.
Note: will modify/overwrite x.
"""
norm = np.sqrt(np.sum(x * x, axis=0))
x /= norm
u, s, v = _safe_svd(x, full_matrices=False, **check_disable)
v /= norm
return np.dot(v.T * 1. / s, u.T), s
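# Hedged sanity sketch: for a full-column-rank matrix the result above should
# match ``np.linalg.pinv`` (column normalization only improves conditioning of
# the SVD, it does not change the pseudoinverse):
#
#     rng = np.random.RandomState(0)
#     x = rng.randn(20, 5)
#     pinv, sing = _col_norm_pinv(x.copy())  # copy: the input is overwritten
#     np.testing.assert_allclose(pinv, np.linalg.pinv(x), atol=1e-12)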
def _sq(x):
"""Square quickly."""
return x * x
def _check_finite(data):
"""Ensure data is finite."""
if not np.isfinite(data).all():
raise RuntimeError('data contains non-finite numbers')
def _sph_harm_norm(order, degree):
"""Compute normalization factor for spherical harmonics."""
# we could use scipy.special.poch(degree + order + 1, -2 * order)
# here, but it's slower for our fairly small degree
norm = np.sqrt((2 * degree + 1.) / (4 * np.pi))
if order != 0:
norm *= np.sqrt(factorial(degree - order) /
float(factorial(degree + order)))
return norm
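# Hedged numerical sketch: combined with the associated Legendre function,
# this normalization reproduces scipy's spherical harmonic at zero azimuth
# (where the exp(1j * order * az) factor equals 1):
#
#     from scipy.special import lpmv, sph_harm
#     order, degree, pol = 2, 3, 0.7
#     lhs = sph_harm(order, degree, 0., pol)
#     rhs = _sph_harm_norm(order, degree) * lpmv(order, degree, np.cos(pol))
#     np.testing.assert_allclose(lhs, rhs)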
def _concatenate_sph_coils(coils):
"""Concatenate MEG coil parameters for spherical harmoncs."""
rs = np.concatenate([coil['r0_exey'] for coil in coils])
wcoils = np.concatenate([coil['w'] for coil in coils])
ezs = np.concatenate([np.tile(coil['ez'][np.newaxis, :],
(len(coil['rmag']), 1))
for coil in coils])
bins = np.repeat(np.arange(len(coils)),
[len(coil['rmag']) for coil in coils])
return rs, wcoils, ezs, bins
_mu_0 = 4e-7 * np.pi # magnetic permeability
def _get_mag_mask(coils):
"""Get the coil_scale for Maxwell filtering."""
return np.array([coil['coil_class'] == FIFF.FWD_COILC_MAG
for coil in coils])
def _sss_basis_basic(exp, coils, mag_scale=100., method='standard'):
"""Compute SSS basis using non-optimized (but more readable) algorithms."""
int_order, ext_order = exp['int_order'], exp['ext_order']
origin = exp['origin']
# Compute vector between origin and coil, convert to spherical coords
if method == 'standard':
# Get position, normal, weights, and number of integration pts.
rmags, cosmags, ws, bins = _concatenate_coils(coils)
rmags -= origin
# Convert points to spherical coordinates
rad, az, pol = _cart_to_sph(rmags).T
cosmags *= ws[:, np.newaxis]
del rmags, ws
out_type = np.float64
else: # testing equivalence method
rs, wcoils, ezs, bins = _concatenate_sph_coils(coils)
rs -= origin
rad, az, pol = _cart_to_sph(rs).T
ezs *= wcoils[:, np.newaxis]
del rs, wcoils
out_type = np.complex128
del origin
# Set up output matrices
n_in, n_out = _get_n_moments([int_order, ext_order])
S_tot = np.empty((len(coils), n_in + n_out), out_type)
S_in = S_tot[:, :n_in]
S_out = S_tot[:, n_in:]
coil_scale = np.ones((len(coils), 1))
coil_scale[_get_mag_mask(coils)] = mag_scale
# Compute internal/external basis vectors (exclude degree 0; L/RHS Eq. 5)
for degree in range(1, max(int_order, ext_order) + 1):
# Only loop over positive orders, negative orders are handled
# for efficiency within
for order in range(degree + 1):
S_in_out = list()
grads_in_out = list()
# Same spherical harmonic is used for both internal and external
sph = _get_sph_harm()(order, degree, az, pol)
sph_norm = _sph_harm_norm(order, degree)
# Compute complex gradient for all integration points
# in spherical coordinates (Eq. 6). The gradient for rad, az, pol
# is obtained by taking the partial derivative of Eq. 4 w.r.t. each
# coordinate.
az_factor = 1j * order * sph / np.sin(np.maximum(pol, 1e-16))
pol_factor = (-sph_norm * np.sin(pol) * np.exp(1j * order * az) *
_alegendre_deriv(order, degree, np.cos(pol)))
if degree <= int_order:
S_in_out.append(S_in)
in_norm = _mu_0 * rad ** -(degree + 2)
g_rad = in_norm * (-(degree + 1.) * sph)
g_az = in_norm * az_factor
g_pol = in_norm * pol_factor
grads_in_out.append(_sph_to_cart_partials(az, pol,
g_rad, g_az, g_pol))
if degree <= ext_order:
S_in_out.append(S_out)
out_norm = _mu_0 * rad ** (degree - 1)
g_rad = out_norm * degree * sph
g_az = out_norm * az_factor
g_pol = out_norm * pol_factor
grads_in_out.append(_sph_to_cart_partials(az, pol,
g_rad, g_az, g_pol))
for spc, grads in zip(S_in_out, grads_in_out):
# We could convert to real at the end, but it's more efficient
# to do it now
if method == 'standard':
grads_pos_neg = [_sh_complex_to_real(grads, order)]
orders_pos_neg = [order]
# Deal with the negative orders
if order > 0:
# it's faster to use the conjugation property for
# our normalized spherical harmonics than recalculate
grads_pos_neg.append(_sh_complex_to_real(
_sh_negate(grads, order), -order))
orders_pos_neg.append(-order)
for gr, oo in zip(grads_pos_neg, orders_pos_neg):
# Gradients dotted w/integration point weighted normals
gr = einsum('ij,ij->i', gr, cosmags)
vals = np.bincount(bins, gr, len(coils))
spc[:, _deg_ord_idx(degree, oo)] = -vals
else:
grads = einsum('ij,ij->i', grads, ezs)
v = (np.bincount(bins, grads.real, len(coils)) +
1j * np.bincount(bins, grads.imag, len(coils)))
spc[:, _deg_ord_idx(degree, order)] = -v
if order > 0:
spc[:, _deg_ord_idx(degree, -order)] = \
-_sh_negate(v, order)
# Scale magnetometers
S_tot *= coil_scale
if method != 'standard':
# Eventually we could probably refactor this for 2x mem (and maybe CPU)
# savings by changing how spc/S_tot is assigned above (real only)
S_tot = _bases_complex_to_real(S_tot, int_order, ext_order)
return S_tot
def _sss_basis(exp, all_coils):
"""Compute SSS basis for given conditions.
Parameters
----------
exp : dict
Must contain the following keys:
origin : ndarray, shape (3,)
                Origin of the multipolar moment space in meters
int_order : int
Order of the internal multipolar moment space
ext_order : int
Order of the external multipolar moment space
    all_coils : tuple
        Output of ``_prep_mf_coils``: concatenated integration point positions
        and weighted surface normals, bin indices, the number of coils, a
        magnetometer mask, and a slice map. All coil geometry must be in the
        same coordinate frame as ``origin`` (``head`` or ``meg``).
Returns
-------
bases : ndarray, shape (n_coils, n_mult_moments)
Internal and external basis sets as a single ndarray.
Notes
-----
Does not incorporate magnetometer scaling factor or normalize spaces.
Adapted from code provided by Jukka Nenonen.
"""
rmags, cosmags, bins, n_coils = all_coils[:4]
int_order, ext_order = exp['int_order'], exp['ext_order']
n_in, n_out = _get_n_moments([int_order, ext_order])
S_tot = np.empty((n_coils, n_in + n_out), np.float64)
rmags = rmags - exp['origin']
S_in = S_tot[:, :n_in]
S_out = S_tot[:, n_in:]
# do the heavy lifting
max_order = max(int_order, ext_order)
L = _tabular_legendre(rmags, max_order)
phi = np.arctan2(rmags[:, 1], rmags[:, 0])
r_n = np.sqrt(np.sum(rmags * rmags, axis=1))
r_xy = np.sqrt(rmags[:, 0] * rmags[:, 0] + rmags[:, 1] * rmags[:, 1])
cos_pol = rmags[:, 2] / r_n # cos(theta); theta 0...pi
sin_pol = np.sqrt(1. - cos_pol * cos_pol) # sin(theta)
z_only = (r_xy <= 1e-16)
r_xy[z_only] = 1.
cos_az = rmags[:, 0] / r_xy # cos(phi)
cos_az[z_only] = 1.
sin_az = rmags[:, 1] / r_xy # sin(phi)
sin_az[z_only] = 0.
del rmags
# Appropriate vector spherical harmonics terms
# JNE 2012-02-08: modified alm -> 2*alm, blm -> -2*blm
r_nn2 = r_n.copy()
r_nn1 = 1.0 / (r_n * r_n)
for degree in range(max_order + 1):
if degree <= ext_order:
r_nn1 *= r_n # r^(l-1)
if degree <= int_order:
r_nn2 *= r_n # r^(l+2)
# mu_0*sqrt((2l+1)/4pi (l-m)!/(l+m)!)
mult = 2e-7 * np.sqrt((2 * degree + 1) * np.pi)
if degree > 0:
idx = _deg_ord_idx(degree, 0)
# alpha
if degree <= int_order:
b_r = mult * (degree + 1) * L[degree][0] / r_nn2
b_pol = -mult * L[degree][1] / r_nn2
S_in[:, idx] = _integrate_points(
cos_az, sin_az, cos_pol, sin_pol, b_r, 0., b_pol,
cosmags, bins, n_coils)
# beta
if degree <= ext_order:
b_r = -mult * degree * L[degree][0] * r_nn1
b_pol = -mult * L[degree][1] * r_nn1
S_out[:, idx] = _integrate_points(
cos_az, sin_az, cos_pol, sin_pol, b_r, 0., b_pol,
cosmags, bins, n_coils)
for order in range(1, degree + 1):
ord_phi = order * phi
sin_order = np.sin(ord_phi)
cos_order = np.cos(ord_phi)
mult /= np.sqrt((degree - order + 1) * (degree + order))
factor = mult * np.sqrt(2) # equivalence fix (Elekta uses 2.)
# Real
idx = _deg_ord_idx(degree, order)
r_fact = factor * L[degree][order] * cos_order
az_fact = factor * order * sin_order * L[degree][order]
pol_fact = -factor * (L[degree][order + 1] -
(degree + order) * (degree - order + 1) *
L[degree][order - 1]) * cos_order
# alpha
if degree <= int_order:
b_r = (degree + 1) * r_fact / r_nn2
b_az = az_fact / (sin_pol * r_nn2)
b_az[z_only] = 0.
b_pol = pol_fact / (2 * r_nn2)
S_in[:, idx] = _integrate_points(
cos_az, sin_az, cos_pol, sin_pol, b_r, b_az, b_pol,
cosmags, bins, n_coils)
# beta
if degree <= ext_order:
b_r = -degree * r_fact * r_nn1
b_az = az_fact * r_nn1 / sin_pol
b_az[z_only] = 0.
b_pol = pol_fact * r_nn1 / 2.
S_out[:, idx] = _integrate_points(
cos_az, sin_az, cos_pol, sin_pol, b_r, b_az, b_pol,
cosmags, bins, n_coils)
# Imaginary
idx = _deg_ord_idx(degree, -order)
r_fact = factor * L[degree][order] * sin_order
az_fact = factor * order * cos_order * L[degree][order]
pol_fact = factor * (L[degree][order + 1] -
(degree + order) * (degree - order + 1) *
L[degree][order - 1]) * sin_order
# alpha
if degree <= int_order:
b_r = -(degree + 1) * r_fact / r_nn2
b_az = az_fact / (sin_pol * r_nn2)
b_az[z_only] = 0.
b_pol = pol_fact / (2 * r_nn2)
S_in[:, idx] = _integrate_points(
cos_az, sin_az, cos_pol, sin_pol, b_r, b_az, b_pol,
cosmags, bins, n_coils)
# beta
if degree <= ext_order:
b_r = degree * r_fact * r_nn1
b_az = az_fact * r_nn1 / sin_pol
b_az[z_only] = 0.
b_pol = pol_fact * r_nn1 / 2.
S_out[:, idx] = _integrate_points(
cos_az, sin_az, cos_pol, sin_pol, b_r, b_az, b_pol,
cosmags, bins, n_coils)
return S_tot
def _integrate_points(cos_az, sin_az, cos_pol, sin_pol, b_r, b_az, b_pol,
cosmags, bins, n_coils):
"""Integrate points in spherical coords."""
grads = _sp_to_cart(cos_az, sin_az, cos_pol, sin_pol, b_r, b_az, b_pol).T
grads = einsum('ij,ij->i', grads, cosmags)
return np.bincount(bins, grads, n_coils)
def _tabular_legendre(r, nind):
"""Compute associated Legendre polynomials."""
r_n = np.sqrt(np.sum(r * r, axis=1))
x = r[:, 2] / r_n # cos(theta)
L = list()
for degree in range(nind + 1):
L.append(np.zeros((degree + 2, len(r))))
L[0][0] = 1.
pnn = 1.
fact = 1.
sx2 = np.sqrt((1. - x) * (1. + x))
for degree in range(nind + 1):
L[degree][degree] = pnn
pnn *= (-fact * sx2)
fact += 2.
if degree < nind:
L[degree + 1][degree] = x * (2 * degree + 1) * L[degree][degree]
if degree >= 2:
for order in range(degree - 1):
L[degree][order] = (x * (2 * degree - 1) *
L[degree - 1][order] -
(degree + order - 1) *
L[degree - 2][order]) / (degree - order)
return L
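# Hedged check sketch: the table above appears to follow the same convention
# (including the Condon-Shortley phase) as ``scipy.special.lpmv``:
#
#     from scipy.special import lpmv
#     rng = np.random.RandomState(0)
#     r = rng.randn(10, 3)
#     L = _tabular_legendre(r, 4)
#     x = r[:, 2] / np.sqrt(np.sum(r * r, axis=1))
#     np.testing.assert_allclose(L[3][2], lpmv(2, 3, x))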
def _sp_to_cart(cos_az, sin_az, cos_pol, sin_pol, b_r, b_az, b_pol):
"""Convert spherical coords to cartesian."""
return np.array([(sin_pol * cos_az * b_r +
cos_pol * cos_az * b_pol - sin_az * b_az),
(sin_pol * sin_az * b_r +
cos_pol * sin_az * b_pol + cos_az * b_az),
cos_pol * b_r - sin_pol * b_pol])
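# Hedged sketch: a purely radial component (b_r=1, b_az=b_pol=0) should map to
# the Cartesian radial unit vector:
#
#     az, pol = 0.3, 1.1
#     out = _sp_to_cart(np.cos(az), np.sin(az), np.cos(pol), np.sin(pol),
#                       1., 0., 0.)
#     r_hat = [np.sin(pol) * np.cos(az), np.sin(pol) * np.sin(az), np.cos(pol)]
#     np.testing.assert_allclose(out.ravel(), r_hat)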
def _get_degrees_orders(order):
"""Get the set of degrees used in our basis functions."""
degrees = np.zeros(_get_n_moments(order), int)
orders = np.zeros_like(degrees)
for degree in range(1, order + 1):
# Only loop over positive orders, negative orders are handled
# for efficiency within
for order in range(degree + 1):
ii = _deg_ord_idx(degree, order)
degrees[ii] = degree
orders[ii] = order
ii = _deg_ord_idx(degree, -order)
degrees[ii] = degree
orders[ii] = -order
return degrees, orders
def _alegendre_deriv(order, degree, val):
"""Compute the derivative of the associated Legendre polynomial at a value.
Parameters
----------
order : int
Order of spherical harmonic. (Usually) corresponds to 'm'.
degree : int
Degree of spherical harmonic. (Usually) corresponds to 'l'.
val : float
Value to evaluate the derivative at.
Returns
-------
dPlm : float
Associated Legendre function derivative
"""
from scipy.special import lpmv
assert order >= 0
return (order * val * lpmv(order, degree, val) + (degree + order) *
(degree - order + 1.) * np.sqrt(1. - val * val) *
lpmv(order - 1, degree, val)) / (1. - val * val)
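# Hedged finite-difference sketch of the closed form above (using scipy's
# ``lpmv`` convention, which the function itself relies on):
#
#     from scipy.special import lpmv
#     order, degree, val, d = 1, 2, 0.4, 1e-6
#     num = (lpmv(order, degree, val + d) -
#            lpmv(order, degree, val - d)) / (2 * d)
#     np.testing.assert_allclose(_alegendre_deriv(order, degree, val), num,
#                                rtol=1e-5)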
def _bases_complex_to_real(complex_tot, int_order, ext_order):
"""Convert complex spherical harmonics to real."""
n_in, n_out = _get_n_moments([int_order, ext_order])
complex_in = complex_tot[:, :n_in]
complex_out = complex_tot[:, n_in:]
real_tot = np.empty(complex_tot.shape, np.float64)
real_in = real_tot[:, :n_in]
real_out = real_tot[:, n_in:]
for comp, real, exp_order in zip([complex_in, complex_out],
[real_in, real_out],
[int_order, ext_order]):
for deg in range(1, exp_order + 1):
for order in range(deg + 1):
idx_pos = _deg_ord_idx(deg, order)
idx_neg = _deg_ord_idx(deg, -order)
real[:, idx_pos] = _sh_complex_to_real(comp[:, idx_pos], order)
if order != 0:
# This extra mult factor baffles me a bit, but it works
# in round-trip testing, so we'll keep it :(
mult = (-1 if order % 2 == 0 else 1)
real[:, idx_neg] = mult * _sh_complex_to_real(
comp[:, idx_neg], -order)
return real_tot
def _bases_real_to_complex(real_tot, int_order, ext_order):
"""Convert real spherical harmonics to complex."""
n_in, n_out = _get_n_moments([int_order, ext_order])
real_in = real_tot[:, :n_in]
real_out = real_tot[:, n_in:]
comp_tot = np.empty(real_tot.shape, np.complex128)
comp_in = comp_tot[:, :n_in]
comp_out = comp_tot[:, n_in:]
for real, comp, exp_order in zip([real_in, real_out],
[comp_in, comp_out],
[int_order, ext_order]):
for deg in range(1, exp_order + 1):
# only loop over positive orders, figure out neg from pos
for order in range(deg + 1):
idx_pos = _deg_ord_idx(deg, order)
idx_neg = _deg_ord_idx(deg, -order)
this_comp = _sh_real_to_complex([real[:, idx_pos],
real[:, idx_neg]], order)
comp[:, idx_pos] = this_comp
comp[:, idx_neg] = _sh_negate(this_comp, order)
return comp_tot
def _check_info(info, sss=True, tsss=True, calibration=True, ctc=True):
"""Ensure that Maxwell filtering has not been applied yet."""
for ent in info['proc_history']:
for msg, key, doing in (('SSS', 'sss_info', sss),
('tSSS', 'max_st', tsss),
('fine calibration', 'sss_cal', calibration),
('cross-talk cancellation', 'sss_ctc', ctc)):
if not doing:
continue
if len(ent['max_info'][key]) > 0:
raise RuntimeError('Maxwell filtering %s step has already '
'been applied, cannot reapply' % msg)
def _update_sss_info(raw, origin, int_order, ext_order, nchan, coord_frame,
sss_ctc, sss_cal, max_st, reg_moments, st_only):
"""Update info inplace after Maxwell filtering.
Parameters
----------
raw : instance of mne.io.Raw
Data to be filtered
origin : array-like, shape (3,)
Origin of internal and external multipolar moment space in head coords
        and in meters
int_order : int
Order of internal component of spherical expansion
ext_order : int
Order of external component of spherical expansion
nchan : int
Number of sensors
sss_ctc : dict
The cross talk information.
sss_cal : dict
The calibration information.
max_st : dict
The tSSS information.
reg_moments : ndarray | slice
The moments that were used.
st_only : bool
Whether tSSS only was performed.
"""
n_in, n_out = _get_n_moments([int_order, ext_order])
raw.info['maxshield'] = False
components = np.zeros(n_in + n_out).astype('int32')
components[reg_moments] = 1
sss_info_dict = dict(in_order=int_order, out_order=ext_order,
nchan=nchan, origin=origin.astype('float32'),
job=np.array([2]), nfree=np.sum(components[:n_in]),
frame=_str_to_frame[coord_frame],
components=components)
max_info_dict = dict(max_st=max_st)
if st_only:
max_info_dict.update(sss_info=dict(), sss_cal=dict(), sss_ctc=dict())
else:
max_info_dict.update(sss_info=sss_info_dict, sss_cal=sss_cal,
sss_ctc=sss_ctc)
# Reset 'bads' for any MEG channels since they've been reconstructed
_reset_meg_bads(raw.info)
block_id = _generate_meas_id()
raw.info['proc_history'].insert(0, dict(
max_info=max_info_dict, block_id=block_id, date=DATE_NONE,
creator='mne-python v%s' % __version__, experimenter=''))
def _reset_meg_bads(info):
"""Reset MEG bads."""
meg_picks = pick_types(info, meg=True, exclude=[])
info['bads'] = [bad for bad in info['bads']
if info['ch_names'].index(bad) not in meg_picks]
check_disable = dict() # not available on really old versions of SciPy
if 'check_finite' in _get_args(linalg.svd):
check_disable['check_finite'] = False
def _orth_overwrite(A):
"""Create a slightly more efficient 'orth'."""
# adapted from scipy/linalg/decomp_svd.py
u, s = _safe_svd(A, full_matrices=False, **check_disable)[:2]
M, N = A.shape
eps = np.finfo(float).eps
tol = max(M, N) * np.amax(s) * eps
num = np.sum(s > tol, dtype=int)
return u[:, :num]
def _overlap_projector(data_int, data_res, corr):
"""Calculate projector for removal of subspace intersection in tSSS."""
# corr necessary to deal with noise when finding identical signal
# directions in the subspace. See the end of the Results section in [2]_
# Note that the procedure here is an updated version of [2]_ (and used in
# Elekta's tSSS) that uses residuals instead of internal/external spaces
# directly. This provides more degrees of freedom when analyzing for
# intersections between internal and external spaces.
# Normalize data, then compute orth to get temporal bases. Matrices
# must have shape (n_samps x effective_rank) when passed into svd
# computation
# we use np.linalg.norm instead of sp.linalg.norm here: ~2x faster!
n = np.linalg.norm(data_int)
Q_int = linalg.qr(_orth_overwrite((data_int / n).T),
overwrite_a=True, mode='economic', **check_disable)[0].T
n = np.linalg.norm(data_res)
Q_res = linalg.qr(_orth_overwrite((data_res / n).T),
overwrite_a=True, mode='economic', **check_disable)[0]
assert data_int.shape[1] > 0
C_mat = np.dot(Q_int, Q_res)
del Q_int
# Compute angles between subspace and which bases to keep
S_intersect, Vh_intersect = _safe_svd(C_mat, full_matrices=False,
**check_disable)[1:]
del C_mat
intersect_mask = (S_intersect >= corr)
del S_intersect
# Compute projection operator as (I-LL_T) Eq. 12 in [2]_
# V_principal should be shape (n_time_pts x n_retained_inds)
Vh_intersect = Vh_intersect[intersect_mask].T
V_principal = np.dot(Q_res, Vh_intersect)
return V_principal
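# Hedged usage sketch (made-up data): when the "internal" and residual data
# share one strong temporal component, a single intersecting direction should
# survive the correlation threshold:
#
#     rng = np.random.RandomState(0)
#     common = rng.randn(1, 1000)
#     data_int = rng.randn(50, 1).dot(common) + 1e-3 * rng.randn(50, 1000)
#     data_res = rng.randn(50, 1).dot(common) + 1e-3 * rng.randn(50, 1000)
#     t_proj = _overlap_projector(data_int, data_res, corr=0.98)
#     # t_proj.shape is expected to be (1000, 1)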
def _update_sensor_geometry(info, fine_cal, ignore_ref):
"""Replace sensor geometry information and reorder cal_chs."""
from ._fine_cal import read_fine_calibration
logger.info(' Using fine calibration %s' % op.basename(fine_cal))
fine_cal = read_fine_calibration(fine_cal) # filename -> dict
ch_names = _clean_names(info['ch_names'], remove_whitespace=True)
info_to_cal = dict()
missing = list()
for ci, name in enumerate(fine_cal['ch_names']):
if name not in ch_names:
missing.append(name)
else:
oi = ch_names.index(name)
info_to_cal[oi] = ci
meg_picks = pick_types(info, meg=True, exclude=[])
if len(info_to_cal) != len(meg_picks):
raise RuntimeError(
'Not all MEG channels found in fine calibration file, missing:\n%s'
% sorted(list(set(ch_names[pick] for pick in meg_picks) -
set(fine_cal['ch_names']))))
if len(missing):
warn('Found cal channel%s not in data: %s' % (_pl(missing), missing))
grad_picks = pick_types(info, meg='grad', exclude=())
mag_picks = pick_types(info, meg='mag', exclude=())
# Determine gradiometer imbalances and magnetometer calibrations
grad_imbalances = np.array([fine_cal['imb_cals'][info_to_cal[gi]]
for gi in grad_picks]).T
if grad_imbalances.shape[0] not in [1, 3]:
raise ValueError('Must have 1 (x) or 3 (x, y, z) point-like ' +
'magnetometers. Currently have %i' %
grad_imbalances.shape[0])
mag_cals = np.array([fine_cal['imb_cals'][info_to_cal[mi]]
for mi in mag_picks])
# Now let's actually construct our point-like adjustment coils for grads
grad_coilsets = _get_grad_point_coilsets(
info, n_types=len(grad_imbalances), ignore_ref=ignore_ref)
calibration = dict(grad_imbalances=grad_imbalances,
grad_coilsets=grad_coilsets, mag_cals=mag_cals)
# Replace sensor locations (and track differences) for fine calibration
ang_shift = np.zeros((len(fine_cal['ch_names']), 3))
used = np.zeros(len(info['chs']), bool)
cal_corrs = list()
cal_chans = list()
adjust_logged = False
for oi, ci in info_to_cal.items():
assert ch_names[oi] == fine_cal['ch_names'][ci]
assert not used[oi]
used[oi] = True
info_ch = info['chs'][oi]
ch_num = int(fine_cal['ch_names'][ci].lstrip('MEG').lstrip('0'))
cal_chans.append([ch_num, info_ch['coil_type']])
# Some .dat files might only rotate EZ, so we must check first that
# EX and EY are orthogonal to EZ. If not, we find the rotation between
# the original and fine-cal ez, and rotate EX and EY accordingly:
ch_coil_rot = _loc_to_coil_trans(info_ch['loc'])[:3, :3]
cal_loc = fine_cal['locs'][ci].copy()
cal_coil_rot = _loc_to_coil_trans(cal_loc)[:3, :3]
if np.max([np.abs(np.dot(cal_coil_rot[:, ii], cal_coil_rot[:, 2]))
for ii in range(2)]) > 1e-6: # X or Y not orthogonal
if not adjust_logged:
logger.info(' Adjusting non-orthogonal EX and EY')
adjust_logged = True
# find the rotation matrix that goes from one to the other
this_trans = _find_vector_rotation(ch_coil_rot[:, 2],
cal_coil_rot[:, 2])
cal_loc[3:] = np.dot(this_trans, ch_coil_rot).T.ravel()
# calculate shift angle
v1 = _loc_to_coil_trans(cal_loc)[:3, :3]
_normalize_vectors(v1)
v2 = _loc_to_coil_trans(info_ch['loc'])[:3, :3]
_normalize_vectors(v2)
ang_shift[ci] = np.sum(v1 * v2, axis=0)
if oi in grad_picks:
extra = [1., fine_cal['imb_cals'][ci][0]]
else:
extra = [fine_cal['imb_cals'][ci][0], 0.]
cal_corrs.append(np.concatenate([extra, cal_loc]))
# Adjust channel normal orientations with those from fine calibration
# Channel positions are not changed
info_ch['loc'][3:] = cal_loc[3:]
assert (info_ch['coord_frame'] == FIFF.FIFFV_COORD_DEVICE)
assert used[meg_picks].all()
assert not used[np.setdiff1d(np.arange(len(used)), meg_picks)].any()
ang_shift = ang_shift[list(info_to_cal.values())] # subselect used ones
# This gets written to the Info struct
sss_cal = dict(cal_corrs=np.array(cal_corrs),
cal_chans=np.array(cal_chans))
# Log quantification of sensor changes
# Deal with numerical precision giving absolute vals slightly more than 1.
np.clip(ang_shift, -1., 1., ang_shift)
np.rad2deg(np.arccos(ang_shift), ang_shift) # Convert to degrees
logger.info(' Adjusted coil positions by (μ ± σ): '
'%0.1f° ± %0.1f° (max: %0.1f°)' %
(np.mean(ang_shift), np.std(ang_shift),
np.max(np.abs(ang_shift))))
return calibration, sss_cal
def _get_grad_point_coilsets(info, n_types, ignore_ref):
"""Get point-type coilsets for gradiometers."""
grad_coilsets = list()
grad_info = pick_info(
_simplify_info(info), pick_types(info, meg='grad', exclude=[]))
# Coil_type values for x, y, z point magnetometers
# Note: 1D correction files only have x-direction corrections
pt_types = [FIFF.FIFFV_COIL_POINT_MAGNETOMETER_X,
FIFF.FIFFV_COIL_POINT_MAGNETOMETER_Y,
FIFF.FIFFV_COIL_POINT_MAGNETOMETER]
for pt_type in pt_types[:n_types]:
for ch in grad_info['chs']:
ch['coil_type'] = pt_type
grad_coilsets.append(_prep_mf_coils(grad_info, ignore_ref))
return grad_coilsets
def _sss_basis_point(exp, trans, cal, ignore_ref=False, mag_scale=100.):
"""Compute multipolar moments for point-like mags (in fine cal)."""
# Loop over all coordinate directions desired and create point mags
S_tot = 0.
# These are magnetometers, so use a uniform coil_scale of 100.
this_cs = np.array([mag_scale], float)
for imb, coils in zip(cal['grad_imbalances'], cal['grad_coilsets']):
S_add = _trans_sss_basis(exp, coils, trans, this_cs)
# Scale spaces by gradiometer imbalance
S_add *= imb[:, np.newaxis]
S_tot += S_add
# Return point-like mag bases
return S_tot
def _regularize_out(int_order, ext_order, mag_or_fine):
"""Regularize out components based on norm."""
n_in = _get_n_moments(int_order)
out_removes = list(np.arange(0 if mag_or_fine.any() else 3) + n_in)
return list(out_removes)
def _regularize_in(int_order, ext_order, S_decomp, mag_or_fine):
"""Regularize basis set using idealized SNR measure."""
n_in, n_out = _get_n_moments([int_order, ext_order])
# The "signal" terms depend only on the inner expansion order
# (i.e., not sensor geometry or head position / expansion origin)
a_lm_sq, rho_i = _compute_sphere_activation_in(
np.arange(int_order + 1))
degrees, orders = _get_degrees_orders(int_order)
a_lm_sq = a_lm_sq[degrees]
I_tots = np.zeros(n_in) # we might not traverse all, so use np.zeros
in_keepers = list(range(n_in))
out_removes = _regularize_out(int_order, ext_order, mag_or_fine)
out_keepers = list(np.setdiff1d(np.arange(n_in, n_in + n_out),
out_removes))
remove_order = []
S_decomp = S_decomp.copy()
use_norm = np.sqrt(np.sum(S_decomp * S_decomp, axis=0))
S_decomp /= use_norm
eigs = np.zeros((n_in, 2))
# plot = False # for debugging
# if plot:
# import matplotlib.pyplot as plt
# fig, axs = plt.subplots(3, figsize=[6, 12])
# plot_ord = np.empty(n_in, int)
# plot_ord.fill(-1)
# count = 0
# # Reorder plot to match MF
# for degree in range(1, int_order + 1):
# for order in range(0, degree + 1):
# assert plot_ord[count] == -1
# plot_ord[count] = _deg_ord_idx(degree, order)
# count += 1
# if order > 0:
# assert plot_ord[count] == -1
# plot_ord[count] = _deg_ord_idx(degree, -order)
# count += 1
# assert count == n_in
# assert (plot_ord >= 0).all()
# assert len(np.unique(plot_ord)) == n_in
noise_lev = 5e-13 # noise level in T/m
noise_lev *= noise_lev # effectively what would happen by earlier multiply
for ii in range(n_in):
this_S = S_decomp.take(in_keepers + out_keepers, axis=1)
u, s, v = _safe_svd(this_S, full_matrices=False, **check_disable)
del this_S
eigs[ii] = s[[0, -1]]
v = v.T[:len(in_keepers)]
v /= use_norm[in_keepers][:, np.newaxis]
eta_lm_sq = np.dot(v * 1. / s, u.T)
del u, s, v
eta_lm_sq *= eta_lm_sq
eta_lm_sq = eta_lm_sq.sum(axis=1)
eta_lm_sq *= noise_lev
# Mysterious scale factors to match Elekta, likely due to differences
# in the basis normalizations...
eta_lm_sq[orders[in_keepers] == 0] *= 2
eta_lm_sq *= 0.0025
snr = a_lm_sq[in_keepers] / eta_lm_sq
I_tots[ii] = 0.5 * np.log2(snr + 1.).sum()
remove_order.append(in_keepers[np.argmin(snr)])
in_keepers.pop(in_keepers.index(remove_order[-1]))
# heuristic to quit if we're past the peak to save cycles
if ii > 10 and (I_tots[ii - 1:ii + 1] < 0.95 * I_tots.max()).all():
break
# if plot and ii == 0:
# axs[0].semilogy(snr[plot_ord[in_keepers]], color='k')
# if plot:
# axs[0].set(ylabel='SNR', ylim=[0.1, 500], xlabel='Component')
# axs[1].plot(I_tots)
# axs[1].set(ylabel='Information', xlabel='Iteration')
# axs[2].plot(eigs[:, 0] / eigs[:, 1])
# axs[2].set(ylabel='Condition', xlabel='Iteration')
# Pick the components that give at least 98% of max info
# This is done because the curves can be quite flat, and we err on the
# side of including rather than excluding components
max_info = np.max(I_tots)
lim_idx = np.where(I_tots >= 0.98 * max_info)[0][0]
in_removes = remove_order[:lim_idx]
for ii, ri in enumerate(in_removes):
logger.debug(' Condition %0.3f/%0.3f = %03.1f, '
'Removing in component %s: l=%s, m=%+0.0f'
% (tuple(eigs[ii]) + (eigs[ii, 0] / eigs[ii, 1],
ri, degrees[ri], orders[ri])))
logger.debug(' Resulting information: %0.1f bits/sample '
'(%0.1f%% of peak %0.1f)'
% (I_tots[lim_idx], 100 * I_tots[lim_idx] / max_info,
max_info))
return in_removes, out_removes
def _compute_sphere_activation_in(degrees):
u"""Compute the "in" power from random currents in a sphere.
Parameters
----------
degrees : ndarray
The degrees to evaluate.
Returns
-------
a_power : ndarray
The a_lm associated for the associated degrees (see [1]_).
rho_i : float
The current density.
References
----------
.. [1] A 122-channel whole-cortex SQUID system for measuring the brain’s
magnetic fields. Knuutila et al. IEEE Transactions on Magnetics,
Vol 29 No 6, Nov 1993.
"""
r_in = 0.080 # radius of the randomly-activated sphere
# set the observation point r=r_s, az=el=0, so we can just look at m=0 term
# compute the resulting current density rho_i
# This is the "surface" version of the equation:
# b_r_in = 100e-15 # fixed radial field amplitude at distance r_s = 100 fT
# r_s = 0.13 # 5 cm from the surface
# rho_degrees = np.arange(1, 100)
# in_sum = (rho_degrees * (rho_degrees + 1.) /
# ((2. * rho_degrees + 1.)) *
# (r_in / r_s) ** (2 * rho_degrees + 2)).sum() * 4. * np.pi
# rho_i = b_r_in * 1e7 / np.sqrt(in_sum)
# rho_i = 5.21334885574e-07 # value for r_s = 0.125
rho_i = 5.91107375632e-07 # deterministic from above, so just store it
a_power = _sq(rho_i) * (degrees * r_in ** (2 * degrees + 4) /
(_sq(2. * degrees + 1.) *
(degrees + 1.)))
return a_power, rho_i
def _trans_sss_basis(exp, all_coils, trans=None, coil_scale=100.):
"""Compute SSS basis (optionally) using a dev<->head trans."""
if trans is not None:
if not isinstance(trans, Transform):
trans = Transform('meg', 'head', trans)
assert not np.isnan(trans['trans']).any()
all_coils = (apply_trans(trans, all_coils[0]),
apply_trans(trans, all_coils[1], move=False),
) + all_coils[2:]
if not isinstance(coil_scale, np.ndarray):
# Scale all magnetometers (with `coil_class` == 1.0) by `mag_scale`
cs = coil_scale
coil_scale = np.ones((all_coils[3], 1))
coil_scale[all_coils[4]] = cs
S_tot = _sss_basis(exp, all_coils)
S_tot *= coil_scale
return S_tot
| bsd-3-clause |
baspijhor/paparazzi | sw/misc/attitude_reference/att_ref_gui.py | 49 | 12483 | #!/usr/bin/env python
#
# Copyright (C) 2014 Antoine Drouin
#
# This file is part of paparazzi.
#
# paparazzi is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# paparazzi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with paparazzi; see the file COPYING. If not, write to
# the Free Software Foundation, 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#
"""
This is a graphical user interface for playing with reference attitude
"""
# https://gist.github.com/zed/b966b5a04f2dfc16c98e
# https://gist.github.com/nzjrs/51686
# http://jakevdp.github.io/blog/2012/10/07/xkcd-style-plots-in-matplotlib/
# http://chimera.labs.oreilly.com/books/1230000000393/ch12.html#_problem_208 <- threads
# TODO:
# -cancel workers
#
#
#
from __future__ import print_function
from gi.repository import Gtk, GObject
from matplotlib.figure import Figure
from matplotlib.backends.backend_gtk3agg import FigureCanvasGTK3Agg as FigureCanvas
import matplotlib.font_manager as fm
import math, threading, numpy as np, scipy.signal, pdb, copy, logging
import pat.utils as pu
import pat.algebra as pa
import control as ctl
import gui
class Reference(gui.Worker):
def __init__(self, sp, ref_impl=ctl.att_ref_default, omega=6., xi=0.8, max_vel=pu.rad_of_deg(100),
max_accel=pu.rad_of_deg(500)):
gui.Worker.__init__(self)
self.impl = ref_impl()
self.sp = sp
self.reset_outputs(sp)
self.update(sp, ref_impl, omega, xi, max_vel, max_accel)
self.do_work = True
def reset_outputs(self, sp):
self.euler = np.zeros((len(sp.time), pa.e_size))
self.quat = np.zeros((len(sp.time), pa.q_size))
self.vel = np.zeros((len(sp.time), pa.r_size))
self.accel = np.zeros((len(sp.time), pa.r_size))
def update_type(self, _type):
#print('update_type', _type)
self.impl = _type()
self.do_work = True
#self.recompute()
def update_param(self, p, v):
#print('update_param', p, v)
self.impl.set_param(p, v)
self.do_work = True
#self.recompute()
def update_sp(self, sp, ref_impl=None, omega=None, xi=None, max_vel=None, max_accel=None):
self.reset_outputs(sp)
self.update(sp, ref_impl, omega, xi, max_vel, max_accel)
self.do_work = True
#self.recompute()
def update(self, sp, ref_impl=None, omega=None, xi=None, max_vel=None, max_accel=None):
self.sp = sp
if ref_impl is not None:
self.impl = ref_impl()
if omega is not None:
self.impl.set_param('omega', omega)
if xi is not None:
self.impl.set_param('xi', xi)
if max_vel is not None:
self.impl.set_param('max_vel', max_vel)
if max_accel is not None:
self.impl.set_param('max_accel', max_accel)
def recompute(self):
#print("recomputing...")
self.start((self.sp,))
def _work_init(self, sp):
#print('_work_init ', self, self.impl, sp, sp.dt)
self.euler = np.zeros((len(sp.time), pa.e_size))
self.quat = np.zeros((len(sp.time), pa.q_size))
self.vel = np.zeros((len(sp.time), pa.r_size))
self.accel = np.zeros((len(sp.time), pa.r_size))
euler0 = [0.3, 0.1, 0.2]
self.impl.set_euler(np.array(euler0))
self.quat[0], self.euler[0], self.vel[0], self.accel[0] = self.impl.quat, self.impl.euler, self.impl.vel, self.impl.accel
self.n_iter_per_step = float(len(sp.time)) / self.n_step
def _work_step(self, i, sp):
start, stop = int(i * self.n_iter_per_step), int((i + 1) * self.n_iter_per_step)
# print('_work_step of %s: i %i, start %i, stop %i' % (self.impl, i, start, stop))
for j in range(start, stop):
self.impl.update_quat(sp.quat[j], sp.dt)
self.quat[j], self.vel[j], self.accel[j] = self.impl.quat, self.impl.vel, self.impl.accel
self.euler[j] = pa.euler_of_quat(self.quat[j])
class Setpoint(object):
t_static, t_step_phi, t_step_theta, t_step_psi, t_step_random, t_nb = range(0, 6)
t_names = ["constant", "step phi", "step theta", "step psi", "step_random"]
def __init__(self, type=t_static, duration=10., step_duration=5., step_ampl=pu.rad_of_deg(10.)):
self.dt = 1. / 512
self.update(type, duration, step_duration, step_ampl)
def update(self, type, duration, step_duration, step_ampl):
self.type = type
self.duration, self.step_duration, self.step_ampl = duration, step_duration, step_ampl
self.time = np.arange(0., self.duration, self.dt)
self.euler = np.zeros((len(self.time), pa.e_size))
try:
i = [Setpoint.t_step_phi, Setpoint.t_step_theta, Setpoint.t_step_psi].index(self.type)
self.euler[:, i] = step_ampl / 2 * scipy.signal.square(math.pi / step_duration * self.time)
except Exception as e:
print(e)
pass
self.quat = np.zeros((len(self.time), pa.q_size))
for i in range(0, len(self.time)):
self.quat[i] = pa.quat_of_euler(self.euler[i])
class GUI(object):
def __init__(self, sp, refs):
self.b = Gtk.Builder()
self.b.add_from_file("ressources/att_ref_gui.xml")
w = self.b.get_object("window")
w.connect("delete-event", Gtk.main_quit)
mb = self.b.get_object("main_vbox")
self.plot = Plot(sp, refs)
mb.pack_start(self.plot, True, True, 0)
mb = self.b.get_object("main_hbox")
ref_classes = [ctl.att_ref_default, ctl.att_ref_sat_naive, ctl.att_ref_sat_nested, ctl.att_ref_sat_nested2,
ctl.AttRefFloatNative, ctl.AttRefIntNative]
self.ref_views = [gui.AttRefParamView('<b>Ref {}</b>'.format(i+1), ref_classes=ref_classes,
active_impl=r.impl) for i, r in enumerate(refs)]
for r in self.ref_views:
mb.pack_start(r, True, True, 0)
w.show_all()
class Plot(Gtk.Frame):
def __init__(self, sp, refs):
Gtk.Frame.__init__(self)
self.f = Figure()
self.canvas = FigureCanvas(self.f)
self.add(self.canvas)
self.set_size_request(1024, 600)
self.f.subplots_adjust(left=0.07, right=0.98, bottom=0.05, top=0.95,
hspace=0.2, wspace=0.2)
# self.buffer = self.canvas.get_snapshot()
def decorate(self, axis, title=None, ylab=None, legend=None):
# font_prop = fm.FontProperties(fname='Humor-Sans-1.0.ttf', size=14)
if title is not None:
axis.set_title(title) # , fontproperties=font_prop)
if ylab is not None:
axis.yaxis.set_label_text(ylab) # , fontproperties=font_prop)
if legend is not None:
axis.legend(legend) # , prop=font_prop)
axis.xaxis.grid(color='k', linestyle='-', linewidth=0.2)
axis.yaxis.grid(color='k', linestyle='-', linewidth=0.2)
def update(self, sp, refs):
title = [r'$\phi$', r'$\theta$', r'$\psi$']
legend = ['Ref1', 'Ref2', 'Setpoint']
for i in range(0, 3):
axis = self.f.add_subplot(331 + i)
axis.clear()
for ref in refs:
axis.plot(sp.time, pu.deg_of_rad(ref.euler[:, i]))
axis.plot(sp.time, pu.deg_of_rad(sp.euler[:, i]))
self.decorate(axis, title[i], *(('deg', legend) if i == 0 else (None, None)))
title = [r'$p$', r'$q$', r'$r$']
for i in range(0, 3):
axis = self.f.add_subplot(334 + i)
axis.clear()
for ref in refs:
axis.plot(sp.time, pu.deg_of_rad(ref.vel[:, i]))
self.decorate(axis, title[i], 'deg/s' if i == 0 else None)
title = [r'$\dot{p}$', r'$\dot{q}$', r'$\dot{r}$']
for i in range(0, 3):
axis = self.f.add_subplot(337 + i)
axis.clear()
for ref in refs:
axis.plot(sp.time, pu.deg_of_rad(ref.accel[:, i]))
self.decorate(axis, title[i], 'deg/s2' if i == 0 else None)
self.canvas.draw()
class Application(object):
def __init__(self):
self.sp = Setpoint()
self.refs = [Reference(self.sp), Reference(self.sp, ref_impl=ctl.AttRefFloatNative)]
for nref, r in enumerate(self.refs):
r.connect("progress", self.on_ref_update_progress, nref + 1)
r.connect("completed", self.on_ref_update_completed, nref + 1)
self.gui = GUI(self.sp, self.refs)
self.register_gui()
self.recompute_sequentially()
def on_ref_update_progress(self, ref, v, nref):
#print('progress', nref, v)
self.gui.ref_views[nref - 1].progress.set_fraction(v)
def on_ref_update_completed(self, ref, nref):
#print('on_ref_update_completed', ref, nref)
self.gui.ref_views[nref - 1].progress.set_fraction(1.0)
# recompute remaining refs (if any)
self.recompute_sequentially()
self.gui.plot.update(self.sp, self.refs)
def register_gui(self):
self.register_setpoint()
for i in range(0, 2):
self.gui.ref_views[i].connect(self._on_ref_changed, self._on_ref_param_changed, self.refs[i], self.gui.ref_views[i])
self.gui.ref_views[i].update_view(self.refs[i].impl)
def register_setpoint(self):
b = self.gui.b
c_sp_type = b.get_object("combo_sp_type")
for n in Setpoint.t_names:
c_sp_type.append_text(n)
c_sp_type.set_active(self.sp.type)
c_sp_type.connect("changed", self.on_sp_changed)
names = ["spin_sp_duration", "spin_sp_step_duration", "spin_sp_step_amplitude"]
widgets = [b.get_object(name) for name in names]
adjs = [Gtk.Adjustment(self.sp.duration, 1, 100, 1, 10, 0),
Gtk.Adjustment(self.sp.step_duration, 0.1, 10., 0.1, 1., 0),
Gtk.Adjustment(pu.deg_of_rad(self.sp.step_ampl), 0.1, 180., 1, 10., 0)]
for i, w in enumerate(widgets):
w.set_adjustment(adjs[i])
w.update()
w.connect("value-changed", self.on_sp_changed)
def recompute_sequentially(self):
"""
Somehow running two threads to update both references at the same time produces bogus data.
As a workaround we simply run them one after the other.
"""
for r in self.refs:
if r.running:
return
for r in self.refs:
if r.do_work:
r.recompute()
return
def on_sp_changed(self, widget):
b = self.gui.b
_type = b.get_object("combo_sp_type").get_active()
names = ["spin_sp_duration", "spin_sp_step_duration", "spin_sp_step_amplitude"]
_duration, _step_duration, _step_amplitude = [b.get_object(name).get_value() for name in names]
#print('_on_sp_changed', _type, _duration, _step_duration, _step_amplitude)
_step_amplitude = pu.rad_of_deg(_step_amplitude)
self.sp.update(_type, _duration, _step_duration, _step_amplitude)
# somehow running two threads to update both references at the same time produces bogus data..
# as a workaround we simply run them one after the other
for r in self.refs:
r.update_sp(self.sp)
#r.recompute()
self.recompute_sequentially()
def _on_ref_changed(self, widget, ref, view):
#print('_on_ref_changed', widget, ref, view)
ref.update_type(view.get_selected_ref_class())
view.update_ref_params(ref.impl)
self.recompute_sequentially()
def _on_ref_param_changed(self, widget, p, ref, view):
#print("_on_ref_param_changed: %s %s=%s" % (ref.impl.name, p, val))
val = view.spin_cfg[p]['d2r'](widget.get_value())
ref.update_param(p, val)
self.recompute_sequentially()
def run(self):
Gtk.main()
if __name__ == "__main__":
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
Application().run()
| gpl-2.0 |
wanggang3333/scikit-learn | sklearn/neighbors/tests/test_kd_tree.py | 129 | 7848 | import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.neighbors.kd_tree import (KDTree, NeighborsHeap,
simultaneous_sort, kernel_norm,
nodeheap_sort, DTYPE, ITYPE)
from sklearn.neighbors.dist_metrics import DistanceMetric
from sklearn.utils.testing import SkipTest, assert_allclose
V = np.random.random((3, 3))
V = np.dot(V, V.T)
DIMENSION = 3
METRICS = {'euclidean': {},
'manhattan': {},
'chebyshev': {},
'minkowski': dict(p=3)}
def brute_force_neighbors(X, Y, k, metric, **kwargs):
D = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X)
ind = np.argsort(D, axis=1)[:, :k]
dist = D[np.arange(Y.shape[0])[:, None], ind]
return dist, ind
def test_kd_tree_query():
np.random.seed(0)
X = np.random.random((40, DIMENSION))
Y = np.random.random((10, DIMENSION))
def check_neighbors(dualtree, breadth_first, k, metric, kwargs):
kdt = KDTree(X, leaf_size=1, metric=metric, **kwargs)
dist1, ind1 = kdt.query(Y, k, dualtree=dualtree,
breadth_first=breadth_first)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric, **kwargs)
# don't check indices here: if there are any duplicate distances,
# the indices may not match. Distances should not have this problem.
assert_array_almost_equal(dist1, dist2)
for (metric, kwargs) in METRICS.items():
for k in (1, 3, 5):
for dualtree in (True, False):
for breadth_first in (True, False):
yield (check_neighbors,
dualtree, breadth_first,
k, metric, kwargs)
def test_kd_tree_query_radius(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
kdt = KDTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind = kdt.query_radius(query_pt, r + eps)[0]
i = np.where(rad <= r + eps)[0]
ind.sort()
i.sort()
assert_array_almost_equal(i, ind)
def test_kd_tree_query_radius_distance(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
kdt = KDTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind, dist = kdt.query_radius(query_pt, r + eps, return_distance=True)
ind = ind[0]
dist = dist[0]
d = np.sqrt(((query_pt - X[ind]) ** 2).sum(1))
assert_array_almost_equal(d, dist)
def compute_kernel_slow(Y, X, kernel, h):
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel)
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
def test_kd_tree_kde(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
kdt = KDTree(X, leaf_size=10)
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for h in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, h)
def check_results(kernel, h, atol, rtol, breadth_first):
dens = kdt.kernel_density(Y, h, atol=atol, rtol=rtol,
kernel=kernel,
breadth_first=breadth_first)
assert_allclose(dens, dens_true, atol=atol,
rtol=max(rtol, 1e-7))
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, h, atol, rtol,
breadth_first)
def test_gaussian_kde(n_samples=1000):
# Compare gaussian KDE results to scipy.stats.gaussian_kde
from scipy.stats import gaussian_kde
np.random.seed(0)
x_in = np.random.normal(0, 1, n_samples)
x_out = np.linspace(-5, 5, 30)
for h in [0.01, 0.1, 1]:
kdt = KDTree(x_in[:, None])
try:
gkde = gaussian_kde(x_in, bw_method=h / np.std(x_in))
except TypeError:
raise SkipTest("Old scipy, does not accept explicit bandwidth.")
dens_kdt = kdt.kernel_density(x_out[:, None], h) / n_samples
dens_gkde = gkde.evaluate(x_out)
assert_array_almost_equal(dens_kdt, dens_gkde, decimal=3)
def test_kd_tree_two_point(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
r = np.linspace(0, 1, 10)
kdt = KDTree(X, leaf_size=10)
D = DistanceMetric.get_metric("euclidean").pairwise(Y, X)
counts_true = [(D <= ri).sum() for ri in r]
def check_two_point(r, dualtree):
counts = kdt.two_point_correlation(Y, r=r, dualtree=dualtree)
assert_array_almost_equal(counts, counts_true)
for dualtree in (True, False):
yield check_two_point, r, dualtree
def test_kd_tree_pickle():
import pickle
np.random.seed(0)
X = np.random.random((10, 3))
kdt1 = KDTree(X, leaf_size=1)
ind1, dist1 = kdt1.query(X)
def check_pickle_protocol(protocol):
s = pickle.dumps(kdt1, protocol=protocol)
kdt2 = pickle.loads(s)
ind2, dist2 = kdt2.query(X)
assert_array_almost_equal(ind1, ind2)
assert_array_almost_equal(dist1, dist2)
for protocol in (0, 1, 2):
yield check_pickle_protocol, protocol
def test_neighbors_heap(n_pts=5, n_nbrs=10):
heap = NeighborsHeap(n_pts, n_nbrs)
for row in range(n_pts):
d_in = np.random.random(2 * n_nbrs).astype(DTYPE)
i_in = np.arange(2 * n_nbrs, dtype=ITYPE)
for d, i in zip(d_in, i_in):
heap.push(row, d, i)
ind = np.argsort(d_in)
d_in = d_in[ind]
i_in = i_in[ind]
d_heap, i_heap = heap.get_arrays(sort=True)
assert_array_almost_equal(d_in[:n_nbrs], d_heap[row])
assert_array_almost_equal(i_in[:n_nbrs], i_heap[row])
def test_node_heap(n_nodes=50):
vals = np.random.random(n_nodes).astype(DTYPE)
i1 = np.argsort(vals)
vals2, i2 = nodeheap_sort(vals)
assert_array_almost_equal(i1, i2)
assert_array_almost_equal(vals[i1], vals2)
def test_simultaneous_sort(n_rows=10, n_pts=201):
dist = np.random.random((n_rows, n_pts)).astype(DTYPE)
ind = (np.arange(n_pts) + np.zeros((n_rows, 1))).astype(ITYPE)
dist2 = dist.copy()
ind2 = ind.copy()
# simultaneous sort rows using function
simultaneous_sort(dist, ind)
# simultaneous sort rows using numpy
i = np.argsort(dist2, axis=1)
row_ind = np.arange(n_rows)[:, None]
dist2 = dist2[row_ind, i]
ind2 = ind2[row_ind, i]
assert_array_almost_equal(dist, dist2)
assert_array_almost_equal(ind, ind2)
| bsd-3-clause |
7even7/DAT210x | Module5/assignment4.py | 1 | 10105 | import numpy as np
import pandas as pd
from sklearn import preprocessing
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import matplotlib
#
# TODO: Parameters to play around with
PLOT_TYPE_TEXT = False # If you'd like to see indices
PLOT_VECTORS = True # If you'd like to see your original features in P.C.-Space
matplotlib.style.use('ggplot') # Look Pretty
c = ['red', 'green', 'blue', 'orange', 'yellow', 'brown']
def drawVectors(transformed_features, components_, columns, plt):
num_columns = len(columns)
# This function will project your *original* feature (columns)
# onto your principal component feature-space, so that you can
# visualize how "important" each one was in the
# multi-dimensional scaling
# Scale the principal components by the max value in
# the transformed set belonging to that component
xvector = components_[0] * max(transformed_features[:,0])
yvector = components_[1] * max(transformed_features[:,1])
## Visualize projections
# Sort each column by its length. These are your *original*
# columns, not the principal components.
import math
important_features = { columns[i] : math.sqrt(xvector[i]**2 + yvector[i]**2) for i in range(num_columns) }
important_features = sorted(zip(important_features.values(), important_features.keys()), reverse=True)
print "Projected Features by importance:\n", important_features
ax = plt.axes()
for i in range(num_columns):
# Use an arrow to project each original feature as a
# labeled vector on your principal component axes
plt.arrow(0, 0, xvector[i], yvector[i], color='b', width=0.0005, head_width=0.02, alpha=0.75, zorder=600000)
plt.text(xvector[i]*1.2, yvector[i]*1.2, list(columns)[i], color='b', alpha=0.75, zorder=600000)
return ax
def doPCA(data, dimensions=2):
from sklearn.decomposition import RandomizedPCA
model = RandomizedPCA(n_components=dimensions)
model.fit(data)
return model
def doKMeans(data, clusters):
#
# TODO: Do the KMeans clustering here, passing in the # of clusters parameter
# and fit it against your data. Then, return a tuple containing the cluster
# centers and the labels
#
# .. your code here ..
model = KMeans(n_clusters=clusters).fit(data)
return model.cluster_centers_, model.labels_
#
# TODO: Load up the dataset. It may or may not have nans in it. Make
# sure you catch them and destroy them, by setting them to '0'. This is valid
# for this dataset, since if the value is missing, you can assume no $ was spent
# on it.
#
# .. your code here ..
df = pd.read_csv('C:\Data\Projektit\DAT210x\Module5\Datasets\Wholesale customers data.csv').fillna(0)
#
# TODO: As instructed, get rid of the 'Channel' and 'Region' columns, since
# you'll be investigating as if this were a single location wholesaler, rather
# than a national / international one. Leaving these fields in here would cause
# KMeans to examine and give weight to them.
#
# .. your code here ..
df.drop(['Channel', 'Region'], axis=1, inplace=True)
#
# TODO: Before unitizing / standardizing / normalizing your data in preparation for
# K-Means, it's a good idea to get a quick peek at it. You can do this using the
# .describe() method, or even by using the built-in pandas df.plot.hist()
#
# .. your code here ..
#df.describe()
#df.plot.hist()
#
# INFO: Having checked out your data, you may have noticed there's a pretty big gap
# between the top customers in each feature category and the rest. Some feature
# scaling algos won't get rid of outliers for you, so it's a good idea to handle that
# manually---particularly if your goal is NOT to determine the top customers. After
# all, you can do that with a simple Pandas .sort_values() and not a machine
# learning clustering algorithm. From a business perspective, you're probably more
# interested in clustering your +/- 2 standard deviation customers, rather than the
# creme de la creme, or bottom of the barrel'ers
#
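# (Illustrative aside, not part of the assignment): if you *did* only want the
# top spenders, a plain sort is enough -- assuming the UCI wholesale column
# names, something like df.sort_values(by='Fresh', ascending=False).head(5)
# would list the five biggest 'Fresh' buyers without any clustering at all.
#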
# Remove top 5 and bottom 5 samples for each column:
drop = {}
for col in df.columns:
# Bottom 5
sort = df.sort_values(by=col, ascending=True)
if len(sort) > 5: sort=sort[:5]
for index in sort.index: drop[index] = True # Just store the index once
# Top 5
sort = df.sort_values(by=col, ascending=False)
if len(sort) > 5: sort=sort[:5]
for index in sort.index: drop[index] = True # Just store the index once
#
# INFO Drop rows by index. We do this all at once in case there is a
# collision. This way, we don't end up dropping more rows than we have
# to, if there is a single row that satisfies the drop for multiple columns.
# Since there are 6 columns, if we end up dropping < 5*6*2 = 60 rows, that means
# there indeed were collisions.
print "Dropping {0} Outliers...".format(len(drop))
df.drop(inplace=True, labels=drop.keys(), axis=0)
print df.describe()
#
# INFO: What are you interested in?
#
# Depending on what you're interested in, you might take a different approach
# to normalizing/standardizing your data.
#
# You should note that all columns left in the dataset are of the same unit.
# You might ask yourself, do I even need to normalize / standardize the data?
# The answer depends on what you're trying to accomplish. For instance, although
# all the units are the same (generic money unit), the price per item in your
# store isn't. There may be some cheap items and some expensive one. If your goal
# is to find out what items people tend to buy together but you didn't
# unitize properly before running kMeans, the contribution of the lesser priced
# item would be dwarfed by the more expensive item.
#
# For a great overview on a few of the normalization methods supported in SKLearn,
# please check out: https://stackoverflow.com/questions/30918781/right-function-for-normalizing-input-of-sklearn-svm
#
# Suffice to say, at the end of the day, you're going to have to know what question
# you want answered and what data you have available in order to select the best
# method for your purpose. Luckily, SKLearn's interfaces are easy to switch out
# so in the mean time, you can experiment with all of them and see how they alter
# your results.
#
#
# 5-sec summary before you dive deeper online:
#
# NORMALIZATION: Let's say your users spend a LOT. Normalization divides each item by
# the average overall amount of spending. Stated differently, your
# new feature is = the contribution of overall spending going into
# that particular item: $spent on feature / $overall spent by sample
#
# MINMAX: What % in the overall range of $spent by all users on THIS particular
# feature is the current sample's feature at? When you're dealing with
# all the same units, this will produce a near face-value amount. Be
# careful though: if you have even a single outlier, it can cause all
# your data to get squashed up in lower percentages.
# Imagine your buyers usually spend $100 on wholesale milk, but today
# only spent $20. This is the relationship you're trying to capture
# with MinMax. NOTE: MinMax doesn't standardize (std. dev.); it only
# normalizes / unitizes your feature, in the mathematical sense.
# MinMax can be used as an alternative to zero mean, unit variance scaling.
# [(sampleFeatureValue - min) / (max - min)] * (range_max - range_min) + range_min
# Where min and max are the overall feature values for all samples, and
# [range_min, range_max] is the target output range (the default is [0, 1]).
#
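# A tiny worked example (illustrative only, not part of the course text): for a
# hypothetical feature whose values across all samples are [20, 100, 180],
# MinMax scaling to the default [0, 1] range gives
# [(20-20)/160, (100-20)/160, (180-20)/160] = [0.0, 0.5, 1.0].
# Normalizer, by contrast, rescales each *sample* (row) to unit norm rather
# than working column-by-column.
#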
# TODO: Un-comment just ***ONE*** of lines at a time and see how alters your results
# Pay attention to the direction of the arrows, as well as their LENGTHS
#T = preprocessing.StandardScaler().fit_transform(df)
#T = preprocessing.MinMaxScaler().fit_transform(df)
#T = preprocessing.MaxAbsScaler().fit_transform(df)
#T = preprocessing.Normalizer().fit_transform(df)
T = df # No Change
#
# INFO: Sometimes people perform PCA before doing KMeans, so that KMeans only
# operates on the most meaningful features. In our case, there are so few features
# that doing PCA ahead of time isn't really necessary, and you can do KMeans in
# feature space. But keep in mind you have the option to transform your data to
# bring down its dimensionality. If you take that route, then your Clusters will
# already be in PCA-transformed feature space, and you won't have to project them
# again for visualization.
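#
# (Hypothetical alternative, not used in this assignment): clustering in the
# reduced space would look roughly like the commented sketch below; the
# centroids would then already live in PCA space and need no extra projection:
#
# pca_first = doPCA(T)
# T_2d = pca_first.transform(T)
# centroids_2d, labels_2d = doKMeans(T_2d, 3)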
# Do KMeans
n_clusters = 3
centroids, labels = doKMeans(T, n_clusters)
print centroids
#
# TODO: Print out your centroids. They're currently in feature-space, which
# is good. Print them out before you transform them into PCA space for viewing
#
# .. your code here ..
#plt.scatter(centroids[0], centroids[1], label='Centroids')
# Do PCA *after* to visualize the results. Project the centroids as well as
# the samples into the new 2D feature space for visualization purposes.
display_pca = doPCA(T)
T = display_pca.transform(T)
CC = display_pca.transform(centroids)
# Visualize all the samples. Give them the color of their cluster label
fig = plt.figure()
ax = fig.add_subplot(111)
if PLOT_TYPE_TEXT:
# Plot the index of the sample, so you can further investigate it in your dset
for i in range(len(T)): ax.text(T[i,0], T[i,1], df.index[i], color=c[labels[i]], alpha=0.75, zorder=600000)
ax.set_xlim(min(T[:,0])*1.2, max(T[:,0])*1.2)
ax.set_ylim(min(T[:,1])*1.2, max(T[:,1])*1.2)
else:
# Plot a regular scatter plot
sample_colors = [ c[labels[i]] for i in range(len(T)) ]
ax.scatter(T[:, 0], T[:, 1], c=sample_colors, marker='o', alpha=0.2)
# Plot the Centroids as X's, and label them
ax.scatter(CC[:, 0], CC[:, 1], marker='x', s=169, linewidths=3, zorder=1000, c=c)
for i in range(len(centroids)): ax.text(CC[i, 0], CC[i, 1], str(i), zorder=500010, fontsize=18, color=c[i])
# Display feature vectors for investigation:
if PLOT_VECTORS: drawVectors(T, display_pca.components_, df.columns, plt)
# Add the cluster label back into the dataframe and display it:
df['label'] = pd.Series(labels, index=df.index)
print df
plt.show()
| mit |
FrederichRiver/neutrino | applications/venus/venus/company.py | 1 | 3019 | #!/usr/bin/python3
import pandas as pd
import re
import pandas
import requests
import random
from polaris.mysql8 import mysqlHeader, mysqlBase
from dev_global.env import GLOBAL_HEADER
from venus.stock_base import StockEventBase
class EventCompany(StockEventBase):
def record_company_infomation(self, stock_code):
url = f"http://quotes.money.163.com/f10/gszl_{stock_code[2:]}.html#01f02"
table_list = self.get_html_table(url, attr="[@class='table_bg001 border_box limit_sale table_details']")
t = pd.read_html(table_list)[0]
# print(t.iloc[12, 1])
insert_sql = (
"INSERT IGNORE INTO company_infomation ("
"stock_code, company_name, english_name, legal_representative, address,"
"chairman, secratery, main_business, business_scope, introduction) "
"VALUES ( "
f"'{stock_code}','{t.iloc[2, 1]}','{t.iloc[3, 1]}',"
f"'{t.iloc[6, 1]}','{t.iloc[1, 3]}','{t.iloc[4, 3]}','{t.iloc[5, 3]}',"
f"'{t.iloc[10, 1]}','{t.iloc[11, 1]}','{t.iloc[12, 1]}')"
)
try:
self.mysql.engine.execute(insert_sql)
except Exception as e:
print(e)
def record_stock_structure(self, stock_code):
url = f"https://vip.stock.finance.sina.com.cn/corp/go.php/vCI_StockStructureHistory/stockid/{stock_code[2:]}/stocktype/TotalStock.phtml"
table_list = self.get_html_table(url, attr="[contains(@id,'historyTable')]")
if table_list:
for table in table_list:
df = self._resolve_stock_structure_table(table)
self._update_stock_structure(stock_code, df)
def _resolve_stock_structure_table(self, table) -> pandas.DataFrame:
df = pd.read_html(table)
#print(df)
if df:
df[0].columns = ['change_date', 'total_stock']
result = df[0]
result['total_stock'] = df[0]['total_stock'].apply(filter_str2float)
result['change_date'] = pandas.to_datetime(result['change_date'])
return result
else:
return pandas.DataFrame()
def _update_stock_structure(self, stock_code, df:pandas.DataFrame):
TAB_COMP_STOCK_STRUC = 'company_stock_structure'
value = {}
if not df.empty:
for index, row in df.iterrows():
sql = (
f"INSERT IGNORE into {TAB_COMP_STOCK_STRUC} ("
f"stock_code,report_date,total_stock) "
f"VALUES ('{stock_code}','{row['change_date']}',{row['total_stock']})")
#print(sql)
self.mysql.engine.execute(sql)
def filter_str2float(x):
result = re.match(r'(\d+)', x)
if result:
return 10000 * float(result[1])
else:
return 0
if __name__ == "__main__":
from dev_global.env import GLOBAL_HEADER
event = EventCompany(GLOBAL_HEADER)
event.record_stock_structure('SH600059') | bsd-3-clause |
BeiLuoShiMen/nupic | examples/opf/tools/MirrorImageViz/mirrorImageViz.py | 50 | 7221 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
# Author: Surabhi Gupta
import sys
import numpy as np
import matplotlib.pylab as pyl
def analyzeOverlaps(activeCoincsFile, encodingsFile, dataset):
'''Mirror Image Visualization: Shows the encoding space juxtaposed against the
coincidence space. The encoding space is the bottom-up sensory encoding and
the coincidence space depicts the corresponding activation of coincidences in
the SP. Hence, the mirror image visualization is a visual depiction of the
mapping of SP cells to the input representations.
Note:
* The files spBUOut and sensorBUOut are assumed to be in the output format
used for LPF experiment outputs.
* BU outputs for some sample datasets are provided. Specify the name of the
dataset as an option while running this script.
'''
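  # (Inferred from the parsing code below, not from any spec): each line of the
  # two input files is expected to look like '1024 3 57 101 ...', where the
  # first space-separated value is the size of the space and the remaining
  # values are the indices of the active bits/cells for that record.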
lines = activeCoincsFile.readlines()
inputs = encodingsFile.readlines()
w = len(inputs[0].split(' '))-1
patterns = set([])
encodings = set([])
coincs = [] #The set of all coincidences that have won at least once
reUsedCoincs = []
firstLine = inputs[0].split(' ')
size = int(firstLine.pop(0))
spOutput = np.zeros((len(lines),40))
inputBits = np.zeros((len(lines),w))
print 'Total n:', size
print 'Total number of records in the file:', len(lines), '\n'
print 'w:', w
count = 0
for x in xrange(len(lines)):
inputSpace = [] #Encoded representation for each input
spBUout = [int(z) for z in lines[x].split(' ')]
spBUout.pop(0) #The first element of each row of spBUOut is the size of the SP
temp = set(spBUout)
spOutput[x]=spBUout
input = [int(z) for z in inputs[x].split(' ')]
input.pop(0) #The first element of each row of sensorBUout is the size of the encoding space
tempInput = set(input)
inputBits[x]=input
#Creating the encoding space
for m in xrange(size):
if m in tempInput:
inputSpace.append(m)
else:
inputSpace.append('|') #A non-active bit
repeatedBits = tempInput.intersection(encodings) #Storing the bits that have been previously active
reUsed = temp.intersection(patterns) #Checking if any of the active cells have been previously active
#Dividing the coincidences into two different categories.
if len(reUsed)==0:
coincs.append((count,temp,repeatedBits,inputSpace, tempInput)) #Pattern no, active cells, repeated bits, encoding (full), encoding (summary)
else:
reUsedCoincs.append((count,temp,repeatedBits,inputSpace, tempInput))
patterns=patterns.union(temp) #Adding the active cells to the set of coincs that have been active at least once
encodings = encodings.union(tempInput)
count +=1
overlap = {}
overlapVal = 0
seen = []
seen = (printOverlaps(coincs, coincs, seen))
print len(seen), 'sets of 40 cells'
seen = printOverlaps(reUsedCoincs, coincs, seen)
Summ=[]
for z in coincs:
c=0
for y in reUsedCoincs:
c += len(z[1].intersection(y[1]))
Summ.append(c)
print 'Sum: ', Summ
for m in xrange(3):
displayLimit = min(51, len(spOutput[m*200:]))
if displayLimit>0:
drawFile(dataset, np.zeros([len(inputBits[:(m+1)*displayLimit]),len(inputBits[:(m+1)*displayLimit])]), inputBits[:(m+1)*displayLimit], spOutput[:(m+1)*displayLimit], w, m+1)
else:
print 'No more records to display'
pyl.show()
def drawFile(dataset, matrix, patterns, cells, w, fnum):
'''The similarity of two patterns in the bit-encoding space is displayed alongside
their similarity in the sp-coinc space.'''
score=0
count = 0
assert len(patterns)==len(cells)
for p in xrange(len(patterns)-1):
matrix[p+1:,p] = [len(set(patterns[p]).intersection(set(q)))*100/w for q in patterns[p+1:]]
matrix[p,p+1:] = [len(set(cells[p]).intersection(set(r)))*5/2 for r in cells[p+1:]]
score += sum(abs(np.array(matrix[p+1:,p])-np.array(matrix[p,p+1:])))
count += len(matrix[p+1:,p])
print 'Score', score/count
fig = pyl.figure(figsize = (10,10), num = fnum)
pyl.matshow(matrix, fignum = fnum)
pyl.colorbar()
pyl.title('Coincidence Space', verticalalignment='top', fontsize=12)
pyl.xlabel('The Mirror Image Visualization for '+dataset, fontsize=17)
pyl.ylabel('Encoding space', fontsize=12)
def printOverlaps(comparedTo, coincs, seen):
""" Compare the results and return True if success, False if failure
Parameters:
--------------------------------------------------------------------
coincs: Which cells are we comparing?
comparedTo: The set of 40 cells being compared to (they have no overlap with seen)
seen: Which of the cells we are comparing to have already been encountered.
This helps glue together the unique and reused coincs
"""
inputOverlap = 0
cellOverlap = 0
for y in comparedTo:
closestInputs = []
closestCells = []
if len(seen)>0:
inputOverlap = max([len(seen[m][1].intersection(y[4])) for m in xrange(len(seen))])
cellOverlap = max([len(seen[m][0].intersection(y[1])) for m in xrange(len(seen))])
for m in xrange( len(seen) ):
if len(seen[m][1].intersection(y[4]))==inputOverlap:
closestInputs.append(seen[m][2])
if len(seen[m][0].intersection(y[1]))==cellOverlap:
closestCells.append(seen[m][2])
seen.append((y[1], y[4], y[0]))
print 'Pattern',y[0]+1,':',' '.join(str(len(z[1].intersection(y[1]))).rjust(2) for z in coincs),'input overlap:', inputOverlap, ';', len(closestInputs), 'closest encodings:',','.join(str(m+1) for m in closestInputs).ljust(15), \
'cell overlap:', cellOverlap, ';', len(closestCells), 'closest set(s):',','.join(str(m+1) for m in closestCells)
return seen
if __name__=='__main__':
if len(sys.argv)<2: # No dataset specified
print ('Input files required. Read documentation for details.')
else:
dataset = sys.argv[1]
activeCoincsPath = dataset+'/'+dataset+'_spBUOut.txt'
encodingsPath = dataset+'/'+dataset+'_sensorBUOut.txt'
activeCoincsFile=open(activeCoincsPath, 'r')
encodingsFile=open(encodingsPath, 'r')
analyzeOverlaps(activeCoincsFile, encodingsFile, dataset)
| agpl-3.0 |
winklerand/pandas | pandas/tests/reshape/test_util.py | 15 | 1898 |
import numpy as np
from pandas import date_range, Index
import pandas.util.testing as tm
from pandas.core.reshape.util import cartesian_product
class TestCartesianProduct(object):
def test_simple(self):
x, y = list('ABC'), [1, 22]
result1, result2 = cartesian_product([x, y])
expected1 = np.array(['A', 'A', 'B', 'B', 'C', 'C'])
expected2 = np.array([1, 22, 1, 22, 1, 22])
tm.assert_numpy_array_equal(result1, expected1)
tm.assert_numpy_array_equal(result2, expected2)
def test_datetimeindex(self):
# regression test for GitHub issue #6439
# make sure that the ordering on datetimeindex is consistent
x = date_range('2000-01-01', periods=2)
result1, result2 = [Index(y).day for y in cartesian_product([x, x])]
expected1 = Index([1, 1, 2, 2])
expected2 = Index([1, 2, 1, 2])
tm.assert_index_equal(result1, expected1)
tm.assert_index_equal(result2, expected2)
def test_empty(self):
# product of empty factors
X = [[], [0, 1], []]
Y = [[], [], ['a', 'b', 'c']]
for x, y in zip(X, Y):
expected1 = np.array([], dtype=np.asarray(x).dtype)
expected2 = np.array([], dtype=np.asarray(y).dtype)
result1, result2 = cartesian_product([x, y])
tm.assert_numpy_array_equal(result1, expected1)
tm.assert_numpy_array_equal(result2, expected2)
# empty product (empty input):
result = cartesian_product([])
expected = []
assert result == expected
def test_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
msg = "Input must be a list-like of list-likes"
for X in invalid_inputs:
tm.assert_raises_regex(TypeError, msg, cartesian_product, X=X)
| bsd-3-clause |
alheinecke/tensorflow-xsmm | tensorflow/contrib/learn/python/learn/estimators/__init__.py | 17 | 12228 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""An estimator is a rule for calculating an estimate of a given quantity.
# Estimators
* **Estimators** are used to train and evaluate TensorFlow models.
They support regression and classification problems.
* **Classifiers** are functions that have discrete outcomes.
* **Regressors** are functions that predict continuous values.
## Choosing the correct estimator
* For **Regression** problems use one of the following:
* `LinearRegressor`: Uses linear model.
* `DNNRegressor`: Uses DNN.
* `DNNLinearCombinedRegressor`: Uses Wide & Deep.
* `TensorForestEstimator`: Uses RandomForest.
See tf.contrib.tensor_forest.client.random_forest.TensorForestEstimator.
* `Estimator`: Use when you need a custom model.
* For **Classification** problems use one of the following:
* `LinearClassifier`: Multiclass classifier using Linear model.
* `DNNClassifier`: Multiclass classifier using DNN.
* `DNNLinearCombinedClassifier`: Multiclass classifier using Wide & Deep.
* `TensorForestEstimator`: Uses RandomForest.
See tf.contrib.tensor_forest.client.random_forest.TensorForestEstimator.
* `SVM`: Binary classifier using linear SVMs.
* `LogisticRegressor`: Use when you need custom model for binary
classification.
* `Estimator`: Use when you need custom model for N class classification.
## Pre-canned Estimators
Pre-canned estimators are machine learning estimators premade for general
purpose problems. If you need more customization, you can always write your
own custom estimator as described in the section below.
Pre-canned estimators are tested and optimized for speed and quality.
### Define the feature columns
Here are some possible types of feature columns used as inputs to a pre-canned
estimator.
Feature columns may vary based on the estimator used. So you can see which
feature columns are fed to each estimator in the below section.
```python
sparse_feature_a = sparse_column_with_keys(
column_name="sparse_feature_a", keys=["AB", "CD", ...])
embedding_feature_a = embedding_column(
sparse_id_column=sparse_feature_a, dimension=3, combiner="sum")
sparse_feature_b = sparse_column_with_hash_bucket(
column_name="sparse_feature_b", hash_bucket_size=1000)
embedding_feature_b = embedding_column(
sparse_id_column=sparse_feature_b, dimension=16, combiner="sum")
crossed_feature_a_x_b = crossed_column(
columns=[sparse_feature_a, sparse_feature_b], hash_bucket_size=10000)
real_feature = real_valued_column("real_feature")
real_feature_buckets = bucketized_column(
source_column=real_feature,
boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
```
### Create the pre-canned estimator
DNNClassifier, DNNRegressor, and DNNLinearCombinedClassifier are all pretty
similar to each other in how you use them. You can easily plug in an
optimizer and/or regularization to those estimators.
#### DNNClassifier
A classifier for TensorFlow DNN models.
```python
my_features = [embedding_feature_a, embedding_feature_b]
estimator = DNNClassifier(
feature_columns=my_features,
hidden_units=[1024, 512, 256],
optimizer=tf.train.ProximalAdagradOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
```
#### DNNRegressor
A regressor for TensorFlow DNN models.
```python
my_features = [embedding_feature_a, embedding_feature_b]
estimator = DNNRegressor(
feature_columns=my_features,
hidden_units=[1024, 512, 256])
# Or estimator using the ProximalAdagradOptimizer optimizer with
# regularization.
estimator = DNNRegressor(
feature_columns=my_features,
hidden_units=[1024, 512, 256],
optimizer=tf.train.ProximalAdagradOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
```
#### DNNLinearCombinedClassifier
A classifier for TensorFlow Linear and DNN joined training models.
* Wide and deep model
* Multi class (2 by default)
```python
my_linear_features = [crossed_feature_a_x_b]
my_deep_features = [embedding_feature_a, embedding_feature_b]
estimator = DNNLinearCombinedClassifier(
# Common settings
n_classes=n_classes,
weight_column_name=weight_column_name,
# Wide settings
linear_feature_columns=my_linear_features,
linear_optimizer=tf.train.FtrlOptimizer(...),
# Deep settings
dnn_feature_columns=my_deep_features,
dnn_hidden_units=[1000, 500, 100],
dnn_optimizer=tf.train.AdagradOptimizer(...))
```
#### LinearClassifier
Train a linear model to classify instances into one of multiple possible
classes. When number of possible classes is 2, this is binary classification.
```python
my_features = [sparse_feature_b, crossed_feature_a_x_b]
estimator = LinearClassifier(
feature_columns=my_features,
optimizer=tf.train.FtrlOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
```
#### LinearRegressor
Train a linear regression model to predict a label value given observation of
feature values.
```python
my_features = [sparse_feature_b, crossed_feature_a_x_b]
estimator = LinearRegressor(
feature_columns=my_features)
```
### LogisticRegressor
Logistic regression estimator for binary classification.
```python
# See tf.contrib.learn.Estimator(...) for details on model_fn structure
def my_model_fn(...):
pass
estimator = LogisticRegressor(model_fn=my_model_fn)
# Input builders
def input_fn_train():
pass
estimator.fit(input_fn=input_fn_train)
estimator.predict(x=x)
```
#### SVM - Support Vector Machine
Support Vector Machine (SVM) model for binary classification.
Currently only linear SVMs are supported.
```python
my_features = [real_feature, sparse_feature_a]
estimator = SVM(
example_id_column='example_id',
feature_columns=my_features,
l2_regularization=10.0)
```
#### DynamicRnnEstimator
An `Estimator` that uses a recurrent neural network with dynamic unrolling.
```python
problem_type = ProblemType.CLASSIFICATION # or REGRESSION
prediction_type = PredictionType.SINGLE_VALUE # or MULTIPLE_VALUE
estimator = DynamicRnnEstimator(problem_type,
prediction_type,
my_feature_columns)
```
### Use the estimator
There are two main functions for using estimators, one of which is for
training, and one of which is for evaluation.
You can specify different data sources for each one in order to use different
datasets for train and eval.
```python
# Input builders
def input_fn_train(): # returns x, Y
...
estimator.fit(input_fn=input_fn_train)
def input_fn_eval(): # returns x, Y
...
estimator.evaluate(input_fn=input_fn_eval)
estimator.predict(x=x)
```
## Creating Custom Estimator
To create a custom `Estimator`, provide a function to `Estimator`'s
constructor that builds your model (`model_fn`, below):
```python
estimator = tf.contrib.learn.Estimator(
model_fn=model_fn,
model_dir=model_dir) # Where the model's data (e.g., checkpoints)
# are saved.
```
Here is a skeleton of this function, with descriptions of its arguments and
return values in the accompanying tables:
```python
def model_fn(features, targets, mode, params):
# Logic to do the following:
# 1. Configure the model via TensorFlow operations
# 2. Define the loss function for training/evaluation
# 3. Define the training operation/optimizer
# 4. Generate predictions
return predictions, loss, train_op
```
You may use `mode` and check against
`tf.contrib.learn.ModeKeys.{TRAIN, EVAL, INFER}` to parameterize `model_fn`.
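For example, a minimal sketch of such a `model_fn` (the single feature `x`, the
linear model, the loss, and the optimizer below are illustrative assumptions,
not a canonical implementation) might branch on `mode` like this:
```python
def model_fn(features, targets, mode, params):
  # Toy linear model; a real model_fn would build something richer here.
  w = tf.Variable(tf.zeros([1]), name="weight")
  b = tf.Variable(tf.zeros([1]), name="bias")
  predictions = w * features["x"] + b
  loss, train_op = None, None
  if mode != tf.contrib.learn.ModeKeys.INFER:
    # A loss is only needed for TRAIN and EVAL.
    loss = tf.reduce_mean(tf.square(predictions - targets))
  if mode == tf.contrib.learn.ModeKeys.TRAIN:
    optimizer = tf.train.GradientDescentOptimizer(params["learning_rate"])
    train_op = optimizer.minimize(
        loss, global_step=tf.contrib.framework.get_global_step())
  return predictions, loss, train_op
```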
In the Further Reading section below, there is an end-to-end TensorFlow
tutorial for building a custom estimator.
## Additional Estimators
There is an additional estimator under
`tensorflow.contrib.factorization.python.ops`:
* Gaussian mixture model (GMM) clustering
## Further reading
For further reading, there are several tutorials with relevant topics,
including:
* [Overview of linear models](../../../tutorials/linear/overview.md)
* [Linear model tutorial](../../../tutorials/wide/index.md)
* [Wide and deep learning tutorial](../../../tutorials/wide_and_deep/index.md)
* [Custom estimator tutorial](../../../tutorials/estimators/index.md)
* [Building input functions](../../../tutorials/input_fn/index.md)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.estimators._sklearn import NotFittedError
from tensorflow.contrib.learn.python.learn.estimators.constants import ProblemType
from tensorflow.contrib.learn.python.learn.estimators.dnn import DNNClassifier
from tensorflow.contrib.learn.python.learn.estimators.dnn import DNNEstimator
from tensorflow.contrib.learn.python.learn.estimators.dnn import DNNRegressor
from tensorflow.contrib.learn.python.learn.estimators.dnn_linear_combined import DNNLinearCombinedClassifier
from tensorflow.contrib.learn.python.learn.estimators.dnn_linear_combined import DNNLinearCombinedEstimator
from tensorflow.contrib.learn.python.learn.estimators.dnn_linear_combined import DNNLinearCombinedRegressor
from tensorflow.contrib.learn.python.learn.estimators.dynamic_rnn_estimator import DynamicRnnEstimator
from tensorflow.contrib.learn.python.learn.estimators.estimator import BaseEstimator
from tensorflow.contrib.learn.python.learn.estimators.estimator import Estimator
from tensorflow.contrib.learn.python.learn.estimators.estimator import infer_real_valued_columns_from_input
from tensorflow.contrib.learn.python.learn.estimators.estimator import infer_real_valued_columns_from_input_fn
from tensorflow.contrib.learn.python.learn.estimators.estimator import SKCompat
from tensorflow.contrib.learn.python.learn.estimators.head import binary_svm_head
from tensorflow.contrib.learn.python.learn.estimators.head import Head
from tensorflow.contrib.learn.python.learn.estimators.head import multi_class_head
from tensorflow.contrib.learn.python.learn.estimators.head import multi_head
from tensorflow.contrib.learn.python.learn.estimators.head import multi_label_head
from tensorflow.contrib.learn.python.learn.estimators.head import no_op_train_fn
from tensorflow.contrib.learn.python.learn.estimators.head import poisson_regression_head
from tensorflow.contrib.learn.python.learn.estimators.head import regression_head
from tensorflow.contrib.learn.python.learn.estimators.kmeans import KMeansClustering
from tensorflow.contrib.learn.python.learn.estimators.linear import LinearClassifier
from tensorflow.contrib.learn.python.learn.estimators.linear import LinearEstimator
from tensorflow.contrib.learn.python.learn.estimators.linear import LinearRegressor
from tensorflow.contrib.learn.python.learn.estimators.logistic_regressor import LogisticRegressor
from tensorflow.contrib.learn.python.learn.estimators.metric_key import MetricKey
from tensorflow.contrib.learn.python.learn.estimators.model_fn import ModeKeys
from tensorflow.contrib.learn.python.learn.estimators.model_fn import ModelFnOps
from tensorflow.contrib.learn.python.learn.estimators.prediction_key import PredictionKey
from tensorflow.contrib.learn.python.learn.estimators.run_config import ClusterConfig
from tensorflow.contrib.learn.python.learn.estimators.run_config import Environment
from tensorflow.contrib.learn.python.learn.estimators.run_config import RunConfig
from tensorflow.contrib.learn.python.learn.estimators.run_config import TaskType
from tensorflow.contrib.learn.python.learn.estimators.svm import SVM
| apache-2.0 |
sauliusl/seaborn | seaborn/tests/test_axisgrid.py | 3 | 51598 | import warnings
import numpy as np
import pandas as pd
from scipy import stats
import matplotlib as mpl
import matplotlib.pyplot as plt
from distutils.version import LooseVersion
import pytest
import nose.tools as nt
import numpy.testing as npt
from numpy.testing.decorators import skipif
try:
import pandas.testing as tm
except ImportError:
import pandas.util.testing as tm
from .. import axisgrid as ag
from .. import rcmod
from ..palettes import color_palette
from ..distributions import kdeplot, _freedman_diaconis_bins
from ..categorical import pointplot
from ..utils import categorical_order
rs = np.random.RandomState(0)
old_matplotlib = LooseVersion(mpl.__version__) < "1.4"
pandas_has_categoricals = LooseVersion(pd.__version__) >= "0.15"
class TestFacetGrid(object):
df = pd.DataFrame(dict(x=rs.normal(size=60),
y=rs.gamma(4, size=60),
a=np.repeat(list("abc"), 20),
b=np.tile(list("mn"), 30),
c=np.tile(list("tuv"), 20),
d=np.tile(list("abcdefghijkl"), 5)))
def test_self_data(self):
g = ag.FacetGrid(self.df)
nt.assert_is(g.data, self.df)
def test_self_fig(self):
g = ag.FacetGrid(self.df)
nt.assert_is_instance(g.fig, plt.Figure)
def test_self_axes(self):
g = ag.FacetGrid(self.df, row="a", col="b", hue="c")
for ax in g.axes.flat:
nt.assert_is_instance(ax, plt.Axes)
def test_axes_array_size(self):
g1 = ag.FacetGrid(self.df)
nt.assert_equal(g1.axes.shape, (1, 1))
g2 = ag.FacetGrid(self.df, row="a")
nt.assert_equal(g2.axes.shape, (3, 1))
g3 = ag.FacetGrid(self.df, col="b")
nt.assert_equal(g3.axes.shape, (1, 2))
g4 = ag.FacetGrid(self.df, hue="c")
nt.assert_equal(g4.axes.shape, (1, 1))
g5 = ag.FacetGrid(self.df, row="a", col="b", hue="c")
nt.assert_equal(g5.axes.shape, (3, 2))
for ax in g5.axes.flat:
nt.assert_is_instance(ax, plt.Axes)
def test_single_axes(self):
g1 = ag.FacetGrid(self.df)
nt.assert_is_instance(g1.ax, plt.Axes)
g2 = ag.FacetGrid(self.df, row="a")
with nt.assert_raises(AttributeError):
g2.ax
g3 = ag.FacetGrid(self.df, col="a")
with nt.assert_raises(AttributeError):
g3.ax
g4 = ag.FacetGrid(self.df, col="a", row="b")
with nt.assert_raises(AttributeError):
g4.ax
def test_col_wrap(self):
n = len(self.df.d.unique())
g = ag.FacetGrid(self.df, col="d")
assert g.axes.shape == (1, n)
assert g.facet_axis(0, 8) is g.axes[0, 8]
g_wrap = ag.FacetGrid(self.df, col="d", col_wrap=4)
assert g_wrap.axes.shape == (n,)
assert g_wrap.facet_axis(0, 8) is g_wrap.axes[8]
assert g_wrap._ncol == 4
assert g_wrap._nrow == (n / 4)
with pytest.raises(ValueError):
g = ag.FacetGrid(self.df, row="b", col="d", col_wrap=4)
df = self.df.copy()
df.loc[df.d == "j"] = np.nan
g_missing = ag.FacetGrid(df, col="d")
assert g_missing.axes.shape == (1, n - 1)
g_missing_wrap = ag.FacetGrid(df, col="d", col_wrap=4)
assert g_missing_wrap.axes.shape == (n - 1,)
g = ag.FacetGrid(self.df, col="d", col_wrap=1)
assert len(list(g.facet_data())) == n
def test_normal_axes(self):
null = np.empty(0, object).flat
g = ag.FacetGrid(self.df)
npt.assert_array_equal(g._bottom_axes, g.axes.flat)
npt.assert_array_equal(g._not_bottom_axes, null)
npt.assert_array_equal(g._left_axes, g.axes.flat)
npt.assert_array_equal(g._not_left_axes, null)
npt.assert_array_equal(g._inner_axes, null)
g = ag.FacetGrid(self.df, col="c")
npt.assert_array_equal(g._bottom_axes, g.axes.flat)
npt.assert_array_equal(g._not_bottom_axes, null)
npt.assert_array_equal(g._left_axes, g.axes[:, 0].flat)
npt.assert_array_equal(g._not_left_axes, g.axes[:, 1:].flat)
npt.assert_array_equal(g._inner_axes, null)
g = ag.FacetGrid(self.df, row="c")
npt.assert_array_equal(g._bottom_axes, g.axes[-1, :].flat)
npt.assert_array_equal(g._not_bottom_axes, g.axes[:-1, :].flat)
npt.assert_array_equal(g._left_axes, g.axes.flat)
npt.assert_array_equal(g._not_left_axes, null)
npt.assert_array_equal(g._inner_axes, null)
g = ag.FacetGrid(self.df, col="a", row="c")
npt.assert_array_equal(g._bottom_axes, g.axes[-1, :].flat)
npt.assert_array_equal(g._not_bottom_axes, g.axes[:-1, :].flat)
npt.assert_array_equal(g._left_axes, g.axes[:, 0].flat)
npt.assert_array_equal(g._not_left_axes, g.axes[:, 1:].flat)
npt.assert_array_equal(g._inner_axes, g.axes[:-1, 1:].flat)
def test_wrapped_axes(self):
null = np.empty(0, object).flat
g = ag.FacetGrid(self.df, col="a", col_wrap=2)
npt.assert_array_equal(g._bottom_axes,
g.axes[np.array([1, 2])].flat)
npt.assert_array_equal(g._not_bottom_axes, g.axes[:1].flat)
npt.assert_array_equal(g._left_axes, g.axes[np.array([0, 2])].flat)
npt.assert_array_equal(g._not_left_axes, g.axes[np.array([1])].flat)
npt.assert_array_equal(g._inner_axes, null)
def test_figure_size(self):
g = ag.FacetGrid(self.df, row="a", col="b")
npt.assert_array_equal(g.fig.get_size_inches(), (6, 9))
g = ag.FacetGrid(self.df, row="a", col="b", height=6)
npt.assert_array_equal(g.fig.get_size_inches(), (12, 18))
g = ag.FacetGrid(self.df, col="c", height=4, aspect=.5)
npt.assert_array_equal(g.fig.get_size_inches(), (6, 4))
def test_figure_size_with_legend(self):
g1 = ag.FacetGrid(self.df, col="a", hue="c", height=4, aspect=.5)
npt.assert_array_equal(g1.fig.get_size_inches(), (6, 4))
g1.add_legend()
nt.assert_greater(g1.fig.get_size_inches()[0], 6)
g2 = ag.FacetGrid(self.df, col="a", hue="c", height=4, aspect=.5,
legend_out=False)
npt.assert_array_equal(g2.fig.get_size_inches(), (6, 4))
g2.add_legend()
npt.assert_array_equal(g2.fig.get_size_inches(), (6, 4))
def test_legend_data(self):
g1 = ag.FacetGrid(self.df, hue="a")
g1.map(plt.plot, "x", "y")
g1.add_legend()
palette = color_palette(n_colors=3)
nt.assert_equal(g1._legend.get_title().get_text(), "a")
a_levels = sorted(self.df.a.unique())
lines = g1._legend.get_lines()
nt.assert_equal(len(lines), len(a_levels))
for line, hue in zip(lines, palette):
nt.assert_equal(line.get_color(), hue)
labels = g1._legend.get_texts()
nt.assert_equal(len(labels), len(a_levels))
for label, level in zip(labels, a_levels):
nt.assert_equal(label.get_text(), level)
def test_legend_data_missing_level(self):
g1 = ag.FacetGrid(self.df, hue="a", hue_order=list("azbc"))
g1.map(plt.plot, "x", "y")
g1.add_legend()
b, g, r, p = color_palette(n_colors=4)
palette = [b, r, p]
nt.assert_equal(g1._legend.get_title().get_text(), "a")
a_levels = sorted(self.df.a.unique())
lines = g1._legend.get_lines()
nt.assert_equal(len(lines), len(a_levels))
for line, hue in zip(lines, palette):
nt.assert_equal(line.get_color(), hue)
labels = g1._legend.get_texts()
nt.assert_equal(len(labels), 4)
for label, level in zip(labels, list("azbc")):
nt.assert_equal(label.get_text(), level)
def test_get_boolean_legend_data(self):
self.df["b_bool"] = self.df.b == "m"
g1 = ag.FacetGrid(self.df, hue="b_bool")
g1.map(plt.plot, "x", "y")
g1.add_legend()
palette = color_palette(n_colors=2)
nt.assert_equal(g1._legend.get_title().get_text(), "b_bool")
b_levels = list(map(str, categorical_order(self.df.b_bool)))
lines = g1._legend.get_lines()
nt.assert_equal(len(lines), len(b_levels))
for line, hue in zip(lines, palette):
nt.assert_equal(line.get_color(), hue)
labels = g1._legend.get_texts()
nt.assert_equal(len(labels), len(b_levels))
for label, level in zip(labels, b_levels):
nt.assert_equal(label.get_text(), level)
def test_legend_options(self):
g1 = ag.FacetGrid(self.df, hue="b")
g1.map(plt.plot, "x", "y")
g1.add_legend()
def test_legendout_with_colwrap(self):
g = ag.FacetGrid(self.df, col="d", hue='b',
col_wrap=4, legend_out=False)
g.map(plt.plot, "x", "y", linewidth=3)
g.add_legend()
def test_subplot_kws(self):
g = ag.FacetGrid(self.df, despine=False,
subplot_kws=dict(projection="polar"))
for ax in g.axes.flat:
nt.assert_true("PolarAxesSubplot" in str(type(ax)))
@skipif(old_matplotlib)
def test_gridspec_kws(self):
ratios = [3, 1, 2]
gskws = dict(width_ratios=ratios)
g = ag.FacetGrid(self.df, col='c', row='a', gridspec_kws=gskws)
for ax in g.axes.flat:
ax.set_xticks([])
ax.set_yticks([])
g.fig.tight_layout()
for (l, m, r) in g.axes:
assert l.get_position().width > m.get_position().width
assert r.get_position().width > m.get_position().width
@skipif(old_matplotlib)
def test_gridspec_kws_col_wrap(self):
ratios = [3, 1, 2, 1, 1]
gskws = dict(width_ratios=ratios)
with warnings.catch_warnings():
warnings.resetwarnings()
warnings.simplefilter("always")
npt.assert_warns(UserWarning, ag.FacetGrid, self.df, col='d',
col_wrap=5, gridspec_kws=gskws)
@skipif(not old_matplotlib)
def test_gridsic_kws_old_mpl(self):
ratios = [3, 1, 2]
gskws = dict(width_ratios=ratios, height_ratios=ratios)
with warnings.catch_warnings():
warnings.resetwarnings()
warnings.simplefilter("always")
npt.assert_warns(UserWarning, ag.FacetGrid, self.df, col='c',
row='a', gridspec_kws=gskws)
def test_data_generator(self):
g = ag.FacetGrid(self.df, row="a")
d = list(g.facet_data())
nt.assert_equal(len(d), 3)
tup, data = d[0]
nt.assert_equal(tup, (0, 0, 0))
nt.assert_true((data["a"] == "a").all())
tup, data = d[1]
nt.assert_equal(tup, (1, 0, 0))
nt.assert_true((data["a"] == "b").all())
g = ag.FacetGrid(self.df, row="a", col="b")
d = list(g.facet_data())
nt.assert_equal(len(d), 6)
tup, data = d[0]
nt.assert_equal(tup, (0, 0, 0))
nt.assert_true((data["a"] == "a").all())
nt.assert_true((data["b"] == "m").all())
tup, data = d[1]
nt.assert_equal(tup, (0, 1, 0))
nt.assert_true((data["a"] == "a").all())
nt.assert_true((data["b"] == "n").all())
tup, data = d[2]
nt.assert_equal(tup, (1, 0, 0))
nt.assert_true((data["a"] == "b").all())
nt.assert_true((data["b"] == "m").all())
g = ag.FacetGrid(self.df, hue="c")
d = list(g.facet_data())
nt.assert_equal(len(d), 3)
tup, data = d[1]
nt.assert_equal(tup, (0, 0, 1))
nt.assert_true((data["c"] == "u").all())
def test_map(self):
g = ag.FacetGrid(self.df, row="a", col="b", hue="c")
g.map(plt.plot, "x", "y", linewidth=3)
lines = g.axes[0, 0].lines
nt.assert_equal(len(lines), 3)
line1, _, _ = lines
nt.assert_equal(line1.get_linewidth(), 3)
x, y = line1.get_data()
mask = (self.df.a == "a") & (self.df.b == "m") & (self.df.c == "t")
npt.assert_array_equal(x, self.df.x[mask])
npt.assert_array_equal(y, self.df.y[mask])
def test_map_dataframe(self):
g = ag.FacetGrid(self.df, row="a", col="b", hue="c")
def plot(x, y, data=None, **kws):
plt.plot(data[x], data[y], **kws)
g.map_dataframe(plot, "x", "y", linestyle="--")
lines = g.axes[0, 0].lines
nt.assert_equal(len(lines), 3)
line1, _, _ = lines
nt.assert_equal(line1.get_linestyle(), "--")
x, y = line1.get_data()
mask = (self.df.a == "a") & (self.df.b == "m") & (self.df.c == "t")
npt.assert_array_equal(x, self.df.x[mask])
npt.assert_array_equal(y, self.df.y[mask])
def test_set(self):
g = ag.FacetGrid(self.df, row="a", col="b")
xlim = (-2, 5)
ylim = (3, 6)
xticks = [-2, 0, 3, 5]
yticks = [3, 4.5, 6]
g.set(xlim=xlim, ylim=ylim, xticks=xticks, yticks=yticks)
for ax in g.axes.flat:
npt.assert_array_equal(ax.get_xlim(), xlim)
npt.assert_array_equal(ax.get_ylim(), ylim)
npt.assert_array_equal(ax.get_xticks(), xticks)
npt.assert_array_equal(ax.get_yticks(), yticks)
def test_set_titles(self):
g = ag.FacetGrid(self.df, row="a", col="b")
g.map(plt.plot, "x", "y")
# Test the default titles
nt.assert_equal(g.axes[0, 0].get_title(), "a = a | b = m")
nt.assert_equal(g.axes[0, 1].get_title(), "a = a | b = n")
nt.assert_equal(g.axes[1, 0].get_title(), "a = b | b = m")
# Test a provided title
g.set_titles("{row_var} == {row_name} \/ {col_var} == {col_name}")
nt.assert_equal(g.axes[0, 0].get_title(), "a == a \/ b == m")
nt.assert_equal(g.axes[0, 1].get_title(), "a == a \/ b == n")
nt.assert_equal(g.axes[1, 0].get_title(), "a == b \/ b == m")
# Test a single row
g = ag.FacetGrid(self.df, col="b")
g.map(plt.plot, "x", "y")
# Test the default titles
nt.assert_equal(g.axes[0, 0].get_title(), "b = m")
nt.assert_equal(g.axes[0, 1].get_title(), "b = n")
# test with dropna=False
g = ag.FacetGrid(self.df, col="b", hue="b", dropna=False)
g.map(plt.plot, 'x', 'y')
def test_set_titles_margin_titles(self):
g = ag.FacetGrid(self.df, row="a", col="b", margin_titles=True)
g.map(plt.plot, "x", "y")
# Test the default titles
nt.assert_equal(g.axes[0, 0].get_title(), "b = m")
nt.assert_equal(g.axes[0, 1].get_title(), "b = n")
nt.assert_equal(g.axes[1, 0].get_title(), "")
# Test the row "titles"
nt.assert_equal(g.axes[0, 1].texts[0].get_text(), "a = a")
nt.assert_equal(g.axes[1, 1].texts[0].get_text(), "a = b")
# Test a provided title
g.set_titles(col_template="{col_var} == {col_name}")
nt.assert_equal(g.axes[0, 0].get_title(), "b == m")
nt.assert_equal(g.axes[0, 1].get_title(), "b == n")
nt.assert_equal(g.axes[1, 0].get_title(), "")
def test_set_ticklabels(self):
g = ag.FacetGrid(self.df, row="a", col="b")
g.map(plt.plot, "x", "y")
xlab = [l.get_text() + "h" for l in g.axes[1, 0].get_xticklabels()]
ylab = [l.get_text() for l in g.axes[1, 0].get_yticklabels()]
g.set_xticklabels(xlab)
g.set_yticklabels(rotation=90)
got_x = [l.get_text() for l in g.axes[1, 1].get_xticklabels()]
got_y = [l.get_text() for l in g.axes[0, 0].get_yticklabels()]
npt.assert_array_equal(got_x, xlab)
npt.assert_array_equal(got_y, ylab)
x, y = np.arange(10), np.arange(10)
df = pd.DataFrame(np.c_[x, y], columns=["x", "y"])
g = ag.FacetGrid(df).map(pointplot, "x", "y", order=x)
g.set_xticklabels(step=2)
got_x = [int(l.get_text()) for l in g.axes[0, 0].get_xticklabels()]
npt.assert_array_equal(x[::2], got_x)
g = ag.FacetGrid(self.df, col="d", col_wrap=5)
g.map(plt.plot, "x", "y")
g.set_xticklabels(rotation=45)
g.set_yticklabels(rotation=75)
for ax in g._bottom_axes:
for l in ax.get_xticklabels():
nt.assert_equal(l.get_rotation(), 45)
for ax in g._left_axes:
for l in ax.get_yticklabels():
nt.assert_equal(l.get_rotation(), 75)
def test_set_axis_labels(self):
g = ag.FacetGrid(self.df, row="a", col="b")
g.map(plt.plot, "x", "y")
xlab = 'xx'
ylab = 'yy'
g.set_axis_labels(xlab, ylab)
got_x = [ax.get_xlabel() for ax in g.axes[-1, :]]
got_y = [ax.get_ylabel() for ax in g.axes[:, 0]]
npt.assert_array_equal(got_x, xlab)
npt.assert_array_equal(got_y, ylab)
def test_axis_lims(self):
g = ag.FacetGrid(self.df, row="a", col="b", xlim=(0, 4), ylim=(-2, 3))
nt.assert_equal(g.axes[0, 0].get_xlim(), (0, 4))
nt.assert_equal(g.axes[0, 0].get_ylim(), (-2, 3))
def test_data_orders(self):
g = ag.FacetGrid(self.df, row="a", col="b", hue="c")
nt.assert_equal(g.row_names, list("abc"))
nt.assert_equal(g.col_names, list("mn"))
nt.assert_equal(g.hue_names, list("tuv"))
nt.assert_equal(g.axes.shape, (3, 2))
g = ag.FacetGrid(self.df, row="a", col="b", hue="c",
row_order=list("bca"),
col_order=list("nm"),
hue_order=list("vtu"))
nt.assert_equal(g.row_names, list("bca"))
nt.assert_equal(g.col_names, list("nm"))
nt.assert_equal(g.hue_names, list("vtu"))
nt.assert_equal(g.axes.shape, (3, 2))
g = ag.FacetGrid(self.df, row="a", col="b", hue="c",
row_order=list("bcda"),
col_order=list("nom"),
hue_order=list("qvtu"))
nt.assert_equal(g.row_names, list("bcda"))
nt.assert_equal(g.col_names, list("nom"))
nt.assert_equal(g.hue_names, list("qvtu"))
nt.assert_equal(g.axes.shape, (4, 3))
def test_palette(self):
rcmod.set()
g = ag.FacetGrid(self.df, hue="c")
assert g._colors == color_palette(n_colors=len(self.df.c.unique()))
g = ag.FacetGrid(self.df, hue="d")
assert g._colors == color_palette("husl", len(self.df.d.unique()))
g = ag.FacetGrid(self.df, hue="c", palette="Set2")
assert g._colors == color_palette("Set2", len(self.df.c.unique()))
dict_pal = dict(t="red", u="green", v="blue")
list_pal = color_palette(["red", "green", "blue"], 3)
g = ag.FacetGrid(self.df, hue="c", palette=dict_pal)
assert g._colors == list_pal
list_pal = color_palette(["green", "blue", "red"], 3)
g = ag.FacetGrid(self.df, hue="c", hue_order=list("uvt"),
palette=dict_pal)
assert g._colors == list_pal
def test_hue_kws(self):
kws = dict(marker=["o", "s", "D"])
g = ag.FacetGrid(self.df, hue="c", hue_kws=kws)
g.map(plt.plot, "x", "y")
for line, marker in zip(g.axes[0, 0].lines, kws["marker"]):
nt.assert_equal(line.get_marker(), marker)
def test_dropna(self):
df = self.df.copy()
hasna = pd.Series(np.tile(np.arange(6), 10), dtype=np.float)
hasna[hasna == 5] = np.nan
df["hasna"] = hasna
g = ag.FacetGrid(df, dropna=False, row="hasna")
nt.assert_equal(g._not_na.sum(), 60)
g = ag.FacetGrid(df, dropna=True, row="hasna")
nt.assert_equal(g._not_na.sum(), 50)
def test_unicode_column_label_with_rows(self):
# use a smaller copy of the default testing data frame:
df = self.df.copy()
df = df[["a", "b", "x"]]
# rename column 'a' (which will be used for the columns in the grid)
# by using a Unicode string:
unicode_column_label = u"\u01ff\u02ff\u03ff"
df = df.rename(columns={"a": unicode_column_label})
# ensure that the data frame columns have the expected names:
nt.assert_equal(list(df.columns), [unicode_column_label, "b", "x"])
# plot the grid -- if successful, no UnicodeEncodingError should
# occur:
g = ag.FacetGrid(df, col=unicode_column_label, row="b")
g = g.map(plt.plot, "x")
def test_unicode_column_label_no_rows(self):
# use a smaller copy of the default testing data frame:
df = self.df.copy()
df = df[["a", "x"]]
# rename column 'a' (which will be used for the columns in the grid)
# by using a Unicode string:
unicode_column_label = u"\u01ff\u02ff\u03ff"
df = df.rename(columns={"a": unicode_column_label})
# ensure that the data frame columns have the expected names:
nt.assert_equal(list(df.columns), [unicode_column_label, "x"])
# plot the grid -- if successful, no UnicodeEncodingError should
# occur:
g = ag.FacetGrid(df, col=unicode_column_label)
g = g.map(plt.plot, "x")
def test_unicode_row_label_with_columns(self):
# use a smaller copy of the default testing data frame:
df = self.df.copy()
df = df[["a", "b", "x"]]
# rename column 'b' (which will be used for the rows in the grid)
# by using a Unicode string:
unicode_row_label = u"\u01ff\u02ff\u03ff"
df = df.rename(columns={"b": unicode_row_label})
# ensure that the data frame columns have the expected names:
nt.assert_equal(list(df.columns), ["a", unicode_row_label, "x"])
# plot the grid -- if successful, no UnicodeEncodingError should
# occur:
g = ag.FacetGrid(df, col="a", row=unicode_row_label)
g = g.map(plt.plot, "x")
def test_unicode_row_label_no_columns(self):
# use a smaller copy of the default testing data frame:
df = self.df.copy()
df = df[["b", "x"]]
# rename column 'b' (which will be used for the rows in the grid)
# by using a Unicode string:
unicode_row_label = u"\u01ff\u02ff\u03ff"
df = df.rename(columns={"b": unicode_row_label})
# ensure that the data frame columns have the expected names:
nt.assert_equal(list(df.columns), [unicode_row_label, "x"])
# plot the grid -- if successful, no UnicodeEncodingError should
# occur:
g = ag.FacetGrid(df, row=unicode_row_label)
g = g.map(plt.plot, "x")
def test_unicode_content_with_row_and_column(self):
df = self.df.copy()
# replace content of column 'a' (which will form the columns in the
# grid) by Unicode characters:
unicode_column_val = np.repeat((u'\u01ff', u'\u02ff', u'\u03ff'), 20)
df["a"] = unicode_column_val
# make sure that the replacement worked as expected:
nt.assert_equal(
list(df["a"]),
[u'\u01ff'] * 20 + [u'\u02ff'] * 20 + [u'\u03ff'] * 20)
# plot the grid -- if successful, no UnicodeEncodingError should
# occur:
g = ag.FacetGrid(df, col="a", row="b")
g = g.map(plt.plot, "x")
def test_unicode_content_no_rows(self):
df = self.df.copy()
# replace content of column 'a' (which will form the columns in the
# grid) by Unicode characters:
unicode_column_val = np.repeat((u'\u01ff', u'\u02ff', u'\u03ff'), 20)
df["a"] = unicode_column_val
# make sure that the replacement worked as expected:
nt.assert_equal(
list(df["a"]),
[u'\u01ff'] * 20 + [u'\u02ff'] * 20 + [u'\u03ff'] * 20)
# plot the grid -- if successful, no UnicodeEncodingError should
# occur:
g = ag.FacetGrid(df, col="a")
g = g.map(plt.plot, "x")
def test_unicode_content_no_columns(self):
df = self.df.copy()
# replace content of column 'a' (which will form the rows in the
# grid) by Unicode characters:
unicode_column_val = np.repeat((u'\u01ff', u'\u02ff', u'\u03ff'), 20)
df["b"] = unicode_column_val
# make sure that the replacement worked as expected:
nt.assert_equal(
list(df["b"]),
[u'\u01ff'] * 20 + [u'\u02ff'] * 20 + [u'\u03ff'] * 20)
# plot the grid -- if successful, no UnicodeEncodingError should
# occur:
g = ag.FacetGrid(df, row="b")
g = g.map(plt.plot, "x")
@skipif(not pandas_has_categoricals)
def test_categorical_column_missing_categories(self):
df = self.df.copy()
df['a'] = df['a'].astype('category')
g = ag.FacetGrid(df[df['a'] == 'a'], col="a", col_wrap=1)
nt.assert_equal(g.axes.shape, (len(df['a'].cat.categories),))
def test_categorical_warning(self):
g = ag.FacetGrid(self.df, col="b")
with warnings.catch_warnings():
warnings.resetwarnings()
warnings.simplefilter("always")
npt.assert_warns(UserWarning, g.map, pointplot, "b", "x")
class TestPairGrid(object):
rs = np.random.RandomState(sum(map(ord, "PairGrid")))
df = pd.DataFrame(dict(x=rs.normal(size=60),
y=rs.randint(0, 4, size=(60)),
z=rs.gamma(3, size=60),
a=np.repeat(list("abc"), 20),
b=np.repeat(list("abcdefghijkl"), 5)))
def test_self_data(self):
g = ag.PairGrid(self.df)
nt.assert_is(g.data, self.df)
def test_ignore_datelike_data(self):
df = self.df.copy()
df['date'] = pd.date_range('2010-01-01', periods=len(df), freq='d')
        result = ag.PairGrid(df).data
expected = df.drop('date', axis=1)
tm.assert_frame_equal(result, expected)
def test_self_fig(self):
g = ag.PairGrid(self.df)
nt.assert_is_instance(g.fig, plt.Figure)
def test_self_axes(self):
g = ag.PairGrid(self.df)
for ax in g.axes.flat:
nt.assert_is_instance(ax, plt.Axes)
def test_default_axes(self):
g = ag.PairGrid(self.df)
nt.assert_equal(g.axes.shape, (3, 3))
nt.assert_equal(g.x_vars, ["x", "y", "z"])
nt.assert_equal(g.y_vars, ["x", "y", "z"])
nt.assert_true(g.square_grid)
def test_specific_square_axes(self):
vars = ["z", "x"]
g = ag.PairGrid(self.df, vars=vars)
nt.assert_equal(g.axes.shape, (len(vars), len(vars)))
nt.assert_equal(g.x_vars, vars)
nt.assert_equal(g.y_vars, vars)
nt.assert_true(g.square_grid)
def test_specific_nonsquare_axes(self):
x_vars = ["x", "y"]
y_vars = ["z", "y", "x"]
g = ag.PairGrid(self.df, x_vars=x_vars, y_vars=y_vars)
nt.assert_equal(g.axes.shape, (len(y_vars), len(x_vars)))
nt.assert_equal(g.x_vars, x_vars)
nt.assert_equal(g.y_vars, y_vars)
nt.assert_true(not g.square_grid)
x_vars = ["x", "y"]
y_vars = "z"
g = ag.PairGrid(self.df, x_vars=x_vars, y_vars=y_vars)
nt.assert_equal(g.axes.shape, (len(y_vars), len(x_vars)))
nt.assert_equal(g.x_vars, list(x_vars))
nt.assert_equal(g.y_vars, list(y_vars))
nt.assert_true(not g.square_grid)
def test_specific_square_axes_with_array(self):
vars = np.array(["z", "x"])
g = ag.PairGrid(self.df, vars=vars)
nt.assert_equal(g.axes.shape, (len(vars), len(vars)))
nt.assert_equal(g.x_vars, list(vars))
nt.assert_equal(g.y_vars, list(vars))
nt.assert_true(g.square_grid)
def test_specific_nonsquare_axes_with_array(self):
x_vars = np.array(["x", "y"])
y_vars = np.array(["z", "y", "x"])
g = ag.PairGrid(self.df, x_vars=x_vars, y_vars=y_vars)
nt.assert_equal(g.axes.shape, (len(y_vars), len(x_vars)))
nt.assert_equal(g.x_vars, list(x_vars))
nt.assert_equal(g.y_vars, list(y_vars))
nt.assert_true(not g.square_grid)
def test_size(self):
g1 = ag.PairGrid(self.df, height=3)
npt.assert_array_equal(g1.fig.get_size_inches(), (9, 9))
g2 = ag.PairGrid(self.df, height=4, aspect=.5)
npt.assert_array_equal(g2.fig.get_size_inches(), (6, 12))
g3 = ag.PairGrid(self.df, y_vars=["z"], x_vars=["x", "y"],
height=2, aspect=2)
npt.assert_array_equal(g3.fig.get_size_inches(), (8, 2))
def test_map(self):
vars = ["x", "y", "z"]
g1 = ag.PairGrid(self.df)
g1.map(plt.scatter)
for i, axes_i in enumerate(g1.axes):
for j, ax in enumerate(axes_i):
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
g2 = ag.PairGrid(self.df, "a")
g2.map(plt.scatter)
for i, axes_i in enumerate(g2.axes):
for j, ax in enumerate(axes_i):
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
for k, k_level in enumerate(self.df.a.unique()):
x_in_k = x_in[self.df.a == k_level]
y_in_k = y_in[self.df.a == k_level]
x_out, y_out = ax.collections[k].get_offsets().T
npt.assert_array_equal(x_in_k, x_out)
npt.assert_array_equal(y_in_k, y_out)
def test_map_nonsquare(self):
x_vars = ["x"]
y_vars = ["y", "z"]
g = ag.PairGrid(self.df, x_vars=x_vars, y_vars=y_vars)
g.map(plt.scatter)
x_in = self.df.x
for i, i_var in enumerate(y_vars):
ax = g.axes[i, 0]
y_in = self.df[i_var]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
def test_map_lower(self):
vars = ["x", "y", "z"]
g = ag.PairGrid(self.df)
g.map_lower(plt.scatter)
for i, j in zip(*np.tril_indices_from(g.axes, -1)):
ax = g.axes[i, j]
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
for i, j in zip(*np.triu_indices_from(g.axes)):
ax = g.axes[i, j]
nt.assert_equal(len(ax.collections), 0)
def test_map_upper(self):
vars = ["x", "y", "z"]
g = ag.PairGrid(self.df)
g.map_upper(plt.scatter)
for i, j in zip(*np.triu_indices_from(g.axes, 1)):
ax = g.axes[i, j]
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
for i, j in zip(*np.tril_indices_from(g.axes)):
ax = g.axes[i, j]
nt.assert_equal(len(ax.collections), 0)
@skipif(old_matplotlib)
def test_map_diag(self):
g1 = ag.PairGrid(self.df)
g1.map_diag(plt.hist)
for ax in g1.diag_axes:
nt.assert_equal(len(ax.patches), 10)
g2 = ag.PairGrid(self.df)
g2.map_diag(plt.hist, bins=15)
for ax in g2.diag_axes:
nt.assert_equal(len(ax.patches), 15)
g3 = ag.PairGrid(self.df, hue="a")
g3.map_diag(plt.hist)
for ax in g3.diag_axes:
nt.assert_equal(len(ax.patches), 30)
g4 = ag.PairGrid(self.df, hue="a")
g4.map_diag(plt.hist, histtype='step')
for ax in g4.diag_axes:
for ptch in ax.patches:
nt.assert_equal(ptch.fill, False)
@skipif(old_matplotlib)
def test_map_diag_color(self):
color = "red"
rgb_color = mpl.colors.colorConverter.to_rgba(color)
g1 = ag.PairGrid(self.df)
g1.map_diag(plt.hist, color=color)
for ax in g1.diag_axes:
for patch in ax.patches:
nt.assert_equals(patch.get_facecolor(), rgb_color)
g2 = ag.PairGrid(self.df)
g2.map_diag(kdeplot, color='red')
for ax in g2.diag_axes:
for line in ax.lines:
nt.assert_equals(line.get_color(), color)
@skipif(old_matplotlib)
def test_map_diag_palette(self):
pal = color_palette(n_colors=len(self.df.a.unique()))
g = ag.PairGrid(self.df, hue="a")
g.map_diag(kdeplot)
for ax in g.diag_axes:
for line, color in zip(ax.lines, pal):
nt.assert_equals(line.get_color(), color)
@skipif(old_matplotlib)
def test_map_diag_and_offdiag(self):
vars = ["x", "y", "z"]
g = ag.PairGrid(self.df)
g.map_offdiag(plt.scatter)
g.map_diag(plt.hist)
for ax in g.diag_axes:
nt.assert_equal(len(ax.patches), 10)
for i, j in zip(*np.triu_indices_from(g.axes, 1)):
ax = g.axes[i, j]
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
for i, j in zip(*np.tril_indices_from(g.axes, -1)):
ax = g.axes[i, j]
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
for i, j in zip(*np.diag_indices_from(g.axes)):
ax = g.axes[i, j]
nt.assert_equal(len(ax.collections), 0)
def test_palette(self):
rcmod.set()
g = ag.PairGrid(self.df, hue="a")
assert g.palette == color_palette(n_colors=len(self.df.a.unique()))
g = ag.PairGrid(self.df, hue="b")
assert g.palette == color_palette("husl", len(self.df.b.unique()))
g = ag.PairGrid(self.df, hue="a", palette="Set2")
assert g.palette == color_palette("Set2", len(self.df.a.unique()))
dict_pal = dict(a="red", b="green", c="blue")
list_pal = color_palette(["red", "green", "blue"])
g = ag.PairGrid(self.df, hue="a", palette=dict_pal)
assert g.palette == list_pal
list_pal = color_palette(["blue", "red", "green"])
g = ag.PairGrid(self.df, hue="a", hue_order=list("cab"),
palette=dict_pal)
assert g.palette == list_pal
def test_hue_kws(self):
kws = dict(marker=["o", "s", "d", "+"])
g = ag.PairGrid(self.df, hue="a", hue_kws=kws)
g.map(plt.plot)
for line, marker in zip(g.axes[0, 0].lines, kws["marker"]):
nt.assert_equal(line.get_marker(), marker)
g = ag.PairGrid(self.df, hue="a", hue_kws=kws,
hue_order=list("dcab"))
g.map(plt.plot)
for line, marker in zip(g.axes[0, 0].lines, kws["marker"]):
nt.assert_equal(line.get_marker(), marker)
@skipif(old_matplotlib)
def test_hue_order(self):
order = list("dcab")
g = ag.PairGrid(self.df, hue="a", hue_order=order)
g.map(plt.plot)
for line, level in zip(g.axes[1, 0].lines, order):
x, y = line.get_xydata().T
npt.assert_array_equal(x, self.df.loc[self.df.a == level, "x"])
npt.assert_array_equal(y, self.df.loc[self.df.a == level, "y"])
plt.close("all")
g = ag.PairGrid(self.df, hue="a", hue_order=order)
g.map_diag(plt.plot)
for line, level in zip(g.axes[0, 0].lines, order):
x, y = line.get_xydata().T
npt.assert_array_equal(x, self.df.loc[self.df.a == level, "x"])
npt.assert_array_equal(y, self.df.loc[self.df.a == level, "x"])
plt.close("all")
g = ag.PairGrid(self.df, hue="a", hue_order=order)
g.map_lower(plt.plot)
for line, level in zip(g.axes[1, 0].lines, order):
x, y = line.get_xydata().T
npt.assert_array_equal(x, self.df.loc[self.df.a == level, "x"])
npt.assert_array_equal(y, self.df.loc[self.df.a == level, "y"])
plt.close("all")
g = ag.PairGrid(self.df, hue="a", hue_order=order)
g.map_upper(plt.plot)
for line, level in zip(g.axes[0, 1].lines, order):
x, y = line.get_xydata().T
npt.assert_array_equal(x, self.df.loc[self.df.a == level, "y"])
npt.assert_array_equal(y, self.df.loc[self.df.a == level, "x"])
plt.close("all")
@skipif(old_matplotlib)
def test_hue_order_missing_level(self):
order = list("dcaeb")
g = ag.PairGrid(self.df, hue="a", hue_order=order)
g.map(plt.plot)
for line, level in zip(g.axes[1, 0].lines, order):
x, y = line.get_xydata().T
npt.assert_array_equal(x, self.df.loc[self.df.a == level, "x"])
npt.assert_array_equal(y, self.df.loc[self.df.a == level, "y"])
plt.close("all")
g = ag.PairGrid(self.df, hue="a", hue_order=order)
g.map_diag(plt.plot)
for line, level in zip(g.axes[0, 0].lines, order):
x, y = line.get_xydata().T
npt.assert_array_equal(x, self.df.loc[self.df.a == level, "x"])
npt.assert_array_equal(y, self.df.loc[self.df.a == level, "x"])
plt.close("all")
g = ag.PairGrid(self.df, hue="a", hue_order=order)
g.map_lower(plt.plot)
for line, level in zip(g.axes[1, 0].lines, order):
x, y = line.get_xydata().T
npt.assert_array_equal(x, self.df.loc[self.df.a == level, "x"])
npt.assert_array_equal(y, self.df.loc[self.df.a == level, "y"])
plt.close("all")
g = ag.PairGrid(self.df, hue="a", hue_order=order)
g.map_upper(plt.plot)
for line, level in zip(g.axes[0, 1].lines, order):
x, y = line.get_xydata().T
npt.assert_array_equal(x, self.df.loc[self.df.a == level, "y"])
npt.assert_array_equal(y, self.df.loc[self.df.a == level, "x"])
plt.close("all")
def test_nondefault_index(self):
df = self.df.copy().set_index("b")
vars = ["x", "y", "z"]
g1 = ag.PairGrid(df)
g1.map(plt.scatter)
for i, axes_i in enumerate(g1.axes):
for j, ax in enumerate(axes_i):
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
g2 = ag.PairGrid(df, "a")
g2.map(plt.scatter)
for i, axes_i in enumerate(g2.axes):
for j, ax in enumerate(axes_i):
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
for k, k_level in enumerate(self.df.a.unique()):
x_in_k = x_in[self.df.a == k_level]
y_in_k = y_in[self.df.a == k_level]
x_out, y_out = ax.collections[k].get_offsets().T
npt.assert_array_equal(x_in_k, x_out)
npt.assert_array_equal(y_in_k, y_out)
@skipif(old_matplotlib)
def test_pairplot(self):
vars = ["x", "y", "z"]
g = ag.pairplot(self.df)
for ax in g.diag_axes:
assert len(ax.patches) > 1
for i, j in zip(*np.triu_indices_from(g.axes, 1)):
ax = g.axes[i, j]
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
for i, j in zip(*np.tril_indices_from(g.axes, -1)):
ax = g.axes[i, j]
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
for i, j in zip(*np.diag_indices_from(g.axes)):
ax = g.axes[i, j]
nt.assert_equal(len(ax.collections), 0)
g = ag.pairplot(self.df, hue="a")
n = len(self.df.a.unique())
for ax in g.diag_axes:
assert len(ax.lines) == n
assert len(ax.collections) == n
@skipif(old_matplotlib)
def test_pairplot_reg(self):
vars = ["x", "y", "z"]
g = ag.pairplot(self.df, diag_kind="hist", kind="reg")
for ax in g.diag_axes:
nt.assert_equal(len(ax.patches), 10)
for i, j in zip(*np.triu_indices_from(g.axes, 1)):
ax = g.axes[i, j]
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
nt.assert_equal(len(ax.lines), 1)
nt.assert_equal(len(ax.collections), 2)
for i, j in zip(*np.tril_indices_from(g.axes, -1)):
ax = g.axes[i, j]
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
nt.assert_equal(len(ax.lines), 1)
nt.assert_equal(len(ax.collections), 2)
for i, j in zip(*np.diag_indices_from(g.axes)):
ax = g.axes[i, j]
nt.assert_equal(len(ax.collections), 0)
@skipif(old_matplotlib)
def test_pairplot_kde(self):
vars = ["x", "y", "z"]
g = ag.pairplot(self.df, diag_kind="kde")
for ax in g.diag_axes:
nt.assert_equal(len(ax.lines), 1)
for i, j in zip(*np.triu_indices_from(g.axes, 1)):
ax = g.axes[i, j]
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
for i, j in zip(*np.tril_indices_from(g.axes, -1)):
ax = g.axes[i, j]
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
for i, j in zip(*np.diag_indices_from(g.axes)):
ax = g.axes[i, j]
nt.assert_equal(len(ax.collections), 0)
@skipif(old_matplotlib)
def test_pairplot_markers(self):
vars = ["x", "y", "z"]
markers = ["o", "x", "s"]
g = ag.pairplot(self.df, hue="a", vars=vars, markers=markers)
assert g.hue_kws["marker"] == markers
plt.close("all")
with pytest.raises(ValueError):
g = ag.pairplot(self.df, hue="a", vars=vars, markers=markers[:-2])
class TestJointGrid(object):
rs = np.random.RandomState(sum(map(ord, "JointGrid")))
x = rs.randn(100)
y = rs.randn(100)
x_na = x.copy()
x_na[10] = np.nan
x_na[20] = np.nan
data = pd.DataFrame(dict(x=x, y=y, x_na=x_na))
def test_margin_grid_from_lists(self):
g = ag.JointGrid(self.x.tolist(), self.y.tolist())
npt.assert_array_equal(g.x, self.x)
npt.assert_array_equal(g.y, self.y)
def test_margin_grid_from_arrays(self):
g = ag.JointGrid(self.x, self.y)
npt.assert_array_equal(g.x, self.x)
npt.assert_array_equal(g.y, self.y)
def test_margin_grid_from_series(self):
g = ag.JointGrid(self.data.x, self.data.y)
npt.assert_array_equal(g.x, self.x)
npt.assert_array_equal(g.y, self.y)
def test_margin_grid_from_dataframe(self):
g = ag.JointGrid("x", "y", self.data)
npt.assert_array_equal(g.x, self.x)
npt.assert_array_equal(g.y, self.y)
def test_margin_grid_from_dataframe_bad_variable(self):
with nt.assert_raises(ValueError):
ag.JointGrid("x", "bad_column", self.data)
def test_margin_grid_axis_labels(self):
g = ag.JointGrid("x", "y", self.data)
xlabel, ylabel = g.ax_joint.get_xlabel(), g.ax_joint.get_ylabel()
nt.assert_equal(xlabel, "x")
nt.assert_equal(ylabel, "y")
g.set_axis_labels("x variable", "y variable")
xlabel, ylabel = g.ax_joint.get_xlabel(), g.ax_joint.get_ylabel()
nt.assert_equal(xlabel, "x variable")
nt.assert_equal(ylabel, "y variable")
def test_dropna(self):
g = ag.JointGrid("x_na", "y", self.data, dropna=False)
nt.assert_equal(len(g.x), len(self.x_na))
g = ag.JointGrid("x_na", "y", self.data, dropna=True)
nt.assert_equal(len(g.x), pd.notnull(self.x_na).sum())
def test_axlims(self):
lim = (-3, 3)
g = ag.JointGrid("x", "y", self.data, xlim=lim, ylim=lim)
nt.assert_equal(g.ax_joint.get_xlim(), lim)
nt.assert_equal(g.ax_joint.get_ylim(), lim)
nt.assert_equal(g.ax_marg_x.get_xlim(), lim)
nt.assert_equal(g.ax_marg_y.get_ylim(), lim)
def test_marginal_ticks(self):
g = ag.JointGrid("x", "y", self.data)
        nt.assert_true(len(g.ax_marg_x.get_xticks()) == 0)
        nt.assert_true(len(g.ax_marg_y.get_yticks()) == 0)
def test_bivariate_plot(self):
g = ag.JointGrid("x", "y", self.data)
g.plot_joint(plt.plot)
x, y = g.ax_joint.lines[0].get_xydata().T
npt.assert_array_equal(x, self.x)
npt.assert_array_equal(y, self.y)
def test_univariate_plot(self):
g = ag.JointGrid("x", "x", self.data)
g.plot_marginals(kdeplot)
_, y1 = g.ax_marg_x.lines[0].get_xydata().T
y2, _ = g.ax_marg_y.lines[0].get_xydata().T
npt.assert_array_equal(y1, y2)
def test_plot(self):
g = ag.JointGrid("x", "x", self.data)
g.plot(plt.plot, kdeplot)
x, y = g.ax_joint.lines[0].get_xydata().T
npt.assert_array_equal(x, self.x)
npt.assert_array_equal(y, self.x)
_, y1 = g.ax_marg_x.lines[0].get_xydata().T
y2, _ = g.ax_marg_y.lines[0].get_xydata().T
npt.assert_array_equal(y1, y2)
def test_annotate(self):
g = ag.JointGrid("x", "y", self.data)
rp = stats.pearsonr(self.x, self.y)
g.annotate(stats.pearsonr)
annotation = g.ax_joint.legend_.texts[0].get_text()
nt.assert_equal(annotation, "pearsonr = %.2g; p = %.2g" % rp)
g.annotate(stats.pearsonr, stat="correlation")
annotation = g.ax_joint.legend_.texts[0].get_text()
nt.assert_equal(annotation, "correlation = %.2g; p = %.2g" % rp)
def rsquared(x, y):
return stats.pearsonr(x, y)[0] ** 2
r2 = rsquared(self.x, self.y)
g.annotate(rsquared)
annotation = g.ax_joint.legend_.texts[0].get_text()
nt.assert_equal(annotation, "rsquared = %.2g" % r2)
template = "{stat} = {val:.3g} (p = {p:.3g})"
g.annotate(stats.pearsonr, template=template)
annotation = g.ax_joint.legend_.texts[0].get_text()
nt.assert_equal(annotation, template.format(stat="pearsonr",
val=rp[0], p=rp[1]))
def test_space(self):
g = ag.JointGrid("x", "y", self.data, space=0)
joint_bounds = g.ax_joint.bbox.bounds
marg_x_bounds = g.ax_marg_x.bbox.bounds
marg_y_bounds = g.ax_marg_y.bbox.bounds
nt.assert_equal(joint_bounds[2], marg_x_bounds[2])
nt.assert_equal(joint_bounds[3], marg_y_bounds[3])
class TestJointPlot(object):
rs = np.random.RandomState(sum(map(ord, "jointplot")))
x = rs.randn(100)
y = rs.randn(100)
data = pd.DataFrame(dict(x=x, y=y))
def test_scatter(self):
g = ag.jointplot("x", "y", self.data)
nt.assert_equal(len(g.ax_joint.collections), 1)
x, y = g.ax_joint.collections[0].get_offsets().T
npt.assert_array_equal(self.x, x)
npt.assert_array_equal(self.y, y)
x_bins = _freedman_diaconis_bins(self.x)
nt.assert_equal(len(g.ax_marg_x.patches), x_bins)
y_bins = _freedman_diaconis_bins(self.y)
nt.assert_equal(len(g.ax_marg_y.patches), y_bins)
def test_reg(self):
g = ag.jointplot("x", "y", self.data, kind="reg")
nt.assert_equal(len(g.ax_joint.collections), 2)
x, y = g.ax_joint.collections[0].get_offsets().T
npt.assert_array_equal(self.x, x)
npt.assert_array_equal(self.y, y)
x_bins = _freedman_diaconis_bins(self.x)
nt.assert_equal(len(g.ax_marg_x.patches), x_bins)
y_bins = _freedman_diaconis_bins(self.y)
nt.assert_equal(len(g.ax_marg_y.patches), y_bins)
nt.assert_equal(len(g.ax_joint.lines), 1)
nt.assert_equal(len(g.ax_marg_x.lines), 1)
nt.assert_equal(len(g.ax_marg_y.lines), 1)
def test_resid(self):
g = ag.jointplot("x", "y", self.data, kind="resid")
nt.assert_equal(len(g.ax_joint.collections), 1)
nt.assert_equal(len(g.ax_joint.lines), 1)
nt.assert_equal(len(g.ax_marg_x.lines), 0)
nt.assert_equal(len(g.ax_marg_y.lines), 1)
def test_hex(self):
g = ag.jointplot("x", "y", self.data, kind="hex")
nt.assert_equal(len(g.ax_joint.collections), 1)
x_bins = _freedman_diaconis_bins(self.x)
nt.assert_equal(len(g.ax_marg_x.patches), x_bins)
y_bins = _freedman_diaconis_bins(self.y)
nt.assert_equal(len(g.ax_marg_y.patches), y_bins)
def test_kde(self):
g = ag.jointplot("x", "y", self.data, kind="kde")
nt.assert_true(len(g.ax_joint.collections) > 0)
nt.assert_equal(len(g.ax_marg_x.collections), 1)
nt.assert_equal(len(g.ax_marg_y.collections), 1)
nt.assert_equal(len(g.ax_marg_x.lines), 1)
nt.assert_equal(len(g.ax_marg_y.lines), 1)
def test_color(self):
g = ag.jointplot("x", "y", self.data, color="purple")
purple = mpl.colors.colorConverter.to_rgb("purple")
scatter_color = g.ax_joint.collections[0].get_facecolor()[0, :3]
nt.assert_equal(tuple(scatter_color), purple)
hist_color = g.ax_marg_x.patches[0].get_facecolor()[:3]
nt.assert_equal(hist_color, purple)
def test_annotation(self):
g = ag.jointplot("x", "y", self.data, stat_func=stats.pearsonr)
nt.assert_equal(len(g.ax_joint.legend_.get_texts()), 1)
g = ag.jointplot("x", "y", self.data, stat_func=None)
nt.assert_is(g.ax_joint.legend_, None)
def test_hex_customise(self):
# test that default gridsize can be overridden
g = ag.jointplot("x", "y", self.data, kind="hex",
joint_kws=dict(gridsize=5))
nt.assert_equal(len(g.ax_joint.collections), 1)
a = g.ax_joint.collections[0].get_array()
nt.assert_equal(28, a.shape[0]) # 28 hexagons expected for gridsize 5
def test_bad_kind(self):
with nt.assert_raises(ValueError):
ag.jointplot("x", "y", self.data, kind="not_a_kind")
| bsd-3-clause |
samgoodgame/sf_crime | iterations/spark-sklearn/nn.py | 1 | 3171 | import numpy as np
from time import time
from operator import itemgetter
from sklearn import svm, grid_search
from sklearn.ensemble import RandomForestClassifier
from sknn.mlp import Classifier, Layer
from spark_sklearn import GridSearchCV
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler
# Utility function to report best scores
def report(grid_scores, n_top=3):
top_scores = sorted(grid_scores, key=itemgetter(1), reverse=True)[:n_top]
for i, score in enumerate(top_scores):
print("Model with rank: {0}".format(i + 1))
print("Mean validation score: {0:.3f} (std: {1:.3f})".format(
score.mean_validation_score,
np.std(score.cv_validation_scores)))
print("Parameters: {0}".format(score.parameters))
print("")
##################### Data Wrangling #######################################
data_path = "./x_data_3.csv"
df = pd.read_csv(data_path, header=0)
x_data = df.drop('category', 1)
y = df.category.as_matrix()
x_complete = x_data.fillna(x_data.mean())
X_raw = x_complete.as_matrix()
X = MinMaxScaler().fit_transform(X_raw)
np.random.seed(0)
shuffle = np.random.permutation(np.arange(X.shape[0]))
X, y = X[shuffle], y[shuffle]
# Due to difficulties with log loss and set(y_pred) needing to match set(labels), we will remove the extremely rare
# crimes from the data for quality issues.
X_minus_trea = X[np.where(y != 'TREA')]
y_minus_trea = y[np.where(y != 'TREA')]
X_final = X_minus_trea[np.where(y_minus_trea != 'PORNOGRAPHY/OBSCENE MAT')]
y_final = y_minus_trea[np.where(y_minus_trea != 'PORNOGRAPHY/OBSCENE MAT')]
# Separate training, dev, and test data:
test_data, test_labels = X_final[800000:], y_final[800000:]
dev_data, dev_labels = X_final[700000:800000], y_final[700000:800000]
train_data, train_labels = X_final[100000:700000], y_final[100000:700000]
calibrate_data, calibrate_labels = X_final[:100000], y_final[:100000]
# Create mini versions of the above sets
mini_train_data, mini_train_labels = X_final[:20000], y_final[:20000]
mini_calibrate_data, mini_calibrate_labels = X_final[19000:28000], y_final[19000:28000]
mini_dev_data, mini_dev_labels = X_final[49000:60000], y_final[49000:60000]
param_grid= {
'learning_rate': [0.05, 0.01, 0.005, 0.001],
'n_iter': [25, 50, 100, 200],
'hidden0__units': [4, 8, 12, 16, 20],
'hidden0__type': ["Rectifier", "Sigmoid", "Tanh"],
'hidden0__dropout':[0.2, 0.3, 0.4],
'hidden1__units': [4, 8, 12, 16, 20],
'hidden1__type': ["Rectifier", "Sigmoid", "Tanh"],
'hidden1__dropout':[0.2, 0.3, 0.4],
'hidden2__units': [4, 8, 12, 16, 20],
'hidden2__type': ["Rectifier", "Sigmoid", "Tanh"],
'hidden2__dropout':[0.2, 0.3, 0.4]}
nn = Classifier(
layers=[
Layer("Sigmoid", units = 20),
Layer("Sigmoid", units = 20),
Layer("Sigmoid", units = 20),
Layer("Softmax")])
gs = GridSearchCV(sc, nn, param_grid)
start = time()
gs.fit(mini_train_data, mini_train_labels)
print("GridSearchCV took {:.2f} seconds for {:d} candidate settings.".format(time() - start, len(gs.grid_scores_)))
report(gs.grid_scores_)
| mit |
pianomania/scikit-learn | benchmarks/bench_plot_lasso_path.py | 84 | 4005 | """Benchmarks of Lasso regularization path computation using Lars and CD
The input data is mostly low rank but is a fat infinite tail.
"""
from __future__ import print_function
from collections import defaultdict
import gc
import sys
from time import time
import numpy as np
from sklearn.linear_model import lars_path
from sklearn.linear_model import lasso_path
from sklearn.datasets.samples_generator import make_regression
def compute_bench(samples_range, features_range):
it = 0
results = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
dataset_kwargs = {
'n_samples': n_samples,
'n_features': n_features,
                'n_informative': n_features // 10,
                'effective_rank': min(n_samples, n_features) // 10,
#'effective_rank': None,
'bias': 0.0,
}
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
X, y = make_regression(**dataset_kwargs)
gc.collect()
print("benchmarking lars_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
G = np.dot(X.T, X) # precomputed Gram matrix
Xy = np.dot(X.T, y)
lars_path(X, y, Xy=Xy, Gram=G, method='lasso')
delta = time() - tstart
print("%0.3fs" % delta)
results['lars_path (with Gram)'].append(delta)
gc.collect()
print("benchmarking lars_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lars_path(X, y, method='lasso')
delta = time() - tstart
print("%0.3fs" % delta)
results['lars_path (without Gram)'].append(delta)
gc.collect()
print("benchmarking lasso_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
lasso_path(X, y, precompute=True)
delta = time() - tstart
print("%0.3fs" % delta)
results['lasso_path (with Gram)'].append(delta)
gc.collect()
print("benchmarking lasso_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lasso_path(X, y, precompute=False)
delta = time() - tstart
print("%0.3fs" % delta)
results['lasso_path (without Gram)'].append(delta)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(10, 2000, 5).astype(np.int)
features_range = np.linspace(10, 2000, 5).astype(np.int)
results = compute_bench(samples_range, features_range)
max_time = max(max(t) for t in results.values())
fig = plt.figure('scikit-learn Lasso path benchmark results')
i = 1
for c, (label, timings) in zip('bcry', sorted(results.items())):
ax = fig.add_subplot(2, 2, i, projection='3d')
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.8)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
# ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
ax.set_zlabel('Time (s)')
ax.set_zlim3d(0.0, max_time * 1.1)
ax.set_title(label)
# ax.legend()
i += 1
plt.show()
| bsd-3-clause |
chrjxj/zipline | zipline/utils/security_list.py | 2 | 4544 | from datetime import datetime
from os import listdir
import os.path
import pandas as pd
import pytz
import zipline
DATE_FORMAT = "%Y%m%d"
zipline_dir = os.path.dirname(zipline.__file__)
SECURITY_LISTS_DIR = os.path.join(zipline_dir, 'resources', 'security_lists')
class SecurityList(object):
def __init__(self, data, current_date_func, asset_finder):
"""
data: a nested dictionary:
knowledge_date -> lookup_date ->
              {'add': [symbol list], 'delete': [symbol list]}
current_date_func: function taking no parameters, returning
current datetime
"""
self.data = data
self._cache = {}
self._knowledge_dates = self.make_knowledge_dates(self.data)
self.current_date = current_date_func
self.count = 0
self._current_set = set()
self.asset_finder = asset_finder
def make_knowledge_dates(self, data):
knowledge_dates = sorted(
[pd.Timestamp(k) for k in data.keys()])
return knowledge_dates
def __iter__(self):
return iter(self.restricted_list)
def __contains__(self, item):
return item in self.restricted_list
@property
def restricted_list(self):
cd = self.current_date()
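        # Walk the knowledge dates in chronological order up to the current
        # date, applying each date's adds/deletes cumulatively and caching the
        # resulting set per knowledge date so later lookups can reuse it.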
for kd in self._knowledge_dates:
if cd < kd:
break
if kd in self._cache:
self._current_set = self._cache[kd]
continue
for effective_date, changes in iter(self.data[kd].items()):
self.update_current(
effective_date,
changes['add'],
self._current_set.add
)
self.update_current(
effective_date,
changes['delete'],
self._current_set.remove
)
self._cache[kd] = self._current_set
return self._current_set
def update_current(self, effective_date, symbols, change_func):
for symbol in symbols:
asset = self.asset_finder.lookup_symbol(
symbol,
as_of_date=effective_date
)
# Pass if no Asset exists for the symbol
if asset is None:
continue
change_func(asset.sid)
class SecurityListSet(object):
# provide a cut point to substitute other security
# list implementations.
security_list_type = SecurityList
def __init__(self, current_date_func, asset_finder):
self.current_date_func = current_date_func
self.asset_finder = asset_finder
self._leveraged_etf = None
@property
def leveraged_etf_list(self):
if self._leveraged_etf is None:
self._leveraged_etf = self.security_list_type(
load_from_directory('leveraged_etf_list'),
self.current_date_func,
asset_finder=self.asset_finder
)
return self._leveraged_etf
def load_from_directory(list_name):
"""
To resolve the symbol in the LEVERAGED_ETF list,
the date on which the symbol was in effect is needed.
Furthermore, to maintain a point in time record of our own maintenance
of the restricted list, we need a knowledge date. Thus, restricted lists
are dictionaries of datetime->symbol lists.
new symbols should be entered as a new knowledge date entry.
This method assumes a directory structure of:
SECURITY_LISTS_DIR/listname/knowledge_date/lookup_date/add.txt
SECURITY_LISTS_DIR/listname/knowledge_date/lookup_date/delete.txt
The return value is a dictionary with:
knowledge_date -> lookup_date ->
            {'add': [symbol list], 'delete': [symbol list]}
"""
data = {}
dir_path = os.path.join(SECURITY_LISTS_DIR, list_name)
for kd_name in listdir(dir_path):
kd = datetime.strptime(kd_name, DATE_FORMAT).replace(
tzinfo=pytz.utc)
data[kd] = {}
kd_path = os.path.join(dir_path, kd_name)
for ld_name in listdir(kd_path):
ld = datetime.strptime(ld_name, DATE_FORMAT).replace(
tzinfo=pytz.utc)
data[kd][ld] = {}
ld_path = os.path.join(kd_path, ld_name)
for fname in listdir(ld_path):
fpath = os.path.join(ld_path, fname)
with open(fpath) as f:
symbols = f.read().splitlines()
data[kd][ld][fname] = symbols
return data
| apache-2.0 |
kylerbrown/scikit-learn | sklearn/metrics/tests/test_ranking.py | 127 | 40813 | from __future__ import division, print_function
import numpy as np
from itertools import product
import warnings
from scipy.sparse import csr_matrix
from sklearn import datasets
from sklearn import svm
from sklearn import ensemble
from sklearn.datasets import make_multilabel_classification
from sklearn.random_projection import sparse_random_matrix
from sklearn.utils.validation import check_array, check_consistent_length
from sklearn.utils.validation import check_random_state
from sklearn.utils.testing import assert_raises, clean_warning_registry
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.metrics import auc
from sklearn.metrics import average_precision_score
from sklearn.metrics import coverage_error
from sklearn.metrics import label_ranking_average_precision_score
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import label_ranking_loss
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.metrics.base import UndefinedMetricWarning
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
"""Make some classification predictions on a toy dataset using a SVC
If binary is True restrict to a binary classification problem instead of a
multiclass classification problem
"""
if dataset is None:
# import some data to play with
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if binary:
# restrict to a binary classification task
X, y = X[y < 2], y[y < 2]
n_samples, n_features = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
X, y = X[p], y[p]
half = int(n_samples / 2)
# add noisy features to make the problem harder and avoid perfect results
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
# run classifier, get class probabilities and label predictions
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if binary:
# only interested in probabilities of the positive case
# XXX: do we really want a special API for the binary case?
probas_pred = probas_pred[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
return y_true, y_pred, probas_pred
###############################################################################
# Tests
def _auc(y_true, y_score):
"""Alternative implementation to check for correctness of
`roc_auc_score`."""
pos_label = np.unique(y_true)[1]
# Count the number of times positive samples are correctly ranked above
# negative samples.
pos = y_score[y_true == pos_label]
neg = y_score[y_true != pos_label]
diff_matrix = pos.reshape(1, -1) - neg.reshape(-1, 1)
n_correct = np.sum(diff_matrix > 0)
return n_correct / float(len(pos) * len(neg))
def _average_precision(y_true, y_score):
"""Alternative implementation to check for correctness of
`average_precision_score`."""
pos_label = np.unique(y_true)[1]
n_pos = np.sum(y_true == pos_label)
order = np.argsort(y_score)[::-1]
y_score = y_score[order]
y_true = y_true[order]
score = 0
for i in range(len(y_score)):
if y_true[i] == pos_label:
# Compute precision up to document i
# i.e, percentage of relevant documents up to document i.
prec = 0
for j in range(0, i + 1):
if y_true[j] == pos_label:
prec += 1.0
prec /= (i + 1.0)
score += prec
return score / n_pos
def test_roc_curve():
# Test Area under Receiver Operating Characteristic (ROC) curve
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
roc_auc = auc(fpr, tpr)
expected_auc = _auc(y_true, probas_pred)
assert_array_almost_equal(roc_auc, expected_auc, decimal=2)
assert_almost_equal(roc_auc, roc_auc_score(y_true, probas_pred))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_end_points():
# Make sure that roc_curve returns a curve start at 0 and ending and
# 1 even in corner cases
rng = np.random.RandomState(0)
y_true = np.array([0] * 50 + [1] * 50)
y_pred = rng.randint(3, size=100)
fpr, tpr, thr = roc_curve(y_true, y_pred)
assert_equal(fpr[0], 0)
assert_equal(fpr[-1], 1)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thr.shape)
def test_roc_returns_consistency():
# Test whether the returned threshold matches up with tpr
# make small toy dataset
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
# use the given thresholds to determine the tpr
tpr_correct = []
for t in thresholds:
tp = np.sum((probas_pred >= t) & y_true)
p = np.sum(y_true)
tpr_correct.append(1.0 * tp / p)
# compare tpr and tpr_correct to see if the thresholds' order was correct
assert_array_almost_equal(tpr, tpr_correct, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_nonrepeating_thresholds():
# Test to ensure that we don't return spurious repeating thresholds.
# Duplicated thresholds can arise due to machine precision issues.
dataset = datasets.load_digits()
X = dataset['data']
y = dataset['target']
# This random forest classifier can only return probabilities
# significant to two decimal places
clf = ensemble.RandomForestClassifier(n_estimators=100, random_state=0)
# How well can the classifier predict whether a digit is less than 5?
# This task contributes floating point roundoff errors to the probabilities
train, test = slice(None, None, 2), slice(1, None, 2)
probas_pred = clf.fit(X[train], y[train]).predict_proba(X[test])
y_score = probas_pred[:, :5].sum(axis=1) # roundoff errors begin here
y_true = [yy < 5 for yy in y[test]]
# Check for repeating values in the thresholds
fpr, tpr, thresholds = roc_curve(y_true, y_score)
assert_equal(thresholds.size, np.unique(np.round(thresholds, 2)).size)
def test_roc_curve_multi():
# roc_curve not applicable for multi-class problems
y_true, _, probas_pred = make_prediction(binary=False)
assert_raises(ValueError, roc_curve, y_true, probas_pred)
def test_roc_curve_confidence():
# roc_curve for confidence scores
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred - 0.5)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.90, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_hard():
# roc_curve for hard decisions
y_true, pred, probas_pred = make_prediction(binary=True)
# always predict one
trivial_pred = np.ones(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# always predict zero
trivial_pred = np.zeros(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# hard decisions
fpr, tpr, thresholds = roc_curve(y_true, pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.78, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_one_label():
y_true = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
y_pred = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
# assert there are warnings
w = UndefinedMetricWarning
fpr, tpr, thresholds = assert_warns(w, roc_curve, y_true, y_pred)
# all true labels, all fpr should be nan
assert_array_equal(fpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# assert there are warnings
fpr, tpr, thresholds = assert_warns(w, roc_curve,
[1 - x for x in y_true],
y_pred)
# all negative labels, all tpr should be nan
assert_array_equal(tpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_toydata():
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [0, 1]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1, 1])
assert_array_almost_equal(fpr, [0, 0, 1])
assert_almost_equal(roc_auc, 0.)
y_true = [1, 0]
y_score = [1, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, 0.5)
y_true = [1, 0]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, .5)
y_true = [0, 0]
y_score = [0.25, 0.75]
tpr, fpr, _ = roc_curve(y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [0., 0.5, 1.])
assert_array_almost_equal(fpr, [np.nan, np.nan, np.nan])
y_true = [1, 1]
y_score = [0.25, 0.75]
tpr, fpr, _ = roc_curve(y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [np.nan, np.nan])
assert_array_almost_equal(fpr, [0.5, 1.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 1.)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0.5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0.5)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), .5)
def test_auc():
# Test Area Under Curve (AUC) computation
x = [0, 1]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0, 0]
y = [0, 1, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [0, 1]
y = [1, 1]
assert_array_almost_equal(auc(x, y), 1)
x = [0, 0.5, 1]
y = [0, 0.5, 1]
assert_array_almost_equal(auc(x, y), 0.5)
def test_auc_duplicate_values():
# Test Area Under Curve (AUC) computation with duplicate values
# auc() was previously sorting the x and y arrays according to the indices
# from numpy.argsort(x), which was reordering the tied 0's in this example
# and resulting in an incorrect area computation. This test detects the
# error.
x = [-2.0, 0.0, 0.0, 0.0, 1.0]
y1 = [2.0, 0.0, 0.5, 1.0, 1.0]
y2 = [2.0, 1.0, 0.0, 0.5, 1.0]
y3 = [2.0, 1.0, 0.5, 0.0, 1.0]
for y in (y1, y2, y3):
assert_array_almost_equal(auc(x, y, reorder=True), 3.0)
def test_auc_errors():
# Incompatible shapes
assert_raises(ValueError, auc, [0.0, 0.5, 1.0], [0.1, 0.2])
# Too few x values
assert_raises(ValueError, auc, [0.0], [0.1])
# x is not in order
assert_raises(ValueError, auc, [1.0, 0.0, 0.5], [0.0, 0.0, 0.0])
def test_auc_score_non_binary_class():
# Test that roc_auc_score function returns an error when trying
# to compute AUC for non-binary class values.
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
clean_warning_registry()
with warnings.catch_warnings(record=True):
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
def test_precision_recall_curve():
y_true, _, probas_pred = make_prediction(binary=True)
_test_precision_recall_curve(y_true, probas_pred)
# Use {-1, 1} for labels; make sure original labels aren't modified
y_true[np.where(y_true == 0)] = -1
y_true_copy = y_true.copy()
_test_precision_recall_curve(y_true, probas_pred)
assert_array_equal(y_true_copy, y_true)
labels = [1, 0, 0, 1]
predict_probas = [1, 2, 3, 4]
p, r, t = precision_recall_curve(labels, predict_probas)
assert_array_almost_equal(p, np.array([0.5, 0.33333333, 0.5, 1., 1.]))
assert_array_almost_equal(r, np.array([1., 0.5, 0.5, 0.5, 0.]))
assert_array_almost_equal(t, np.array([1, 2, 3, 4]))
assert_equal(p.size, r.size)
assert_equal(p.size, t.size + 1)
def test_precision_recall_curve_pos_label():
y_true, _, probas_pred = make_prediction(binary=False)
pos_label = 2
p, r, thresholds = precision_recall_curve(y_true,
probas_pred[:, pos_label],
pos_label=pos_label)
p2, r2, thresholds2 = precision_recall_curve(y_true == pos_label,
probas_pred[:, pos_label])
assert_array_almost_equal(p, p2)
assert_array_almost_equal(r, r2)
assert_array_almost_equal(thresholds, thresholds2)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def _test_precision_recall_curve(y_true, probas_pred):
# Test Precision-Recall and area under PR curve
p, r, thresholds = precision_recall_curve(y_true, probas_pred)
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.85, 2)
assert_array_almost_equal(precision_recall_auc,
average_precision_score(y_true, probas_pred))
assert_almost_equal(_average_precision(y_true, probas_pred),
precision_recall_auc, 1)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
# Smoke test in the case of proba having only one value
p, r, thresholds = precision_recall_curve(y_true,
np.zeros_like(probas_pred))
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.75, 3)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def test_precision_recall_curve_errors():
# Contains non-binary labels
assert_raises(ValueError, precision_recall_curve,
[0, 1, 2], [[0.0], [1.0], [1.0]])
def test_precision_recall_curve_toydata():
with np.errstate(all="raise"):
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [0, 1]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 0., 1.])
assert_array_almost_equal(r, [1., 0., 0.])
assert_almost_equal(auc_prc, 0.25)
y_true = [1, 0]
y_score = [1, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1., 0])
assert_almost_equal(auc_prc, .75)
y_true = [1, 0]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1, 0.])
assert_almost_equal(auc_prc, .75)
y_true = [0, 0]
y_score = [0.25, 0.75]
assert_raises(Exception, precision_recall_curve, y_true, y_score)
assert_raises(Exception, average_precision_score, y_true, y_score)
y_true = [1, 1]
y_score = [0.25, 0.75]
p, r, _ = precision_recall_curve(y_true, y_score)
assert_almost_equal(average_precision_score(y_true, y_score), 1.)
assert_array_almost_equal(p, [1., 1., 1.])
assert_array_almost_equal(r, [1, 0.5, 0.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 1.)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.625)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.625)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.25)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.75)
def test_score_scale_invariance():
# Test that average_precision_score and roc_auc_score are invariant by
# the scaling or shifting of probabilities
y_true, _, probas_pred = make_prediction(binary=True)
roc_auc = roc_auc_score(y_true, probas_pred)
roc_auc_scaled = roc_auc_score(y_true, 100 * probas_pred)
roc_auc_shifted = roc_auc_score(y_true, probas_pred - 10)
assert_equal(roc_auc, roc_auc_scaled)
assert_equal(roc_auc, roc_auc_shifted)
pr_auc = average_precision_score(y_true, probas_pred)
pr_auc_scaled = average_precision_score(y_true, 100 * probas_pred)
pr_auc_shifted = average_precision_score(y_true, probas_pred - 10)
assert_equal(pr_auc, pr_auc_scaled)
assert_equal(pr_auc, pr_auc_shifted)
def check_lrap_toy(lrap_score):
# Check on several small examples that it works
assert_almost_equal(lrap_score([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1]], [[0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 1) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.75, 0.5, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.75, 0.5, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.5, 0.75, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.5, 0.75, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 1)
# Tie handling
assert_almost_equal(lrap_score([[1, 0]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[1, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.5, 0.5]]), 2 / 3)
assert_almost_equal(lrap_score([[1, 1, 1, 0]], [[0.5, 0.5, 0.5, 0.5]]),
3 / 4)
def check_zero_or_all_relevant_labels(lrap_score):
random_state = check_random_state(0)
for n_labels in range(2, 5):
y_score = random_state.uniform(size=(1, n_labels))
y_score_ties = np.zeros_like(y_score)
# No relevant labels
y_true = np.zeros((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Only relevant labels
y_true = np.ones((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Degenerate case: only one label
assert_almost_equal(lrap_score([[1], [0], [1], [0]],
[[0.5], [0.5], [0.5], [0.5]]), 1.)
def check_lrap_error_raised(lrap_score):
# Raise a ValueError if the input is not in an appropriate format
assert_raises(ValueError, lrap_score,
[0, 1, 0], [0.25, 0.3, 0.2])
assert_raises(ValueError, lrap_score, [0, 1, 2],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
assert_raises(ValueError, lrap_score, [(0), (1), (2)],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
# Check that y_true.shape != y_score.shape raises the proper exception
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, lrap_score, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
def check_lrap_only_ties(lrap_score):
# Check tie handling in score
# Basic check with only ties and increasing label space
for n_labels in range(2, 10):
y_score = np.ones((1, n_labels))
# Check for a growing number of consecutive relevant labels
for n_relevant in range(1, n_labels):
# Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
assert_almost_equal(lrap_score(y_true, y_score),
n_relevant / n_labels)
def check_lrap_without_tie_and_increasing_score(lrap_score):
# Check that label ranking average precision works without ties:
# basic check with increasing label space size and decreasing score
for n_labels in range(2, 10):
y_score = n_labels - (np.arange(n_labels).reshape((1, n_labels)) + 1)
# First and last
y_true = np.zeros((1, n_labels))
y_true[0, 0] = 1
y_true[0, -1] = 1
assert_almost_equal(lrap_score(y_true, y_score),
(2 / n_labels + 1) / 2)
# Check for a growing number of consecutive relevant labels
for n_relevant in range(1, n_labels):
# Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
assert_almost_equal(lrap_score(y_true, y_score),
sum((r + 1) / ((pos + r + 1) * n_relevant)
for r in range(n_relevant)))
def _my_lrap(y_true, y_score):
"""Simple implementation of label ranking average precision"""
check_consistent_length(y_true, y_score)
y_true = check_array(y_true)
y_score = check_array(y_score)
n_samples, n_labels = y_true.shape
score = np.empty((n_samples, ))
for i in range(n_samples):
# The best rank corresponds to 1. Ranks higher than 1 are worse.
# The best inverse ranking corresponds to n_labels.
unique_rank, inv_rank = np.unique(y_score[i], return_inverse=True)
n_ranks = unique_rank.size
rank = n_ranks - inv_rank
# Ranks need to be corrected to take ties into account:
# e.g. two labels tied at rank 1 both get rank 2.
corr_rank = np.bincount(rank, minlength=n_ranks + 1).cumsum()
rank = corr_rank[rank]
relevant = y_true[i].nonzero()[0]
if relevant.size == 0 or relevant.size == n_labels:
score[i] = 1
continue
score[i] = 0.
for label in relevant:
# Count the number of relevant labels with a better rank
# (i.e. a smaller rank).
n_ranked_above = sum(rank[r] <= rank[label] for r in relevant)
# Weight by the rank of the actual label
score[i] += n_ranked_above / rank[label]
score[i] /= relevant.size
return score.mean()
def check_alternative_lrap_implementation(lrap_score, n_classes=5,
n_samples=20, random_state=0):
_, y_true = make_multilabel_classification(n_features=1,
allow_unlabeled=False,
random_state=random_state,
n_classes=n_classes,
n_samples=n_samples)
# Score with ties
y_score = sparse_random_matrix(n_components=y_true.shape[0],
n_features=y_true.shape[1],
random_state=random_state)
if hasattr(y_score, "toarray"):
y_score = y_score.toarray()
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
# Uniform score
random_state = check_random_state(random_state)
y_score = random_state.uniform(size=(n_samples, n_classes))
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
def test_label_ranking_avp():
for fn in [label_ranking_average_precision_score, _my_lrap]:
yield check_lrap_toy, fn
yield check_lrap_without_tie_and_increasing_score, fn
yield check_lrap_only_ties, fn
yield check_zero_or_all_relevant_labels, fn
yield check_lrap_error_raised, label_ranking_average_precision_score
for n_samples, n_classes, random_state in product((1, 2, 8, 20),
(2, 5, 10),
range(1)):
yield (check_alternative_lrap_implementation,
label_ranking_average_precision_score,
n_classes, n_samples, random_state)
def test_coverage_error():
# Toy case
assert_almost_equal(coverage_error([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 0]], [[0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.75]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.75, 0.5, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.5, 0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
# Non-trivial case
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0]],
[[0.1, 10., -3], [0, 1, 3]]),
(1 + 3) / 2.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
def test_coverage_tie_handling():
assert_almost_equal(coverage_error([[0, 0]], [[0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[1, 0]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 3)
def test_label_ranking_loss():
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.25, 0.75]]), 0)
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.75]]),
0)
assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.75]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.75]]),
0)
assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.75]]),
2 / 2)
assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
2 / 2)
# Undefined metrics - the ranking doesn't matter
assert_almost_equal(label_ranking_loss([[0, 0]], [[0.75, 0.25]]), 0)
assert_almost_equal(label_ranking_loss([[1, 1]], [[0.75, 0.25]]), 0)
assert_almost_equal(label_ranking_loss([[0, 0]], [[0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[1, 1]], [[0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.5, 0.75, 0.25]]),
0)
assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.5, 0.75, 0.25]]),
0)
assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.25, 0.5, 0.5]]),
0)
assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
# Non-trivial case
assert_almost_equal(label_ranking_loss([[0, 1, 0], [1, 1, 0]],
[[0.1, 10., -3], [0, 1, 3]]),
(0 + 2 / 2) / 2.)
assert_almost_equal(label_ranking_loss(
[[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
(0 + 2 / 2 + 1 / 2) / 3.)
assert_almost_equal(label_ranking_loss(
[[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
(0 + 2 / 2 + 1 / 2) / 3.)
# Sparse csr matrices
assert_almost_equal(label_ranking_loss(
csr_matrix(np.array([[0, 1, 0], [1, 1, 0]])),
[[0.1, 10, -3], [3, 1, 3]]),
(0 + 2 / 2) / 2.)
def test_ranking_appropriate_input_shape():
# Check that y_true.shape != y_score.shape raises the proper exception
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, label_ranking_loss,
[[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, label_ranking_loss, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, label_ranking_loss,
[[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0], [1]])
def test_ranking_loss_ties_handling():
# Tie handling
assert_almost_equal(label_ranking_loss([[1, 0]], [[0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.5]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.5]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 1)
| bsd-3-clause |
jakeown/GAS | GAS/gauss_fit.py | 1 | 9175 | from spectral_cube import SpectralCube
from astropy.io import fits
import matplotlib.pyplot as plt
import astropy.units as u
import numpy as np
from scipy.optimize import curve_fit
from scipy import *
import time
import pprocess
from astropy.convolution import convolve
import radio_beam
import sys
def gauss_fitter(region = 'Cepheus_L1251', snr_min = 3.0, mol = 'C2S', vmin = 5.0, vmax=10.0, convolve=False, use_old_conv=False, multicore = 1, file_extension = None):
"""
Fit a Gaussian to non-NH3 emission lines from GAS.
It creates a cube for the best-fit Gaussian, a cube
for the best-fit Gaussian with noise added back into
the spectrum, and a parameter map of Tpeak, Vlsr, and FWHM.
Parameters
----------
region : str
Name of region to reduce
snr_min : float
Lowest signal-to-noise pixels to include in the line-fitting
mol : str
name of molecule to fit
vmin : numpy.float
Minimum centroid velocity, in km/s.
vmax : numpy.float
Maximum centroid velocity, in km/s.
convolve : bool or float
If not False, specifies the beam-size to convolve the original map with
Beam-size must be given in arcseconds
use_old_conv : bool
If True, use an already convolved map with name:
region + '_' + mol + file_extension + '_conv.fits'
This convolved map must be in units of km/s
multicore : int
Maximum number of simultaneous processes desired
file_extension: str
filename extension
"""
if file_extension:
root = file_extension
else:
# root = 'base{0}'.format(blorder)
root = 'all'
molecules = ['C2S', 'HC7N_22_21', 'HC7N_21_20', 'HC5N']
MolFile = '{0}/{0}_{2}_{1}.fits'.format(region,root,mol)
ConvFile = '{0}/{0}_{2}_{1}_conv.fits'.format(region,root,mol)
GaussOut = '{0}/{0}_{2}_{1}_gauss_cube.fits'.format(region,root,mol)
GaussNoiseOut = '{0}/{0}_{2}_{1}_gauss_cube_noise.fits'.format(region,root,mol)
ParamOut = '{0}/{0}_{2}_{1}_param_cube.fits'.format(region,root,mol)
# Load the spectral cube and convert to velocity units
cube = SpectralCube.read(MolFile)
cube_km = cube.with_spectral_unit(u.km / u.s, velocity_convention='radio')
# If desired, convolve map with larger beam
# or load previously created convolved cube
if convolve:
cube = SpectralCube.read(MolFile)
cube_km_1 = cube.with_spectral_unit(u.km / u.s, velocity_convention='radio')
beam = radio_beam.Beam(major=convolve*u.arcsec, minor=convolve*u.arcsec, pa=0*u.deg)
cube_km = cube_km_1.convolve_to(beam)
cube_km.write(ConvFile, format='fits', overwrite=True)
if use_old_conv:
cube_km = SpectralCube.read(ConvFile)
# Define the spectral axis in km/s
spectra_x_axis_kms = np.array(cube_km.spectral_axis)
# Find the channel range corresponding to vmin and vmax
# -- This is a hold-over from when I originally set up the code to
# use a channel range rather than velocity range.
# Can change later, but this should work for now.
low_channel = np.where(spectra_x_axis_kms<=vmax)[0][0]+1 # Add ones to change index to channel
high_channel = np.where(spectra_x_axis_kms>=vmin)[0][-1]+1 # Again, hold-over from older setup
peak_channels = [low_channel, high_channel]
# Create cubes for storing the fitted Gaussian profiles
# and the Gaussians with noise added back into the spectrum
header = cube_km.header
cube_gauss = np.array(cube_km.unmasked_data[:,:,:])
cube_gauss_noise = np.array(cube_km.unmasked_data[:,:,:])
shape = np.shape(cube_gauss)
# Set up a cube for storing fitted parameters
param_cube = np.zeros((6, shape[1], shape[2]))
param_header = cube_km.header
# Define the Gaussian profile
def p_eval(x, a, x0, sigma):
return a*np.exp(-(x-x0)**2/(2*sigma**2))
# Create some arrays full of NANs
# To be used in output cubes if fits fail
nan_array=np.empty(shape[0]) # For gauss cubes
nan_array[:] = np.NAN
nan_array2=np.empty(param_cube.shape[0]) # For param cubes
nan_array2[:] = np.NAN
# Loop through each pixel and find those
# with SNR above snr_min
x = []
y = []
pixels = 0
for (i,j), value in np.ndenumerate(cube_gauss[0]):
spectra=np.array(cube_km.unmasked_data[:,i,j])
if (False in np.isnan(spectra)):
rms = np.nanstd(np.append(spectra[0:(peak_channels[0]-1)], spectra[(peak_channels[1]+1):len(spectra)]))
if (max(spectra[peak_channels[0]:peak_channels[1]]) / rms) > snr_min:
pixels+=1
x.append(i)
y.append(j)
else:
cube_gauss[:,i,j]=nan_array
param_cube[:,i,j]=nan_array2
cube_gauss_noise[:,i,j]=nan_array
print str(pixels) + ' Pixels above SNR=' + str(snr_min)
# Define a Gaussian fitting function for each pixel
# i, j are the x,y coordinates of the pixel being fit
def pix_fit(i,j):
spectra = np.array(cube_km.unmasked_data[:,i,j])
# Use the peak brightness Temp within specified channel
# range as the initial guess for Gaussian height
max_ch = np.argmax(spectra[peak_channels[0]:peak_channels[1]])
Tpeak = spectra[peak_channels[0]:peak_channels[1]][max_ch]
# Use the velocity of the brightness Temp peak as
# initial guess for Gaussian mean
vpeak = spectra_x_axis_kms[peak_channels[0]:peak_channels[1]][max_ch]
rms = np.std(np.append(spectra[0:(peak_channels[0]-1)], spectra[(peak_channels[1]+1):len(spectra)]))
err1 = np.zeros(shape[0])+rms
# Create a noise spectrum based on rms of off-line channels
# This will be added to best-fit Gaussian to obtain a noisy Gaussian
noise=np.random.normal(0.,rms,len(spectra_x_axis_kms))
# Define initial guesses for Gaussian fit
guess = [Tpeak, vpeak, 0.3] # [height, mean, sigma]
try:
coeffs, covar_mat = curve_fit(p_eval, xdata=spectra_x_axis_kms, ydata=spectra, p0=guess, sigma=err1, maxfev=500)
gauss = np.array(p_eval(spectra_x_axis_kms,coeffs[0], coeffs[1], coeffs[2]))
noisy_gauss = np.array(p_eval(spectra_x_axis_kms,coeffs[0], coeffs[1], coeffs[2]))+noise
params = np.append(coeffs, (covar_mat[0][0]**0.5, covar_mat[1][1]**0.5, covar_mat[2][2]**0.5))
# params = ['Tpeak', 'VLSR','sigma','Tpeak_err','VLSR_err','sigma_err']
# Don't accept fit if fitted parameters are non-physical or too uncertain
if (params[0] < 0.01) or (params[3] > 1.0) or (params[2] < 0.05) or (params[5] > 0.5) or (params[4] > 0.75):
noisy_gauss = nan_array
gauss = nan_array
params = nan_array2
# Don't accept fit if the SNR for fitted spectrum is less than SNR threshold
#if max(gauss)/rms < snr_min:
# noisy_gauss = nan_array
# gauss = nan_array
# params = nan_array2
except RuntimeError:
noisy_gauss = nan_array
gauss = nan_array
params = nan_array2
return i, j, gauss, params, noisy_gauss
# Parallel computation:
nproc = multicore # maximum number of simultaneous processes desired
queue = pprocess.Queue(limit=nproc)
calc = queue.manage(pprocess.MakeParallel(pix_fit))
tic=time.time()
counter = 0
# Uncomment to see some plots of the fitted spectra
#for i,j in zip(x,y):
#pix_fit(i,j)
#plt.plot(spectra_x_axis_kms, spectra, color='blue', drawstyle='steps')
#plt.plot(spectra_x_axis_kms, gauss, color='red')
#plt.show()
#plt.close()
# Begin parallel computations
# Store the best-fit Gaussians and parameters
# in their correct positions in the previously created cubes
for i,j in zip(x,y):
calc(i,j)
for i,j,gauss_spec,parameters,noisy_gauss_spec in queue:
cube_gauss[:,i,j]=gauss_spec
param_cube[:,i,j]=parameters
cube_gauss_noise[:,i,j]=noisy_gauss_spec
counter+=1
print str(counter) + ' of ' + str(pixels) + ' pixels completed \r',
sys.stdout.flush()
print "\n %f s for parallel computation." % (time.time() - tic)
# Save final cubes
# These will be in km/s units.
# Spectra will have larger values to the left, lower values to right
cube_final_gauss = SpectralCube(data=cube_gauss, wcs=cube_km.wcs, header=cube_km.header)
cube_final_gauss.write(GaussOut, format='fits', overwrite=True)
cube_final_gauss_noise = SpectralCube(data=cube_gauss_noise, wcs=cube_km.wcs, header=cube_km.header)
cube_final_gauss_noise.write(GaussNoiseOut, format='fits', overwrite=True)
# Construct appropriate header for param_cube
param_header['NAXIS3'] = len(nan_array2)
param_header['WCSAXES'] = 3
param_header['CRPIX3'] = 1
param_header['CDELT3'] = 1
param_header['CRVAL3'] = 0
param_header['PLANE1'] = 'Tpeak'
param_header['PLANE2'] = 'VLSR'
param_header['PLANE3'] = 'sigma'
param_header['PLANE4'] = 'Tpeak_err'
param_header['PLANE5'] = 'VLSR_err'
param_header['PLANE6'] = 'sigma_err'
fits.writeto(ParamOut, param_cube, header=param_header, clobber=True)
### Examples ###
# Fit the HC5N data in Cepheus_L1251, without convolution
#gauss_fitter(region = 'Cepheus_L1251', snr_min = 7.0, mol = 'HC5N', vmin=-6.3, vmax=-2.2, multicore=3)
# Convolve the HC5N data in Cepheus_L1251 to a spatial resolution of 64 arcseconds,
# then fit a Gaussian to all pixels above SNR=3
#gauss_fitter(region = 'Cepheus_L1251', direct = '/Users/jkeown/Desktop/GAS_dendro/', snr_min = 3.0, mol = 'HC5N', peak_channels = [402,460], convolve=64., use_old_conv=False)
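# A further (hypothetical) example -- the SNR cut and core count are
# placeholders to adapt to your own data: fit C2S in Cepheus_L1251 at native
# resolution using 4 cores and the default 'all' file extension.
#gauss_fitter(region = 'Cepheus_L1251', snr_min = 5.0, mol = 'C2S', vmin=-6.3, vmax=-2.2, multicore=4)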
| mit |
andrewnc/scikit-learn | examples/plot_johnson_lindenstrauss_bound.py | 127 | 7477 | r"""
=====================================================================
The Johnson-Lindenstrauss bound for embedding with random projections
=====================================================================
The `Johnson-Lindenstrauss lemma`_ states that any high dimensional
dataset can be randomly projected into a lower dimensional Euclidean
space while controlling the distortion in the pairwise distances.
.. _`Johnson-Lindenstrauss lemma`: http://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma
Theoretical bounds
==================
The distortion introduced by a random projection `p` is asserted by
the fact that `p` defines an eps-embedding with good probability,
as defined by:
.. math::
(1 - eps) \|u - v\|^2 < \|p(u) - p(v)\|^2 < (1 + eps) \|u - v\|^2
Where u and v are any rows taken from a dataset of shape [n_samples,
n_features] and p is a projection by a random Gaussian N(0, 1) matrix
with shape [n_components, n_features] (or a sparse Achlioptas matrix).
The minimum number of components that guarantees the eps-embedding is
given by:
.. math::
n\_components >= 4 log(n\_samples) / (eps^2 / 2 - eps^3 / 3)
The first plot shows that with an increasing number of samples ``n_samples``,
the minimal number of dimensions ``n_components`` increases logarithmically
in order to guarantee an ``eps``-embedding.
The second plot shows that an increase of the admissible
distortion ``eps`` allows one to drastically reduce the minimal number of
dimensions ``n_components`` for a given number of samples ``n_samples``.
Empirical validation
====================
We validate the above bounds on the digits dataset or on the 20 newsgroups
text document (TF-IDF word frequencies) dataset:
- for the digits dataset, the 8x8 gray-level pixel data for 500
handwritten digit pictures are randomly projected to spaces with various
larger numbers of dimensions ``n_components``.
- for the 20 newsgroups dataset some 500 documents with 100k
features in total are projected using a sparse random matrix to smaller
euclidean spaces with various values for the target number of dimensions
``n_components``.
The default dataset is the digits dataset. To run the example on the twenty
newsgroups dataset, pass the --twenty-newsgroups command line argument to this
script.
For each value of ``n_components``, we plot:
- 2D distribution of sample pairs with pairwise distances in original
and projected spaces as x and y axis respectively.
- 1D histogram of the ratio of those distances (projected / original).
We can see that for low values of ``n_components`` the distribution is wide
with many distorted pairs and a skewed distribution (due to the hard
limit of zero ratio on the left as distances are always positive)
while for larger values of n_components the distortion is controlled
and the distances are well preserved by the random projection.
Remarks
=======
According to the JL lemma, projecting 500 samples without too much distortion
will require at least several thousand dimensions, irrespective of the
number of features of the original dataset.
Hence using random projections on the digits dataset which only has 64 features
in the input space does not make sense: it does not allow for dimensionality
reduction in this case.
On the twenty newsgroups dataset, on the other hand, the dimensionality can be decreased
from 56436 down to 10000 while reasonably preserving pairwise distances.
"""
print(__doc__)
import sys
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import SparseRandomProjection
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.datasets import load_digits
from sklearn.metrics.pairwise import euclidean_distances
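# A quick, illustrative check of the theoretical bound (the choice of 500
# samples and eps=0.1 mirrors the "Remarks" section above and is otherwise
# arbitrary): the required number of dimensions is already in the thousands,
# irrespective of the original number of features.
print("Johnson-Lindenstrauss bound for 500 samples at eps=0.1: %d components"
      % johnson_lindenstrauss_min_dim(n_samples=500, eps=0.1))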
# Part 1: plot the theoretical dependency between n_components_min and
# n_samples
# range of admissible distortions
eps_range = np.linspace(0.1, 0.99, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(eps_range)))
# range of number of samples (observation) to embed
n_samples_range = np.logspace(1, 9, 9)
plt.figure()
for eps, color in zip(eps_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples_range, eps=eps)
plt.loglog(n_samples_range, min_n_components, color=color)
plt.legend(["eps = %0.1f" % eps for eps in eps_range], loc="lower right")
plt.xlabel("Number of observations to eps-embed")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_samples vs n_components")
# range of admissible distortions
eps_range = np.linspace(0.01, 0.99, 100)
# range of number of samples (observation) to embed
n_samples_range = np.logspace(2, 6, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(n_samples_range)))
plt.figure()
for n_samples, color in zip(n_samples_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples, eps=eps_range)
plt.semilogy(eps_range, min_n_components, color=color)
plt.legend(["n_samples = %d" % n for n in n_samples_range], loc="upper right")
plt.xlabel("Distortion eps")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_components vs eps")
# Part 2: perform sparse random projection of some digits images which are
# quite low dimensional and dense or documents of the 20 newsgroups dataset
# which is both high dimensional and sparse
if '--twenty-newsgroups' in sys.argv:
# Need an internet connection hence not enabled by default
data = fetch_20newsgroups_vectorized().data[:500]
else:
data = load_digits().data[:500]
n_samples, n_features = data.shape
print("Embedding %d samples with dim %d using various random projections"
% (n_samples, n_features))
n_components_range = np.array([300, 1000, 10000])
dists = euclidean_distances(data, squared=True).ravel()
# select only non-identical samples pairs
nonzero = dists != 0
dists = dists[nonzero]
for n_components in n_components_range:
t0 = time()
rp = SparseRandomProjection(n_components=n_components)
projected_data = rp.fit_transform(data)
print("Projected %d samples from %d to %d in %0.3fs"
% (n_samples, n_features, n_components, time() - t0))
if hasattr(rp, 'components_'):
n_bytes = rp.components_.data.nbytes
n_bytes += rp.components_.indices.nbytes
print("Random matrix with size: %0.3fMB" % (n_bytes / 1e6))
projected_dists = euclidean_distances(
projected_data, squared=True).ravel()[nonzero]
plt.figure()
plt.hexbin(dists, projected_dists, gridsize=100, cmap=plt.cm.PuBu)
plt.xlabel("Pairwise squared distances in original space")
plt.ylabel("Pairwise squared distances in projected space")
plt.title("Pairwise distances distribution for n_components=%d" %
n_components)
cb = plt.colorbar()
cb.set_label('Sample pairs counts')
rates = projected_dists / dists
print("Mean distances rate: %0.2f (%0.2f)"
% (np.mean(rates), np.std(rates)))
plt.figure()
plt.hist(rates, bins=50, normed=True, range=(0., 2.))
plt.xlabel("Squared distances rate: projected / original")
plt.ylabel("Distribution of samples pairs")
plt.title("Histogram of pairwise distance rates for n_components=%d" %
n_components)
# TODO: compute the expected value of eps and add them to the previous plot
# as vertical lines / region
plt.show()
| bsd-3-clause |
mojoboss/scikit-learn | sklearn/utils/tests/test_murmurhash.py | 261 | 2836 | # Author: Olivier Grisel <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.externals.six import b, u
from sklearn.utils.murmurhash import murmurhash3_32
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from nose.tools import assert_equal, assert_true
def test_mmhash3_int():
assert_equal(murmurhash3_32(3), 847579505)
assert_equal(murmurhash3_32(3, seed=0), 847579505)
assert_equal(murmurhash3_32(3, seed=42), -1823081949)
assert_equal(murmurhash3_32(3, positive=False), 847579505)
assert_equal(murmurhash3_32(3, seed=0, positive=False), 847579505)
assert_equal(murmurhash3_32(3, seed=42, positive=False), -1823081949)
assert_equal(murmurhash3_32(3, positive=True), 847579505)
assert_equal(murmurhash3_32(3, seed=0, positive=True), 847579505)
assert_equal(murmurhash3_32(3, seed=42, positive=True), 2471885347)
def test_mmhash3_int_array():
rng = np.random.RandomState(42)
keys = rng.randint(-5342534, 345345, size=3 * 2 * 1).astype(np.int32)
keys = keys.reshape((3, 2, 1))
for seed in [0, 42]:
expected = np.array([murmurhash3_32(int(k), seed)
for k in keys.flat])
expected = expected.reshape(keys.shape)
assert_array_equal(murmurhash3_32(keys, seed), expected)
for seed in [0, 42]:
expected = np.array([murmurhash3_32(k, seed, positive=True)
for k in keys.flat])
expected = expected.reshape(keys.shape)
assert_array_equal(murmurhash3_32(keys, seed, positive=True),
expected)
def test_mmhash3_bytes():
assert_equal(murmurhash3_32(b('foo'), 0), -156908512)
assert_equal(murmurhash3_32(b('foo'), 42), -1322301282)
assert_equal(murmurhash3_32(b('foo'), 0, positive=True), 4138058784)
assert_equal(murmurhash3_32(b('foo'), 42, positive=True), 2972666014)
def test_mmhash3_unicode():
assert_equal(murmurhash3_32(u('foo'), 0), -156908512)
assert_equal(murmurhash3_32(u('foo'), 42), -1322301282)
assert_equal(murmurhash3_32(u('foo'), 0, positive=True), 4138058784)
assert_equal(murmurhash3_32(u('foo'), 42, positive=True), 2972666014)
def test_no_collision_on_byte_range():
previous_hashes = set()
for i in range(100):
h = murmurhash3_32(' ' * i, 0)
assert_true(h not in previous_hashes,
"Found collision on growing empty string")
def test_uniform_distribution():
n_bins, n_samples = 10, 100000
bins = np.zeros(n_bins, dtype=np.float)
for i in range(n_samples):
bins[murmurhash3_32(i, positive=True) % n_bins] += 1
means = bins / n_samples
expected = np.ones(n_bins) / n_bins
assert_array_almost_equal(means / expected, np.ones(n_bins), 2)
| bsd-3-clause |
ehickox2012/bitraider | bitraider/strategy.py | 2 | 6391 | import sys
import pytz
#import xml.utils.iso8601
import time
import numpy
from datetime import date, datetime, timedelta
from matplotlib import pyplot as plt
from exchange import cb_exchange as cb_exchange
from exchange import CoinbaseExchangeAuth
from abc import ABCMeta, abstractmethod
class strategy(object):
"""`strategy` defines an abstract base strategy class. The minimum required to create a strategy is a file with a class which inherits from strategy and implements the abstract trade method. As a bonus, strategy includes utility functions like calculate_historic_data.
"""
__metaclass__ = ABCMeta
def __init__(self, name="default name", interval=5):
"""Constructor for an abstract strategy. You can modify it as needed.
\n`interval`: a.k.a. timeslice; the amount of time in seconds for each 'tick'. Default is 5
\n`name`: a string name for the strategy
"""
self.name = name
self.interval = interval
self.times_recalculated = 0
@abstractmethod
def trade(self, timeslice):
"""Perform operations on a timeslice.
\n`timeslice`: a section of trade data with time length equal to the strategy's interval, formatted as follows:
\n[time, low, high, open, close, volume]
"""
return
def backtest_strategy(self, historic_data):
"""Returns performance of a strategy vs market performance.
"""
# Reverse the data since Coinbase returns it in reverse chronological order
# so that historic_data starts with the oldest entry
historic_data = list(reversed(historic_data))
earliest_time = float(historic_data[0][0])
latest_time = float(historic_data[-1][0])
start_price = float(historic_data[0][4])
end_price = float(historic_data[-1][4])
market_performance = ((end_price-start_price)/start_price)*100
print("Running simulation on historic data. This may take some time....")
for timeslice in historic_data:
# Display what percent through the data we are
idx = historic_data.index(timeslice)
percent = (float(idx)/float(len(historic_data)))*100 + 1
sys.stdout.write("\r%d%%" % percent)
sys.stdout.flush()
self.trade(timeslice)
# Calculate performance
end_amt_no_trades = (float(self.exchange.start_usd)/float(end_price)) + float(self.exchange.start_btc)
end_amt = (float(self.exchange.usd_bal)/float(end_price)) + float(self.exchange.btc_bal)
start_amt = (float(self.exchange.start_usd)/float(start_price)) + float(self.exchange.start_btc)
strategy_performance = ((end_amt-start_amt)/start_amt)*100
print("\n")
print("Times recalculated: "+str(self.times_recalculated))
print("Times bought: "+str(self.exchange.times_bought))
print("Times sold: "+str(self.exchange.times_sold))
print("The Market's performance: "+str(market_performance)+" %")
print("Strategy's performance: "+str(strategy_performance)+" %")
print("Account's ending value if no trades were made: "+str(end_amt_no_trades)+" BTC")
print("Account's ending value with this strategy: "+str(end_amt)+" BTC")
strategy_performance_vs_market = strategy_performance - market_performance
if strategy_performance > market_performance:
print("Congratulations! This strategy has beaten the market by: "+str(strategy_performance_vs_market)+" %")
elif strategy_performance < market_performance:
print("This strategy has performed: "+str(strategy_performance_vs_market)+" % worse than market.")
return strategy_performance_vs_market, strategy_performance, market_performance
@staticmethod
def calculate_historic_data(data, pivot):
"""Returns average price weighted according to volume, and the number of bitcoins traded
above and below a price point, called a pivot.\n
\npivot: the price used for returning volume above and below
\ndata: a list of lists formatted as follows [time, low, high, open, close, volume]
\n[
\n\t["2014-11-07 22:19:28.578544+00", "0.32", "4.2", "0.35", "4.2", "12.3"],
\n\t\t...
\n]
"""
price_list = []
weights = []
if data is None:
pass
min_price = float(data[0][1])
max_price = float(data[0][2])
discrete_prices = {}
for timeslice in data:
timeslice = [float(i) for i in timeslice]
if max_price < timeslice[2]:
max_price = timeslice[2]
if min_price > timeslice[1]:
min_price = timeslice[1]
closing_price = timeslice[4]
volume = timeslice[5]
if closing_price not in discrete_prices.keys():
discrete_prices[str(closing_price)] = volume
else:
discrete_prices[str(closing_price)] += volume
idx = data.index(timeslice)
price_list.append(closing_price)
weights.append(volume)
fltprices = [float(i) for i in discrete_prices.keys()]
fltvolumes = [float(i) for i in discrete_prices.values()]
np_discrete_prices = numpy.array(fltprices)
np_volume_per_price = numpy.array(fltvolumes)
weighted_avg = numpy.average(np_discrete_prices, weights=np_volume_per_price)
num_above = 0
num_below = 0
num_at = 0
for key in discrete_prices.keys():
value = discrete_prices[key]
if float(key) > pivot:
num_above+=value
elif float(key) < pivot:
num_below+=value
elif float(key) == pivot:
num_at+=value
total_volume = 0.0
for volume in fltvolumes:
total_volume+=volume
fltprops = []
for volume in fltvolumes:
fltprops.append((volume/total_volume))
#print("num_below: "+str(num_below))
#print("num_above: "+str(num_above))
#print("num_at: "+str(num_at))
#print("weighted_average: "+str(weighted_avg))
#plt.title("Price distribution")
#plt.xlabel("Price (USD)")
#plt.ylabel("Volume")
#plt.bar(fltprices, fltprops)
#plt.show()
return weighted_avg, num_above, num_below
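# A minimal sketch of a concrete strategy, for illustration only: it inherits
# from `strategy` and implements the abstract `trade` method. It places no
# orders, since order placement depends on the cb_exchange interface; a real
# strategy would act on self.exchange inside trade().
class hold_strategy(strategy):
    """Example strategy that only tracks the latest closing price."""
    def __init__(self, name="hold example", interval=5):
        self.name = name
        self.interval = interval
        self.times_recalculated = 0
        self.last_close = None
    def trade(self, timeslice):
        # timeslice is formatted as [time, low, high, open, close, volume]
        self.last_close = float(timeslice[4])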
| mit |
untom/scikit-learn | sklearn/decomposition/tests/test_fastica.py | 272 | 7798 | """
Test the fastica algorithm.
"""
import itertools
import warnings
import numpy as np
from scipy import stats
from nose.tools import assert_raises
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
from sklearn.decomposition import FastICA, fastica, PCA
from sklearn.decomposition.fastica_ import _gs_decorrelation
from sklearn.externals.six import moves
def center_and_norm(x, axis=-1):
""" Centers and norms x **in place**
Parameters
-----------
x: ndarray
Array with an axis of observations (statistical units) measured on
random variables.
axis: int, optional
Axis along which the mean and variance are calculated.
"""
x = np.rollaxis(x, axis)
x -= x.mean(axis=0)
x /= x.std(axis=0)
def test_gs():
# Test gram schmidt orthonormalization
# generate a random orthogonal matrix
rng = np.random.RandomState(0)
W, _, _ = np.linalg.svd(rng.randn(10, 10))
w = rng.randn(10)
_gs_decorrelation(w, W, 10)
assert_less((w ** 2).sum(), 1.e-10)
w = rng.randn(10)
u = _gs_decorrelation(w, W, 5)
tmp = np.dot(u, W.T)
assert_less((tmp[:5] ** 2).sum(), 1.e-10)
def test_fastica_simple(add_noise=False):
# Test the FastICA algorithm on very simple data.
rng = np.random.RandomState(0)
# scipy.stats uses the global RNG:
np.random.seed(0)
n_samples = 1000
# Generate two sources:
s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1
s2 = stats.t.rvs(1, size=n_samples)
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing angle
phi = 0.6
mixing = np.array([[np.cos(phi), np.sin(phi)],
[np.sin(phi), -np.cos(phi)]])
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(2, 1000)
center_and_norm(m)
# function as fun arg
def g_test(x):
return x ** 3, (3 * x ** 2).mean(axis=-1)
algos = ['parallel', 'deflation']
nls = ['logcosh', 'exp', 'cube', g_test]
whitening = [True, False]
for algo, nl, whiten in itertools.product(algos, nls, whitening):
if whiten:
k_, mixing_, s_ = fastica(m.T, fun=nl, algorithm=algo)
assert_raises(ValueError, fastica, m.T, fun=np.tanh,
algorithm=algo)
else:
X = PCA(n_components=2, whiten=True).fit_transform(m.T)
k_, mixing_, s_ = fastica(X, fun=nl, algorithm=algo, whiten=False)
assert_raises(ValueError, fastica, X, fun=np.tanh,
algorithm=algo)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
if whiten:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
else:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=1)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=1)
# Test FastICA class
_, _, sources_fun = fastica(m.T, fun=nl, algorithm=algo, random_state=0)
ica = FastICA(fun=nl, algorithm=algo, random_state=0)
sources = ica.fit_transform(m.T)
assert_equal(ica.components_.shape, (2, 2))
assert_equal(sources.shape, (1000, 2))
assert_array_almost_equal(sources_fun, sources)
assert_array_almost_equal(sources, ica.transform(m.T))
assert_equal(ica.mixing_.shape, (2, 2))
for fn in [np.tanh, "exp(-.5(x^2))"]:
ica = FastICA(fun=fn, algorithm=algo, random_state=0)
assert_raises(ValueError, ica.fit, m.T)
assert_raises(TypeError, FastICA(fun=moves.xrange(10)).fit, m.T)
def test_fastica_nowhiten():
m = [[0, 1], [1, 0]]
# test for issue #697
ica = FastICA(n_components=1, whiten=False, random_state=0)
assert_warns(UserWarning, ica.fit, m)
assert_true(hasattr(ica, 'mixing_'))
def test_non_square_fastica(add_noise=False):
# Test the FastICA algorithm on very simple data.
rng = np.random.RandomState(0)
n_samples = 1000
# Generate two sources:
t = np.linspace(0, 100, n_samples)
s1 = np.sin(t)
s2 = np.ceil(np.sin(np.pi * t))
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing matrix
mixing = rng.randn(6, 2)
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(6, n_samples)
center_and_norm(m)
k_, mixing_, s_ = fastica(m.T, n_components=2, random_state=rng)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=3)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=3)
def test_fit_transform():
# Test FastICA.fit_transform
rng = np.random.RandomState(0)
X = rng.random_sample((100, 10))
for whiten, n_components in [[True, 5], [False, None]]:
n_components_ = (n_components if n_components is not None else
X.shape[1])
ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
Xt = ica.fit_transform(X)
assert_equal(ica.components_.shape, (n_components_, 10))
assert_equal(Xt.shape, (100, n_components_))
ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
ica.fit(X)
assert_equal(ica.components_.shape, (n_components_, 10))
Xt2 = ica.transform(X)
assert_array_almost_equal(Xt, Xt2)
def test_inverse_transform():
# Test FastICA.inverse_transform
n_features = 10
n_samples = 100
n1, n2 = 5, 10
rng = np.random.RandomState(0)
X = rng.random_sample((n_samples, n_features))
expected = {(True, n1): (n_features, n1),
(True, n2): (n_features, n2),
(False, n1): (n_features, n2),
(False, n2): (n_features, n2)}
for whiten in [True, False]:
for n_components in [n1, n2]:
n_components_ = (n_components if n_components is not None else
X.shape[1])
ica = FastICA(n_components=n_components, random_state=rng,
whiten=whiten)
with warnings.catch_warnings(record=True):
# catch "n_components ignored" warning
Xt = ica.fit_transform(X)
expected_shape = expected[(whiten, n_components_)]
assert_equal(ica.mixing_.shape, expected_shape)
X2 = ica.inverse_transform(Xt)
assert_equal(X.shape, X2.shape)
# reversibility test in non-reduction case
if n_components == X.shape[1]:
assert_array_almost_equal(X, X2)
| bsd-3-clause |
shusenl/scikit-learn | examples/decomposition/plot_faces_decomposition.py | 103 | 4394 | """
============================
Faces dataset decompositions
============================
This example applies different unsupervised matrix decomposition
(dimension reduction) methods from the module
:py:mod:`sklearn.decomposition` to the :ref:`olivetti_faces` dataset
(see the documentation chapter :ref:`decompositions`).
"""
print(__doc__)
# Authors: Vlad Niculae, Alexandre Gramfort
# License: BSD 3 clause
import logging
from time import time
from numpy.random import RandomState
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.cluster import MiniBatchKMeans
from sklearn import decomposition
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
n_row, n_col = 2, 3
n_components = n_row * n_col
image_shape = (64, 64)
rng = RandomState(0)
###############################################################################
# Load faces data
dataset = fetch_olivetti_faces(shuffle=True, random_state=rng)
faces = dataset.data
n_samples, n_features = faces.shape
# global centering
faces_centered = faces - faces.mean(axis=0)
# local centering
faces_centered -= faces_centered.mean(axis=1).reshape(n_samples, -1)
print("Dataset consists of %d faces" % n_samples)
###############################################################################
def plot_gallery(title, images, n_col=n_col, n_row=n_row):
plt.figure(figsize=(2. * n_col, 2.26 * n_row))
plt.suptitle(title, size=16)
for i, comp in enumerate(images):
plt.subplot(n_row, n_col, i + 1)
vmax = max(comp.max(), -comp.min())
plt.imshow(comp.reshape(image_shape), cmap=plt.cm.gray,
interpolation='nearest',
vmin=-vmax, vmax=vmax)
plt.xticks(())
plt.yticks(())
plt.subplots_adjust(0.01, 0.05, 0.99, 0.93, 0.04, 0.)
###############################################################################
# List of the different estimators, whether to center and transpose the
# problem, and whether the transformer uses the clustering API.
estimators = [
('Eigenfaces - RandomizedPCA',
decomposition.RandomizedPCA(n_components=n_components, whiten=True),
True),
('Non-negative components - NMF',
decomposition.NMF(n_components=n_components, init='nndsvda', tol=5e-3),
False),
('Independent components - FastICA',
decomposition.FastICA(n_components=n_components, whiten=True),
True),
('Sparse comp. - MiniBatchSparsePCA',
decomposition.MiniBatchSparsePCA(n_components=n_components, alpha=0.8,
n_iter=100, batch_size=3,
random_state=rng),
True),
('MiniBatchDictionaryLearning',
decomposition.MiniBatchDictionaryLearning(n_components=15, alpha=0.1,
n_iter=50, batch_size=3,
random_state=rng),
True),
('Cluster centers - MiniBatchKMeans',
MiniBatchKMeans(n_clusters=n_components, tol=1e-3, batch_size=20,
max_iter=50, random_state=rng),
True),
('Factor Analysis components - FA',
decomposition.FactorAnalysis(n_components=n_components, max_iter=2),
True),
]
###############################################################################
# Plot a sample of the input data
plot_gallery("First centered Olivetti faces", faces_centered[:n_components])
###############################################################################
# Do the estimation and plot it
for name, estimator, center in estimators:
print("Extracting the top %d %s..." % (n_components, name))
t0 = time()
data = faces
if center:
data = faces_centered
estimator.fit(data)
train_time = (time() - t0)
print("done in %0.3fs" % train_time)
if hasattr(estimator, 'cluster_centers_'):
components_ = estimator.cluster_centers_
else:
components_ = estimator.components_
if hasattr(estimator, 'noise_variance_'):
plot_gallery("Pixelwise variance",
estimator.noise_variance_.reshape(1, -1), n_col=1,
n_row=1)
plot_gallery('%s - Train time %.1fs' % (name, train_time),
components_[:n_components])
plt.show()
| bsd-3-clause |
henrykironde/scikit-learn | examples/linear_model/plot_multi_task_lasso_support.py | 249 | 2211 | #!/usr/bin/env python
"""
=============================================
Joint feature selection with multi-task Lasso
=============================================
The multi-task lasso allows one to fit multiple regression problems
jointly, enforcing the selected features to be the same across
tasks. This example simulates sequential measurements: each task
is a time instant, and the relevant features vary in amplitude
over time while remaining the same across tasks. The multi-task lasso
imposes that features selected at one time point are selected for all
time points. This makes feature selection by the Lasso more stable.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import MultiTaskLasso, Lasso
rng = np.random.RandomState(42)
# Generate some 2D coefficients with sine waves with random frequency and phase
n_samples, n_features, n_tasks = 100, 30, 40
n_relevant_features = 5
coef = np.zeros((n_tasks, n_features))
times = np.linspace(0, 2 * np.pi, n_tasks)
for k in range(n_relevant_features):
coef[:, k] = np.sin((1. + rng.randn(1)) * times + 3 * rng.randn(1))
X = rng.randn(n_samples, n_features)
Y = np.dot(X, coef.T) + rng.randn(n_samples, n_tasks)
coef_lasso_ = np.array([Lasso(alpha=0.5).fit(X, y).coef_ for y in Y.T])
coef_multi_task_lasso_ = MultiTaskLasso(alpha=1.).fit(X, Y).coef_
###############################################################################
# Plot support and time series
fig = plt.figure(figsize=(8, 5))
plt.subplot(1, 2, 1)
plt.spy(coef_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'Lasso')
plt.subplot(1, 2, 2)
plt.spy(coef_multi_task_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'MultiTaskLasso')
fig.suptitle('Coefficient non-zero location')
feature_to_plot = 0
plt.figure()
plt.plot(coef[:, feature_to_plot], 'k', label='Ground truth')
plt.plot(coef_lasso_[:, feature_to_plot], 'g', label='Lasso')
plt.plot(coef_multi_task_lasso_[:, feature_to_plot],
'r', label='MultiTaskLasso')
plt.legend(loc='upper center')
plt.axis('tight')
plt.ylim([-1.1, 1.1])
plt.show()
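# A small follow-up sketch: quantify how well each estimator recovers the
# ground-truth support (the non-zero entries of `coef`).  The 1e-10 threshold
# below is an arbitrary choice for "non-zero".
true_support = coef != 0
lasso_support = np.abs(coef_lasso_) > 1e-10
multi_task_support = np.abs(coef_multi_task_lasso_) > 1e-10
print("Lasso support accuracy: %.3f" % np.mean(lasso_support == true_support))
print("MultiTaskLasso support accuracy: %.3f"
      % np.mean(multi_task_support == true_support))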
| bsd-3-clause |
chrsrds/scikit-learn | sklearn/neural_network/tests/test_stochastic_optimizers.py | 2 | 4212 | import numpy as np
from sklearn.neural_network._stochastic_optimizers import (BaseOptimizer,
SGDOptimizer,
AdamOptimizer)
from sklearn.utils.testing import assert_array_equal
shapes = [(4, 6), (6, 8), (7, 8, 9)]
def test_base_optimizer():
params = [np.zeros(shape) for shape in shapes]
for lr in [10 ** i for i in range(-3, 4)]:
optimizer = BaseOptimizer(params, lr)
assert optimizer.trigger_stopping('', False)
def test_sgd_optimizer_no_momentum():
params = [np.zeros(shape) for shape in shapes]
for lr in [10 ** i for i in range(-3, 4)]:
optimizer = SGDOptimizer(params, lr, momentum=0, nesterov=False)
grads = [np.random.random(shape) for shape in shapes]
expected = [param - lr * grad for param, grad in zip(params, grads)]
optimizer.update_params(grads)
for exp, param in zip(expected, optimizer.params):
assert_array_equal(exp, param)
def test_sgd_optimizer_momentum():
params = [np.zeros(shape) for shape in shapes]
lr = 0.1
for momentum in np.arange(0.5, 0.9, 0.1):
optimizer = SGDOptimizer(params, lr, momentum=momentum, nesterov=False)
velocities = [np.random.random(shape) for shape in shapes]
optimizer.velocities = velocities
grads = [np.random.random(shape) for shape in shapes]
updates = [momentum * velocity - lr * grad
for velocity, grad in zip(velocities, grads)]
expected = [param + update for param, update in zip(params, updates)]
optimizer.update_params(grads)
for exp, param in zip(expected, optimizer.params):
assert_array_equal(exp, param)
def test_sgd_optimizer_trigger_stopping():
params = [np.zeros(shape) for shape in shapes]
lr = 2e-6
optimizer = SGDOptimizer(params, lr, lr_schedule='adaptive')
assert not optimizer.trigger_stopping('', False)
assert lr / 5 == optimizer.learning_rate
assert optimizer.trigger_stopping('', False)
def test_sgd_optimizer_nesterovs_momentum():
params = [np.zeros(shape) for shape in shapes]
lr = 0.1
for momentum in np.arange(0.5, 0.9, 0.1):
optimizer = SGDOptimizer(params, lr, momentum=momentum, nesterov=True)
velocities = [np.random.random(shape) for shape in shapes]
optimizer.velocities = velocities
grads = [np.random.random(shape) for shape in shapes]
updates = [momentum * velocity - lr * grad
for velocity, grad in zip(velocities, grads)]
updates = [momentum * update - lr * grad
for update, grad in zip(updates, grads)]
expected = [param + update for param, update in zip(params, updates)]
optimizer.update_params(grads)
for exp, param in zip(expected, optimizer.params):
assert_array_equal(exp, param)
def test_adam_optimizer():
params = [np.zeros(shape) for shape in shapes]
lr = 0.001
epsilon = 1e-8
for beta_1 in np.arange(0.9, 1.0, 0.05):
for beta_2 in np.arange(0.995, 1.0, 0.001):
optimizer = AdamOptimizer(params, lr, beta_1, beta_2, epsilon)
ms = [np.random.random(shape) for shape in shapes]
vs = [np.random.random(shape) for shape in shapes]
t = 10
optimizer.ms = ms
optimizer.vs = vs
optimizer.t = t - 1
grads = [np.random.random(shape) for shape in shapes]
ms = [beta_1 * m + (1 - beta_1) * grad
for m, grad in zip(ms, grads)]
vs = [beta_2 * v + (1 - beta_2) * (grad ** 2)
for v, grad in zip(vs, grads)]
learning_rate = lr * np.sqrt(1 - beta_2 ** t) / (1 - beta_1**t)
updates = [-learning_rate * m / (np.sqrt(v) + epsilon)
for m, v in zip(ms, vs)]
expected = [param + update
for param, update in zip(params, updates)]
optimizer.update_params(grads)
for exp, param in zip(expected, optimizer.params):
assert_array_equal(exp, param)
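# Compact restatement of the update rule encoded in test_adam_optimizer above,
# kept as a standalone helper for readability; the name and signature are
# illustrative only, not part of the library.
def _reference_adam_step(param, grad, m, v, t,
                         lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-8):
    """Return (updated_param, m, v) after a single Adam step."""
    m = beta_1 * m + (1 - beta_1) * grad
    v = beta_2 * v + (1 - beta_2) * grad ** 2
    lr_t = lr * np.sqrt(1 - beta_2 ** t) / (1 - beta_1 ** t)
    return param - lr_t * m / (np.sqrt(v) + epsilon), m, v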
| bsd-3-clause |
lionaneesh/sugarlabs-calculate | plotlib.py | 1 | 9620 | # plotlib.py, svg plot generator by Reinier Heeres <[email protected]>
# Copyright (C) 2012 Aneesh Dogra <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# Change log:
# 2007-09-04: rwh, first version
import types
import math
import logging
_logger = logging.getLogger('PlotLib')
USE_MPL = True
def format_float(x):
return ('%.2f' % x).rstrip('0').rstrip('.')
class _PlotBase:
"""Class to generate an svg plot for a function.
Evaluation of values is done using the EqnParser class."""
def __init__(self, parser):
self.svg_data = ""
self.parser = parser
def get_svg(self):
return self.svg_data
def set_svg(self, data):
self.svg_data = data
def evaluate(self, eqn, var, range, points=100):
x_old = self.parser.get_var(var)
if type(eqn) in (types.StringType, types.UnicodeType):
eqn = self.parser.parse(eqn)
res = []
d = float((range[1] - range[0])) / (points - 1)
x = range[0]
while points > 0:
self.parser.set_var(var, x)
ret = self.parser.evaluate(eqn)
if ret is not None:
v = float(ret)
else:
v = 0
res.append((x, v))
x += d
points -= 1
self.parser.set_var(var, x_old)
return res
def export_plot(self, fn):
f = open(fn, "w")
f.write(self.get_svg())
f.close()
def produce_plot(self, vals, *args, **kwargs):
'''Function to produce the actual plot, override.'''
pass
def plot(self, eqn, **kwargs):
'''
Plot function <eqn>.
kwargs can contain: 'points'
The last item in kwargs is interpreted as the variable that should
be varied.
'''
_logger.debug('plot(): %r, %r', eqn, kwargs)
if len(kwargs) == 0:
_logger.error('No variables specified.')
return None
points = kwargs.pop('points', 100)
if len(kwargs) > 1:
_logger.error('Too many variables specified')
return None
for var, range in kwargs.iteritems():
_logger.info('Plot range for var %s: %r', var, range)
vals = self.evaluate(eqn, var, range, points=points)
_logger.debug('vals are %r', vals)
svg = self.produce_plot(vals, xlabel=var, ylabel='f(x)')
_logger.debug('SVG Data: %s', svg)
self.set_svg(svg)
# self.export_plot("/tmp/calculate_graph.svg")
if type(svg) is types.UnicodeType:
return svg.encode('utf-8')
else:
return svg
class CustomPlot(_PlotBase):
def __init__(self, parser):
_PlotBase.__init__(self, parser)
self.set_size(0, 0)
def set_size(self, width, height):
self.width = width
self.height = height
def create_image(self):
self.svg_data = '<?xml version="1.0" standalone="no"?>\n'
self.svg_data += '<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">\n'
self.svg_data += '<svg width="%d" height="%d" version="1.1" xmlns="http://www.w3.org/2000/svg">\n' % (self.width, self.height)
def finish_image(self):
self.svg_data += '</svg>'
def plot_line(self, c0, c1, col):
c0 = self.rcoords_to_coords(c0)
c1 = self.rcoords_to_coords(c1)
self.svg_data += '<line style="stroke:%s;stroke-width:1" x1="%f" y1="%f" x2="%f" y2="%f" />\n' % (col, c0[0], c0[1], c1[0], c1[1])
def plot_polyline(self, coords, col):
self.svg_data += '<polyline style="fill:none;stroke:%s;stroke-width:1" points="' % (col)
for c in coords:
c = self.rcoords_to_coords(c)
self.svg_data += '%f,%f ' % (c[0], c[1])
self.svg_data += '" />\n'
def add_text(self, c, text, rotate=0):
if type(text) is types.UnicodeType:
text = text.encode('utf-8')
c = self.rcoords_to_coords(c)
self.svg_data += '<text x="%f" y="%f"' % (c[0], c[1])
if rotate != 0:
self.svg_data += ' transform="rotate(%d)"' % (rotate)
self.svg_data += '>%s</text>\n' % (text)
def determine_bounds(self, vals):
self.minx = self.miny = 1e99
self.maxx = self.maxy = -1e99
for (x, y) in vals:
self.minx = min(float(x), self.minx)
self.miny = min(float(y), self.miny)
self.maxx = max(float(x), self.maxx)
self.maxy = max(float(y), self.maxy)
if self.minx == self.maxx:
x_space = 0.5
else:
x_space = 0.02 * (self.maxx - self.minx)
self.minx -= x_space
self.maxx += x_space
if self.miny == self.maxy:
y_space = 0.5
else:
y_space = 0.02 * (self.maxy - self.miny)
self.miny -= y_space
self.maxy += y_space
def rcoords_to_coords(self, pair):
"""Convert fractional coordinates to image coordinates"""
return (pair[0] * self.width, pair[1] * self.height)
def vals_to_rcoords(self, pair):
"""Convert values to fractional coordinates"""
ret = (0.1 + (pair[0] - self.minx) / (self.maxx - self.minx) * 0.8,
0.9 - (pair[1] - self.miny) / (self.maxy - self.miny) * 0.8)
return ret
def add_curve(self, vals):
self.determine_bounds(vals)
c = []
for v in vals:
c.append(self.vals_to_rcoords(v))
# print 'coords: %r' % c
self.plot_polyline(c, "blue")
def get_label_vals(self, startx, endx, n, opts=()):
"""Return label values"""
        range = endx - startx
        logrange = math.log(range)
        # True when the label range straddles zero
        haszero = (startx < 0) and (endx > 0)
def draw_axes(self, labelx, labely, val):
"""Draw axes on the plot."""
F = 0.8
NOL = 4 # maximum no of labels
y_coords = sorted([i[1] for i in val])
x_coords = sorted([i[0] for i in val])
max_y = max(y_coords)
min_y = min(y_coords)
max_x = max(x_coords)
min_x = min(x_coords)
# X axis
interval = len(val)/(NOL - 1)
self.plot_line((0.11, 0.89), (0.92, 0.89), "black")
if max_x != min_x:
self.add_text((0.11 + min_x + F * 0, 0.93), format_float(min_x))
plot_index = interval
while plot_index <= len(val) - interval:
self.add_text((0.11 + F * abs(x_coords[plot_index] - min_x) / \
abs(max_x - min_x), 0.93),
format_float(x_coords[plot_index]))
plot_index += interval
self.add_text((0.11 + F * 1, 0.93), format_float(max_x))
else:
self.add_text((0.5 , 0.93), format_float(min_x))
self.add_text((0.50, 0.98), labelx)
# Y axis
interval = float(max_y - min_y)/(NOL - 1)
self.plot_line((0.11, 0.08), (0.11, 0.89), "black")
# if its a constant function we only need to plot one label
if min_y == max_y:
self.add_text((-0.50, 0.10), format_float(min_y), rotate=-90)
else:
self.add_text((-0.90, 0.10), format_float(min_y), rotate=-90)
plot_value = min_y + interval
while plot_value <= max_y - interval:
self.add_text((-(0.91 - F * abs(plot_value - min_y) / \
abs(max_y - min_y)), 0.10),
format_float(plot_value), rotate=-90)
plot_value += interval
self.add_text((-(0.89 - F), 0.10), format_float(max_y), rotate=-90)
self.add_text((-0.50, 0.045), labely, rotate=-90)
def produce_plot(self, vals, *args, **kwargs):
"""Produce an svg plot."""
self.set_size(250, 250)
self.create_image()
self.draw_axes(kwargs.get('xlabel', ''), kwargs.get('ylabel', ''), vals)
self.add_curve(vals)
self.finish_image()
return self.svg_data
class MPLPlot(_PlotBase):
def __init__(self, parser):
_PlotBase.__init__(self, parser)
def produce_plot(self, vals, **kwargs):
x = [c[0] for c in vals]
y = [c[1] for c in vals]
fig = pylab.figure()
fig.set_size_inches(5, 5)
ax = fig.add_subplot(111)
ax.plot(x, y, 'r-')
ax.set_xlabel(kwargs.get('xlabel', ''))
ax.set_ylabel(kwargs.get('ylabel', ''))
data = StringIO.StringIO()
fig.savefig(data)
return data.getvalue()
if USE_MPL:
try:
import matplotlib as mpl
mpl.use('svg')
from matplotlib import pylab
import StringIO
Plot = MPLPlot
_logger.debug('Using matplotlib as plotting back-end')
except ImportError:
USE_MPL = False
if not USE_MPL:
Plot = CustomPlot
_logger.debug('Using custom plotting back-end')
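# Minimal usage sketch, assuming the same Python 2 environment as the rest of
# this module.  CustomPlot.produce_plot() works directly on (x, y) pairs and
# never touches the parser, so None can be passed; the helper name is ad hoc.
def _example_svg_plot():
    vals = [(x / 10.0, (x / 10.0) ** 2) for x in range(-20, 21)]
    plotter = CustomPlot(None)
    return plotter.produce_plot(vals, xlabel='x', ylabel='x^2')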
| gpl-2.0 |
scipy/scipy | scipy/stats/_stats_mstats_common.py | 12 | 16438 | import numpy as np
import scipy.stats.stats
from . import distributions
from .._lib._bunch import _make_tuple_bunch
__all__ = ['_find_repeats', 'linregress', 'theilslopes', 'siegelslopes']
# This is not a namedtuple for backwards compatibility. See PR #12983
LinregressResult = _make_tuple_bunch('LinregressResult',
['slope', 'intercept', 'rvalue',
'pvalue', 'stderr'],
extra_field_names=['intercept_stderr'])
def linregress(x, y=None, alternative='two-sided'):
"""
Calculate a linear least-squares regression for two sets of measurements.
Parameters
----------
x, y : array_like
Two sets of measurements. Both arrays should have the same length. If
only `x` is given (and ``y=None``), then it must be a two-dimensional
array where one dimension has length 2. The two sets of measurements
are then found by splitting the array along the length-2 dimension. In
the case where ``y=None`` and `x` is a 2x2 array, ``linregress(x)`` is
equivalent to ``linregress(x[0], x[1])``.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis. Default is 'two-sided'.
The following options are available:
* 'two-sided': the slope of the regression line is nonzero
* 'less': the slope of the regression line is less than zero
* 'greater': the slope of the regression line is greater than zero
.. versionadded:: 1.7.0
Returns
-------
result : ``LinregressResult`` instance
The return value is an object with the following attributes:
slope : float
Slope of the regression line.
intercept : float
Intercept of the regression line.
rvalue : float
Correlation coefficient.
pvalue : float
The p-value for a hypothesis test whose null hypothesis is
that the slope is zero, using Wald Test with t-distribution of
the test statistic. See `alternative` above for alternative
hypotheses.
stderr : float
Standard error of the estimated slope (gradient), under the
assumption of residual normality.
intercept_stderr : float
Standard error of the estimated intercept, under the assumption
of residual normality.
See Also
--------
scipy.optimize.curve_fit :
Use non-linear least squares to fit a function to data.
scipy.optimize.leastsq :
Minimize the sum of squares of a set of equations.
Notes
-----
Missing values are considered pair-wise: if a value is missing in `x`,
the corresponding value in `y` is masked.
For compatibility with older versions of SciPy, the return value acts
like a ``namedtuple`` of length 5, with fields ``slope``, ``intercept``,
``rvalue``, ``pvalue`` and ``stderr``, so one can continue to write::
slope, intercept, r, p, se = linregress(x, y)
With that style, however, the standard error of the intercept is not
available. To have access to all the computed values, including the
standard error of the intercept, use the return value as an object
with attributes, e.g.::
result = linregress(x, y)
print(result.intercept, result.intercept_stderr)
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy import stats
>>> rng = np.random.default_rng()
Generate some data:
>>> x = rng.random(10)
>>> y = 1.6*x + rng.random(10)
Perform the linear regression:
>>> res = stats.linregress(x, y)
Coefficient of determination (R-squared):
>>> print(f"R-squared: {res.rvalue**2:.6f}")
R-squared: 0.717533
Plot the data along with the fitted line:
>>> plt.plot(x, y, 'o', label='original data')
>>> plt.plot(x, res.intercept + res.slope*x, 'r', label='fitted line')
>>> plt.legend()
>>> plt.show()
Calculate 95% confidence interval on slope and intercept:
>>> # Two-sided inverse Students t-distribution
>>> # p - probability, df - degrees of freedom
>>> from scipy.stats import t
>>> tinv = lambda p, df: abs(t.ppf(p/2, df))
>>> ts = tinv(0.05, len(x)-2)
>>> print(f"slope (95%): {res.slope:.6f} +/- {ts*res.stderr:.6f}")
slope (95%): 1.453392 +/- 0.743465
>>> print(f"intercept (95%): {res.intercept:.6f}"
... f" +/- {ts*res.intercept_stderr:.6f}")
intercept (95%): 0.616950 +/- 0.544475
"""
TINY = 1.0e-20
if y is None: # x is a (2, N) or (N, 2) shaped array_like
x = np.asarray(x)
if x.shape[0] == 2:
x, y = x
elif x.shape[1] == 2:
x, y = x.T
else:
raise ValueError("If only `x` is given as input, it has to "
"be of shape (2, N) or (N, 2); provided shape "
f"was {x.shape}.")
else:
x = np.asarray(x)
y = np.asarray(y)
if x.size == 0 or y.size == 0:
raise ValueError("Inputs must not be empty.")
n = len(x)
xmean = np.mean(x, None)
ymean = np.mean(y, None)
# Average sums of square differences from the mean
# ssxm = mean( (x-mean(x))^2 )
# ssxym = mean( (x-mean(x)) * (y-mean(y)) )
ssxm, ssxym, _, ssym = np.cov(x, y, bias=1).flat
# R-value
# r = ssxym / sqrt( ssxm * ssym )
if ssxm == 0.0 or ssym == 0.0:
# If the denominator was going to be 0
r = 0.0
else:
r = ssxym / np.sqrt(ssxm * ssym)
# Test for numerical error propagation (make sure -1 < r < 1)
if r > 1.0:
r = 1.0
elif r < -1.0:
r = -1.0
slope = ssxym / ssxm
intercept = ymean - slope*xmean
if n == 2:
# handle case when only two points are passed in
if y[0] == y[1]:
prob = 1.0
else:
prob = 0.0
slope_stderr = 0.0
intercept_stderr = 0.0
else:
df = n - 2 # Number of degrees of freedom
# n-2 degrees of freedom because 2 has been used up
# to estimate the mean and standard deviation
t = r * np.sqrt(df / ((1.0 - r + TINY)*(1.0 + r + TINY)))
t, prob = scipy.stats.stats._ttest_finish(df, t, alternative)
slope_stderr = np.sqrt((1 - r**2) * ssym / ssxm / df)
# Also calculate the standard error of the intercept
# The following relationship is used:
# ssxm = mean( (x-mean(x))^2 )
# = ssx - sx*sx
# = mean( x^2 ) - mean(x)^2
intercept_stderr = slope_stderr * np.sqrt(ssxm + xmean**2)
return LinregressResult(slope=slope, intercept=intercept, rvalue=r,
pvalue=prob, stderr=slope_stderr,
intercept_stderr=intercept_stderr)
def theilslopes(y, x=None, alpha=0.95):
r"""
Computes the Theil-Sen estimator for a set of points (x, y).
`theilslopes` implements a method for robust linear regression. It
computes the slope as the median of all slopes between paired values.
Parameters
----------
y : array_like
Dependent variable.
x : array_like or None, optional
Independent variable. If None, use ``arange(len(y))`` instead.
alpha : float, optional
Confidence degree between 0 and 1. Default is 95% confidence.
Note that `alpha` is symmetric around 0.5, i.e. both 0.1 and 0.9 are
interpreted as "find the 90% confidence interval".
Returns
-------
medslope : float
Theil slope.
medintercept : float
Intercept of the Theil line, as ``median(y) - medslope*median(x)``.
lo_slope : float
Lower bound of the confidence interval on `medslope`.
up_slope : float
Upper bound of the confidence interval on `medslope`.
See also
--------
siegelslopes : a similar technique using repeated medians
Notes
-----
The implementation of `theilslopes` follows [1]_. The intercept is
not defined in [1]_, and here it is defined as ``median(y) -
medslope*median(x)``, which is given in [3]_. Other definitions of
the intercept exist in the literature. A confidence interval for
the intercept is not given as this question is not addressed in
[1]_.
References
----------
.. [1] P.K. Sen, "Estimates of the regression coefficient based on
Kendall's tau", J. Am. Stat. Assoc., Vol. 63, pp. 1379-1389, 1968.
.. [2] H. Theil, "A rank-invariant method of linear and polynomial
regression analysis I, II and III", Nederl. Akad. Wetensch., Proc.
53:, pp. 386-392, pp. 521-525, pp. 1397-1412, 1950.
.. [3] W.L. Conover, "Practical nonparametric statistics", 2nd ed.,
John Wiley and Sons, New York, pp. 493.
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-5, 5, num=150)
>>> y = x + np.random.normal(size=x.size)
>>> y[11:15] += 10 # add outliers
>>> y[-5:] -= 7
Compute the slope, intercept and 90% confidence interval. For comparison,
also compute the least-squares fit with `linregress`:
>>> res = stats.theilslopes(y, x, 0.90)
>>> lsq_res = stats.linregress(x, y)
Plot the results. The Theil-Sen regression line is shown in red, with the
dashed red lines illustrating the confidence interval of the slope (note
that the dashed red lines are not the confidence interval of the regression
as the confidence interval of the intercept is not included). The green
line shows the least-squares fit for comparison.
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, y, 'b.')
>>> ax.plot(x, res[1] + res[0] * x, 'r-')
>>> ax.plot(x, res[1] + res[2] * x, 'r--')
>>> ax.plot(x, res[1] + res[3] * x, 'r--')
>>> ax.plot(x, lsq_res[1] + lsq_res[0] * x, 'g-')
>>> plt.show()
"""
# We copy both x and y so we can use _find_repeats.
y = np.array(y).flatten()
if x is None:
x = np.arange(len(y), dtype=float)
else:
x = np.array(x, dtype=float).flatten()
if len(x) != len(y):
raise ValueError("Incompatible lengths ! (%s<>%s)" %
(len(y), len(x)))
# Compute sorted slopes only when deltax > 0
deltax = x[:, np.newaxis] - x
deltay = y[:, np.newaxis] - y
slopes = deltay[deltax > 0] / deltax[deltax > 0]
slopes.sort()
medslope = np.median(slopes)
medinter = np.median(y) - medslope * np.median(x)
# Now compute confidence intervals
if alpha > 0.5:
alpha = 1. - alpha
z = distributions.norm.ppf(alpha / 2.)
# This implements (2.6) from Sen (1968)
_, nxreps = _find_repeats(x)
_, nyreps = _find_repeats(y)
nt = len(slopes) # N in Sen (1968)
ny = len(y) # n in Sen (1968)
# Equation 2.6 in Sen (1968):
sigsq = 1/18. * (ny * (ny-1) * (2*ny+5) -
sum(k * (k-1) * (2*k + 5) for k in nxreps) -
sum(k * (k-1) * (2*k + 5) for k in nyreps))
# Find the confidence interval indices in `slopes`
sigma = np.sqrt(sigsq)
Ru = min(int(np.round((nt - z*sigma)/2.)), len(slopes)-1)
Rl = max(int(np.round((nt + z*sigma)/2.)) - 1, 0)
delta = slopes[[Rl, Ru]]
return medslope, medinter, delta[0], delta[1]
def _find_repeats(arr):
# This function assumes it may clobber its input.
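    # For example, _find_repeats([1., 2., 2., 3., 3., 3.]) returns
    # (array([2., 3.]), array([2, 3])): only values occurring more than once,
    # together with their counts.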
if len(arr) == 0:
return np.array(0, np.float64), np.array(0, np.intp)
# XXX This cast was previously needed for the Fortran implementation,
# should we ditch it?
arr = np.asarray(arr, np.float64).ravel()
arr.sort()
# Taken from NumPy 1.9's np.unique.
change = np.concatenate(([True], arr[1:] != arr[:-1]))
unique = arr[change]
change_idx = np.concatenate(np.nonzero(change) + ([arr.size],))
freq = np.diff(change_idx)
atleast2 = freq > 1
return unique[atleast2], freq[atleast2]
def siegelslopes(y, x=None, method="hierarchical"):
r"""
Computes the Siegel estimator for a set of points (x, y).
`siegelslopes` implements a method for robust linear regression
using repeated medians (see [1]_) to fit a line to the points (x, y).
The method is robust to outliers with an asymptotic breakdown point
of 50%.
Parameters
----------
y : array_like
Dependent variable.
x : array_like or None, optional
Independent variable. If None, use ``arange(len(y))`` instead.
method : {'hierarchical', 'separate'}
If 'hierarchical', estimate the intercept using the estimated
slope ``medslope`` (default option).
If 'separate', estimate the intercept independent of the estimated
slope. See Notes for details.
Returns
-------
medslope : float
Estimate of the slope of the regression line.
medintercept : float
Estimate of the intercept of the regression line.
See also
--------
theilslopes : a similar technique without repeated medians
Notes
-----
With ``n = len(y)``, compute ``m_j`` as the median of
the slopes from the point ``(x[j], y[j])`` to all other `n-1` points.
``medslope`` is then the median of all slopes ``m_j``.
Two ways are given to estimate the intercept in [1]_ which can be chosen
via the parameter ``method``.
The hierarchical approach uses the estimated slope ``medslope``
and computes ``medintercept`` as the median of ``y - medslope*x``.
The other approach estimates the intercept separately as follows: for
each point ``(x[j], y[j])``, compute the intercepts of all the `n-1`
lines through the remaining points and take the median ``i_j``.
``medintercept`` is the median of the ``i_j``.
The implementation computes `n` times the median of a vector of size `n`
which can be slow for large vectors. There are more efficient algorithms
(see [2]_) which are not implemented here.
References
----------
.. [1] A. Siegel, "Robust Regression Using Repeated Medians",
Biometrika, Vol. 69, pp. 242-244, 1982.
.. [2] A. Stein and M. Werman, "Finding the repeated median regression
line", Proceedings of the Third Annual ACM-SIAM Symposium on
Discrete Algorithms, pp. 409-413, 1992.
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-5, 5, num=150)
>>> y = x + np.random.normal(size=x.size)
>>> y[11:15] += 10 # add outliers
>>> y[-5:] -= 7
Compute the slope and intercept. For comparison, also compute the
least-squares fit with `linregress`:
>>> res = stats.siegelslopes(y, x)
>>> lsq_res = stats.linregress(x, y)
Plot the results. The Siegel regression line is shown in red. The green
line shows the least-squares fit for comparison.
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, y, 'b.')
>>> ax.plot(x, res[1] + res[0] * x, 'r-')
>>> ax.plot(x, lsq_res[1] + lsq_res[0] * x, 'g-')
>>> plt.show()
"""
if method not in ['hierarchical', 'separate']:
raise ValueError("method can only be 'hierarchical' or 'separate'")
y = np.asarray(y).ravel()
if x is None:
x = np.arange(len(y), dtype=float)
else:
x = np.asarray(x, dtype=float).ravel()
if len(x) != len(y):
raise ValueError("Incompatible lengths ! (%s<>%s)" %
(len(y), len(x)))
deltax = x[:, np.newaxis] - x
deltay = y[:, np.newaxis] - y
slopes, intercepts = [], []
for j in range(len(x)):
id_nonzero = deltax[j, :] != 0
slopes_j = deltay[j, id_nonzero] / deltax[j, id_nonzero]
medslope_j = np.median(slopes_j)
slopes.append(medslope_j)
if method == 'separate':
z = y*x[j] - y[j]*x
medintercept_j = np.median(z[id_nonzero] / deltax[j, id_nonzero])
intercepts.append(medintercept_j)
medslope = np.median(np.asarray(slopes))
if method == "separate":
medinter = np.median(np.asarray(intercepts))
else:
medinter = np.median(y - medslope*x)
return medslope, medinter
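# Quick illustrative sketch (the helper name is ad hoc, not public API): both
# robust estimators stay close to the true slope of 2 despite one gross
# outlier, while the least-squares slope is pulled away noticeably.
def _robust_slopes_sketch():
    x = np.arange(20, dtype=float)
    y = 2.0 * x + 1.0
    y[-1] += 100.0  # single outlier
    return (theilslopes(y, x)[0], siegelslopes(y, x)[0],
            linregress(x, y).slope)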
| bsd-3-clause |
shangwuhencc/scikit-learn | sklearn/kernel_approximation.py | 258 | 17973 | """
The :mod:`sklearn.kernel_approximation` module implements several
approximate kernel feature maps based on Fourier transforms.
"""
# Author: Andreas Mueller <[email protected]>
#
# License: BSD 3 clause
import warnings
import numpy as np
import scipy.sparse as sp
from scipy.linalg import svd
from .base import BaseEstimator
from .base import TransformerMixin
from .utils import check_array, check_random_state, as_float_array
from .utils.extmath import safe_sparse_dot
from .utils.validation import check_is_fitted
from .metrics.pairwise import pairwise_kernels
class RBFSampler(BaseEstimator, TransformerMixin):
"""Approximates feature map of an RBF kernel by Monte Carlo approximation
of its Fourier transform.
It implements a variant of Random Kitchen Sinks.[1]
Read more in the :ref:`User Guide <rbf_kernel_approx>`.
Parameters
----------
gamma : float
Parameter of RBF kernel: exp(-gamma * x^2)
n_components : int
Number of Monte Carlo samples per original feature.
Equals the dimensionality of the computed feature space.
random_state : {int, RandomState}, optional
If int, random_state is the seed used by the random number generator;
if RandomState instance, random_state is the random number generator.
Notes
-----
See "Random Features for Large-Scale Kernel Machines" by A. Rahimi and
Benjamin Recht.
[1] "Weighted Sums of Random Kitchen Sinks: Replacing
minimization with randomization in learning" by A. Rahimi and
Benjamin Recht.
(http://www.eecs.berkeley.edu/~brecht/papers/08.rah.rec.nips.pdf)
"""
def __init__(self, gamma=1., n_components=100, random_state=None):
self.gamma = gamma
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X.
Samples random projection according to n_features.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the transformer.
"""
X = check_array(X, accept_sparse='csr')
random_state = check_random_state(self.random_state)
n_features = X.shape[1]
self.random_weights_ = (np.sqrt(2 * self.gamma) * random_state.normal(
size=(n_features, self.n_components)))
self.random_offset_ = random_state.uniform(0, 2 * np.pi,
size=self.n_components)
return self
def transform(self, X, y=None):
"""Apply the approximate feature map to X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'random_weights_')
X = check_array(X, accept_sparse='csr')
projection = safe_sparse_dot(X, self.random_weights_)
projection += self.random_offset_
np.cos(projection, projection)
projection *= np.sqrt(2.) / np.sqrt(self.n_components)
return projection
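# Quick illustrative check, assuming the Gaussian (RBF) kernel
# k(x, y) = exp(-gamma * ||x - y||^2): inner products of the mapped features
# approximate the exact kernel, with the error shrinking as n_components
# grows.  The helper name below is ad hoc and not part of the public API.
def _rbf_sampler_sketch(gamma=0.5, n_components=2000, seed=0):
    rng = np.random.RandomState(seed)
    X = rng.randn(5, 3)
    exact = pairwise_kernels(X, metric='rbf', gamma=gamma)
    Z = RBFSampler(gamma=gamma, n_components=n_components,
                   random_state=seed).fit_transform(X)
    return np.abs(Z.dot(Z.T) - exact).max()  # typically of order 1e-2 here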
class SkewedChi2Sampler(BaseEstimator, TransformerMixin):
"""Approximates feature map of the "skewed chi-squared" kernel by Monte
Carlo approximation of its Fourier transform.
Read more in the :ref:`User Guide <skewed_chi_kernel_approx>`.
Parameters
----------
skewedness : float
"skewedness" parameter of the kernel. Needs to be cross-validated.
n_components : int
number of Monte Carlo samples per original feature.
Equals the dimensionality of the computed feature space.
random_state : {int, RandomState}, optional
If int, random_state is the seed used by the random number generator;
if RandomState instance, random_state is the random number generator.
References
----------
See "Random Fourier Approximations for Skewed Multiplicative Histogram
Kernels" by Fuxin Li, Catalin Ionescu and Cristian Sminchisescu.
See also
--------
AdditiveChi2Sampler : A different approach for approximating an additive
variant of the chi squared kernel.
sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.
"""
def __init__(self, skewedness=1., n_components=100, random_state=None):
self.skewedness = skewedness
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X.
Samples random projection according to n_features.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the transformer.
"""
X = check_array(X)
random_state = check_random_state(self.random_state)
n_features = X.shape[1]
uniform = random_state.uniform(size=(n_features, self.n_components))
# transform by inverse CDF of sech
self.random_weights_ = (1. / np.pi
* np.log(np.tan(np.pi / 2. * uniform)))
self.random_offset_ = random_state.uniform(0, 2 * np.pi,
size=self.n_components)
return self
def transform(self, X, y=None):
"""Apply the approximate feature map to X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'random_weights_')
X = as_float_array(X, copy=True)
X = check_array(X, copy=False)
if (X < 0).any():
raise ValueError("X may not contain entries smaller than zero.")
X += self.skewedness
np.log(X, X)
projection = safe_sparse_dot(X, self.random_weights_)
projection += self.random_offset_
np.cos(projection, projection)
projection *= np.sqrt(2.) / np.sqrt(self.n_components)
return projection
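# Minimal usage sketch (the helper name is ad hoc): the sampler requires
# non-negative inputs and maps n_features columns to n_components features.
def _skewed_chi2_sketch():
    X = np.random.RandomState(0).rand(6, 4)  # non-negative toy data
    Z = SkewedChi2Sampler(skewedness=1., n_components=50,
                          random_state=0).fit_transform(X)
    return Z.shape  # (6, 50)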
class AdditiveChi2Sampler(BaseEstimator, TransformerMixin):
"""Approximate feature map for additive chi2 kernel.
    Uses sampling of the Fourier transform of the kernel characteristic
at regular intervals.
Since the kernel that is to be approximated is additive, the components of
the input vectors can be treated separately. Each entry in the original
    space is transformed into 2*sample_steps-1 features, where sample_steps is
a parameter of the method. Typical values of sample_steps include 1, 2 and
3.
Optimal choices for the sampling interval for certain data ranges can be
computed (see the reference). The default values should be reasonable.
Read more in the :ref:`User Guide <additive_chi_kernel_approx>`.
Parameters
----------
sample_steps : int, optional
Gives the number of (complex) sampling points.
sample_interval : float, optional
Sampling interval. Must be specified when sample_steps not in {1,2,3}.
Notes
-----
This estimator approximates a slightly different version of the additive
    chi squared kernel than ``metric.additive_chi2`` computes.
See also
--------
SkewedChi2Sampler : A Fourier-approximation to a non-additive variant of
the chi squared kernel.
sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.
sklearn.metrics.pairwise.additive_chi2_kernel : The exact additive chi
squared kernel.
References
----------
See `"Efficient additive kernels via explicit feature maps"
<http://www.robots.ox.ac.uk/~vedaldi/assets/pubs/vedaldi11efficient.pdf>`_
A. Vedaldi and A. Zisserman, Pattern Analysis and Machine Intelligence,
2011
"""
def __init__(self, sample_steps=2, sample_interval=None):
self.sample_steps = sample_steps
self.sample_interval = sample_interval
def fit(self, X, y=None):
"""Set parameters."""
X = check_array(X, accept_sparse='csr')
if self.sample_interval is None:
# See reference, figure 2 c)
if self.sample_steps == 1:
self.sample_interval_ = 0.8
elif self.sample_steps == 2:
self.sample_interval_ = 0.5
elif self.sample_steps == 3:
self.sample_interval_ = 0.4
else:
raise ValueError("If sample_steps is not in [1, 2, 3],"
" you need to provide sample_interval")
else:
self.sample_interval_ = self.sample_interval
return self
def transform(self, X, y=None):
"""Apply approximate feature map to X.
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features)
Returns
-------
        X_new : {array, sparse matrix}, \
               shape = (n_samples, n_features * (2*sample_steps - 1))
            Whether the return value is an array or a sparse matrix depends on
the type of the input X.
"""
msg = ("%(name)s is not fitted. Call fit to set the parameters before"
" calling transform")
check_is_fitted(self, "sample_interval_", msg=msg)
X = check_array(X, accept_sparse='csr')
sparse = sp.issparse(X)
# check if X has negative values. Doesn't play well with np.log.
if ((X.data if sparse else X) < 0).any():
raise ValueError("Entries of X must be non-negative.")
# zeroth component
# 1/cosh = sech
# cosh(0) = 1.0
transf = self._transform_sparse if sparse else self._transform_dense
return transf(X)
def _transform_dense(self, X):
non_zero = (X != 0.0)
X_nz = X[non_zero]
X_step = np.zeros_like(X)
X_step[non_zero] = np.sqrt(X_nz * self.sample_interval_)
X_new = [X_step]
log_step_nz = self.sample_interval_ * np.log(X_nz)
step_nz = 2 * X_nz * self.sample_interval_
for j in range(1, self.sample_steps):
factor_nz = np.sqrt(step_nz /
np.cosh(np.pi * j * self.sample_interval_))
X_step = np.zeros_like(X)
X_step[non_zero] = factor_nz * np.cos(j * log_step_nz)
X_new.append(X_step)
X_step = np.zeros_like(X)
X_step[non_zero] = factor_nz * np.sin(j * log_step_nz)
X_new.append(X_step)
return np.hstack(X_new)
def _transform_sparse(self, X):
indices = X.indices.copy()
indptr = X.indptr.copy()
data_step = np.sqrt(X.data * self.sample_interval_)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new = [X_step]
log_step_nz = self.sample_interval_ * np.log(X.data)
step_nz = 2 * X.data * self.sample_interval_
for j in range(1, self.sample_steps):
factor_nz = np.sqrt(step_nz /
np.cosh(np.pi * j * self.sample_interval_))
data_step = factor_nz * np.cos(j * log_step_nz)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new.append(X_step)
data_step = factor_nz * np.sin(j * log_step_nz)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new.append(X_step)
return sp.hstack(X_new)
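# Minimal shape sketch (the helper name is ad hoc): each of the 3 input
# features is expanded into 2 * sample_steps - 1 output features.
def _additive_chi2_sketch(sample_steps=2):
    X = np.random.RandomState(0).rand(4, 3)  # non-negative toy data
    Xt = AdditiveChi2Sampler(sample_steps=sample_steps).fit_transform(X)
    return Xt.shape  # (4, 3 * (2 * sample_steps - 1)) == (4, 9)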
class Nystroem(BaseEstimator, TransformerMixin):
"""Approximate a kernel map using a subset of the training data.
Constructs an approximate feature map for an arbitrary kernel
using a subset of the data as basis.
Read more in the :ref:`User Guide <nystroem_kernel_approx>`.
Parameters
----------
kernel : string or callable, default="rbf"
Kernel map to be approximated. A callable should accept two arguments
and the keyword arguments passed to this object as kernel_params, and
should return a floating point number.
n_components : int
Number of features to construct.
How many data points will be used to construct the mapping.
gamma : float, default=None
Gamma parameter for the RBF, polynomial, exponential chi2 and
sigmoid kernels. Interpretation of the default value is left to
the kernel; see the documentation for sklearn.metrics.pairwise.
Ignored by other kernels.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
kernel_params : mapping of string to any, optional
Additional parameters (keyword arguments) for kernel function passed
as callable object.
random_state : {int, RandomState}, optional
If int, random_state is the seed used by the random number generator;
if RandomState instance, random_state is the random number generator.
Attributes
----------
components_ : array, shape (n_components, n_features)
Subset of training points used to construct the feature map.
component_indices_ : array, shape (n_components)
Indices of ``components_`` in the training set.
normalization_ : array, shape (n_components, n_components)
Normalization matrix needed for embedding.
Square root of the kernel matrix on ``components_``.
References
----------
* Williams, C.K.I. and Seeger, M.
"Using the Nystroem method to speed up kernel machines",
Advances in neural information processing systems 2001
* T. Yang, Y. Li, M. Mahdavi, R. Jin and Z. Zhou
"Nystroem Method vs Random Fourier Features: A Theoretical and Empirical
Comparison",
Advances in Neural Information Processing Systems 2012
See also
--------
RBFSampler : An approximation to the RBF kernel using random Fourier
features.
sklearn.metrics.pairwise.kernel_metrics : List of built-in kernels.
"""
def __init__(self, kernel="rbf", gamma=None, coef0=1, degree=3,
kernel_params=None, n_components=100, random_state=None):
self.kernel = kernel
self.gamma = gamma
self.coef0 = coef0
self.degree = degree
self.kernel_params = kernel_params
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit estimator to data.
Samples a subset of training points, computes kernel
on these and computes normalization matrix.
Parameters
----------
X : array-like, shape=(n_samples, n_feature)
Training data.
"""
X = check_array(X, accept_sparse='csr')
rnd = check_random_state(self.random_state)
n_samples = X.shape[0]
# get basis vectors
if self.n_components > n_samples:
# XXX should we just bail?
n_components = n_samples
warnings.warn("n_components > n_samples. This is not possible.\n"
"n_components was set to n_samples, which results"
" in inefficient evaluation of the full kernel.")
else:
n_components = self.n_components
n_components = min(n_samples, n_components)
inds = rnd.permutation(n_samples)
basis_inds = inds[:n_components]
basis = X[basis_inds]
basis_kernel = pairwise_kernels(basis, metric=self.kernel,
filter_params=True,
**self._get_kernel_params())
# sqrt of kernel matrix on basis vectors
U, S, V = svd(basis_kernel)
S = np.maximum(S, 1e-12)
self.normalization_ = np.dot(U * 1. / np.sqrt(S), V)
self.components_ = basis
self.component_indices_ = inds
return self
def transform(self, X):
"""Apply feature map to X.
Computes an approximate feature map using the kernel
between some training points and X.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Data to transform.
Returns
-------
X_transformed : array, shape=(n_samples, n_components)
Transformed data.
"""
check_is_fitted(self, 'components_')
X = check_array(X, accept_sparse='csr')
kernel_params = self._get_kernel_params()
embedded = pairwise_kernels(X, self.components_,
metric=self.kernel,
filter_params=True,
**kernel_params)
return np.dot(embedded, self.normalization_.T)
def _get_kernel_params(self):
params = self.kernel_params
if params is None:
params = {}
if not callable(self.kernel):
params['gamma'] = self.gamma
params['degree'] = self.degree
params['coef0'] = self.coef0
return params
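# Illustrative sketch (the helper name is ad hoc): with n_components equal to
# the number of samples, the Nystroem embedding should closely reproduce the
# exact kernel matrix.
def _nystroem_sketch(gamma=0.3, seed=0):
    rng = np.random.RandomState(seed)
    X = rng.randn(20, 4)
    exact = pairwise_kernels(X, metric='rbf', gamma=gamma)
    Z = Nystroem(kernel='rbf', gamma=gamma, n_components=X.shape[0],
                 random_state=seed).fit_transform(X)
    return np.abs(Z.dot(Z.T) - exact).max()  # close to zero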
| bsd-3-clause |
ricket1978/ggplot | ggplot/stats/stat_hline.py | 12 | 1317 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import pandas as pd
from ggplot.utils import pop, make_iterable, make_iterable_ntimes
from ggplot.utils.exceptions import GgplotError
from .stat import stat
class stat_hline(stat):
DEFAULT_PARAMS = {'geom': 'hline', 'position': 'identity',
'yintercept': 0}
CREATES = {'yintercept'}
def _calculate(self, data):
y = pop(data, 'y', None)
# yintercept may be one of:
# - aesthetic to geom_hline or
# - parameter setting to stat_hline
yintercept = pop(data, 'yintercept', self.params['yintercept'])
if hasattr(yintercept, '__call__'):
if y is None:
raise GgplotError(
'To compute the intercept, y aesthetic is needed')
try:
yintercept = yintercept(y)
except TypeError as err:
raise GgplotError(*err.args)
yintercept = make_iterable(yintercept)
new_data = pd.DataFrame({'yintercept': yintercept})
# Copy the other aesthetics into the new dataframe
n = len(yintercept)
for ae in data:
new_data[ae] = make_iterable_ntimes(data[ae].iloc[0], n)
return new_data
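# Tiny illustrative sketch of the callable-yintercept case handled above,
# reproduced with plain pandas; the helper name is ad hoc and not part of
# ggplot's API.
def _hline_from_callable_sketch():
    data = pd.DataFrame({'y': [1.0, 2.0, 3.0, 4.0]})
    # supplying a callable such as yintercept=np.mean should yield an
    # equivalent single-row frame
    return pd.DataFrame({'yintercept': [data['y'].mean()]})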
| bsd-2-clause |
eli-s-goldberg/Praetorius_Goldberg_2016 | src/examples/NatVsTech_API_example.py | 2 | 6340 | # coding: utf-8
# ## Imports
# Note: python3. Please install requirements using requirments.txt in main directory.
# In[1]:
import sys
import glob
import fnmatch
import os.path
import pandas as pd
import matplotlib.pyplot as plt
from nanogbcdt.DataUtil import DataUtil
from nanogbcdt.NatVsTech import NatVsTech
# sys.path.append(os.path.abspath(os.path.join(os.path.dirname("__file__"), os.path.pardir)))
from sklearn.model_selection import GridSearchCV
# ## Directory structure
# Note: we generically define directory so it will work on any OS: mac/pc/linux.
# Note: drop the "" around "__file__" when in a regular python file.
# In[2]:
PARENT_PATH = os.path.abspath(os.path.join(os.path.dirname("__file__"), os.path.pardir))
DATABASES_BASEPATH = os.path.abspath(os.path.join(os.path.dirname("__file__"), 'databases'))
IMPORT_TRAINING_DATABASE_PATH = os.path.abspath(
os.path.join(DATABASES_BASEPATH, 'training_data'))
IMPORT_TESTING_DATABASE_PATH = os.path.abspath(
os.path.join(DATABASES_BASEPATH, 'test_data'))
OUTPUT_DATA_SUMMARY_PATH = os.path.abspath(
os.path.join(os.path.dirname("__file__"), 'output'))
# print the paths, just to make sure things make sense
print(PARENT_PATH)
print(DATABASES_BASEPATH)
print(IMPORT_TRAINING_DATABASE_PATH)
print(OUTPUT_DATA_SUMMARY_PATH)
# ## Training files
# Import training files, combine, and concatenate into dataframes.
# Note: if you re-run the notebook without resetting the kernel, you'll get an error. Restart the notebook kernel and
# it will work.
# In[3]:
# set the natural and technical database training file names
NATURAL_TRAINING_DATABASE_NAME_ = 'natural_training_data.csv'
TECHNICAL_TRAINING_DATABASE_NAME_ = 'technical_training_data.csv'
# change the directory to the import training data path
os.chdir(IMPORT_TRAINING_DATABASE_PATH)
# find all csv's in the directory
training_files = glob.glob('*.csv')
# iterate through files and assign classification id
for file in training_files:
if fnmatch.fnmatchcase(file, TECHNICAL_TRAINING_DATABASE_NAME_):
technical_training_database = pd.DataFrame.from_csv(
os.path.join(file), header=0, index_col=None)
# assign classification id
technical_training_database['classification'] = 0
elif fnmatch.fnmatchcase(file, NATURAL_TRAINING_DATABASE_NAME_):
natural_training_database = pd.DataFrame.from_csv(
os.path.join(file), header=0, index_col=None)
# assign classification id
natural_training_database['classification'] = 1
print(training_files)
# concatenate all the data into a single file
training_data = pd.concat([natural_training_database,
technical_training_database])
# remove all the na values (other filtering done later)
training_data = DataUtil.filter_na(training_data)
# ## Using the API
# Before you can use the API, you have to initialize the class. We'll then work through how the data is easily
# filtered, stored, and used for training and prediction.
# In[4]:
# initialize class
nat_v_tech = NatVsTech()
print(nat_v_tech)
# In[5]:
# filter the data of negative values
neg_filt_training_data = DataUtil.filter_negative(data=training_data)
# threshold the data with a single isotope trigger
thresh_neg_filt_training_data = DataUtil.apply_detection_threshold(data=neg_filt_training_data, threshold_value=5)
# print to make sure we're on target
print(thresh_neg_filt_training_data.head())
# In[6]:
# right now training data contains the classification data. Split it.
(training_df, target_df) = DataUtil.split_target_from_training_data(df=thresh_neg_filt_training_data)
# print training data to check structure
print(training_df.head())
# print target data to check structure
print(target_df.head())
# In[8]:
# conform the test data for ML and store it as X and y.
# (X, y) = nat_v_tech.conform_data_for_ML(training_df=training_df, target_df=target_df)
# initialize gbc parameters to determine max estimators with least overfitting
GBC_INIT_PARAMS = {'loss': 'deviance', 'learning_rate': 0.1,
'min_samples_leaf': 100, 'n_estimators': 1000,
'max_depth': 5, 'random_state': None, 'max_features': 'sqrt'}
# print to verify parameter init structure
print(GBC_INIT_PARAMS)
# outline grid search parameters
# set optimum boosting stages. Note: n_estimators automatically set
GBC_GRID_SEARCH_PARAMS = {'loss': ['exponential', 'deviance'],
'learning_rate': [0.01, 0.1],
'min_samples_leaf': [50, 100],
'random_state': [None],
'max_features': ['sqrt', 'log2'],
'max_depth': [5],
'n_estimators': [50]}
print(GBC_GRID_SEARCH_PARAMS)
# determining optimum feature selection with rfecv
result = nat_v_tech.rfecv_feature_identify(training_df=training_df, target_df=target_df,
gbc_grid_params=GBC_GRID_SEARCH_PARAMS,
gbc_init_params=GBC_INIT_PARAMS,
n_splits=3)
print(result.name_list_)
print(result.grid_scores_)
print(result.holdout_predictions_)
print(result.class_scores_f1_)
print(result.class_scores_r2_)
print(result.class_scores_mae_)
print(result.feature_importances_)
result.grid_scores_.plot(kind="box", ylim=[0, 1])
plt.show()
# In[8]:
# find optimum boosting stages
optimum_boosting_stages = nat_v_tech.find_min_boosting_stages(gbc_base_params=GBC_INIT_PARAMS,
training_df=training_df,
target_df=target_df)[1]
# print optimum boosting stages
print(optimum_boosting_stages)
# In[9]:
# create grid search parameters in which to find the optimum set,
# print search parameter grid to verify init structure
print(GBC_GRID_SEARCH_PARAMS)
# In[10]:
# find the optimum gbc parameters
gbc_fitted = nat_v_tech.find_optimum_gbc_parameters(crossfolds=5,
training_df=training_df,
target_df=target_df,
gbc_search_params=GBC_GRID_SEARCH_PARAMS)
# print the optimum gbc structure
print(gbc_fitted)
# In[12]:
# use the X and y data to train the model. Then test the trained model against the test data and output results.
nat_v_tech.apply_trained_classification(test_data_path=IMPORT_TESTING_DATABASE_PATH,
output_summary_data_path=OUTPUT_DATA_SUMMARY_PATH,
output_summary_base_name='summary.csv',
track_class_probabilities=[0.1, 0.1],
isotope_trigger='140Ce',
gbc_fitted=gbc_fitted,
training_df=training_df,
target_df=target_df)
| apache-2.0 |
dblalock/dig | python/dig/datasets/pamap.py | 2 | 3257 |
import os
import matplotlib.pyplot as plt
from joblib import Memory
memory = Memory('./')
join = os.path.join
import paths
from ..utils.files import listFilesInDir, ensureDirExists
from pamap_common import *
# ================================================================
# consts
MISSING_DATA_VALUE = -1.0e6 # defined by data creators
# paths
INDOOR_DIR = join(paths.PAMAP, 'indoor')
OUTDOOR_DIR = join(paths.PAMAP, 'outdoor')
FIG_SAVE_DIR = join('figs','pamap')
SAVE_DIR_LINE_GRAPH = join(FIG_SAVE_DIR, 'line')
SAVE_DIR_IMG = join(FIG_SAVE_DIR, 'img')
# activity names
ACTIVITY_IDS_2_NAMES = {
0: NAME_OTHER,
1: NAME_LYING,
2: NAME_SITTING,
3: NAME_STANDING,
10: NAME_SLOW_WALK,
11: NAME_WALK,
12: NAME_NORDIC_WALK,
13: NAME_RUN,
14: NAME_ASCEND_STAIRS,
15: NAME_DESCEND_STAIRS,
16: NAME_CYCLE,
20: NAME_IRONING,
21: NAME_VACUUM,
22: NAME_JUMP_ROPE,
23: NAME_SOCCER
}
# column names
IMU_COL_NAMES = ['temp',
'accelX', 'accelY', 'accelZ',
'gyroX', 'gyroY', 'gyroZ',
'magX', 'magY', 'magZ',
'null1', 'null2', 'null3', 'null4']
ALL_COL_NAMES = INITIAL_COL_NAMES
ALL_COL_NAMES.extend([name + '_hand' for name in IMU_COL_NAMES])
ALL_COL_NAMES.extend([name + '_chest' for name in IMU_COL_NAMES])
ALL_COL_NAMES.extend([name + '_shoe' for name in IMU_COL_NAMES])
# ================================================================
# utility funcs
def getIndoorFilePaths():
return listFilesInDir(INDOOR_DIR, endswith='.dat', absPaths=True)
def getOutdoorFilePaths():
return listFilesInDir(OUTDOOR_DIR, endswith='.dat', absPaths=True)
# def dfFromFileAtPath(path):
# # read in the data file and pull out the
# # columns with valid data (and also replace
# # their missing data marker with nan
# data = np.genfromtxt(path)
# data[data == MISSING_DATA_VALUE] = np.nan
# df = pd.DataFrame(data=data, columns=ALL_COL_NAMES)
# return df.filter(COL_NAMES)
def getAllPamapRecordings():
for p in getIndoorFilePaths() + getOutdoorFilePaths():
yield PamapRecording(p)
# ================================================================
# recording class
class PamapRecording(Recording):
def __init__(self, filePath):
super(PamapRecording, self).__init__(filePath,
MISSING_DATA_VALUE, ALL_COL_NAMES, ACTIVITY_IDS_2_NAMES)
self.isIndoor = INDOOR_DIR in filePath
def __str__(self):
s = "in" if self.isIndoor else "out"
return "subj%d_%s" % (self.subjId, s)
@memory.cache
def buildRecording(filePath):
return PamapRecording(filePath)
# ================================================================
# main
if __name__ == '__main__':
ensureDirExists(SAVE_DIR_LINE_GRAPH)
ensureDirExists(SAVE_DIR_IMG)
# r = buildRecording(INDOOR_DIR + '/subject1.dat')
# plt.figure(figsize=(WIDTH_LINE_GRAPH, HEIGHT_LINE_GRAPH))
# r.plot()
# plt.figure(figsize=(WIDTH_IMG, HEIGHT_IMG))
# r.imshow(znorm=True)
# plt.show()
# plt.savefig(SAVE_DIR_LINE_GRAPH + str(r))
recs = getAllPamapRecordings()
for r in recs:
print('plotting recording: ' + str(r))
# plt.figure(figsize=(WIDTH_LINE_GRAPH, HEIGHT_LINE_GRAPH))
# r.plot()
# plt.savefig(join(SAVE_DIR_LINE_GRAPH, str(r)))
plt.figure(figsize=(WIDTH_IMG, HEIGHT_IMG))
r.imshow(znorm=True)
plt.savefig(join(SAVE_DIR_IMG,str(r)))
# plt.show()
| mit |
LabMagUBO/StoneX | StoneX/Cycles.py | 1 | 27875 | #!/opt/local/bin/env python
# -*- coding: utf-8 -*-
"""
Cycle class.
Copyright (C) 2016 Jérôme Richy
"""
## Global module
import numpy as np
import matplotlib.pylab as pl
from matplotlib.backends.backend_pdf import PdfPages # Multiple page pdf
## StoneX module
from StoneX.Logging import *
from StoneX.Physics import *
class Cycle(object):
"""
Class containing the cycle's data.
self.model : model used to calculate the data
self.T
self.phi
self.Ms
self.data : array [H, Mt, Ml, theta_eq, alpha_eq (eventually)]
"""
def __init__(self, Htab, cols):
"""
Initiate the cycle.
Arguments :
— Htab : field array
— cols : number of columns for the data (minimum 3 : H, Mt, Ml)
self.data : H, Mt, Ml,
"""
# Creating logger for all children classes
self.logger = init_log(__name__)
## Check the number of cols
if cols < 3:
self.logger.error("Unsufficient number of columns.")
self.logger.warn("Setting 3 columns instead.")
cols = 3
self.data = np.zeros((Htab.size, cols))
# Storing the fields
self.data[:, 0] = Htab
# Energy
self.energy = np.zeros(Htab.size, dtype=object)
def info(self, sample, phi):
"""
Save cycles conditions : T, phi, model
"""
self.T = sample.T
self.phi = phi
self.model = sample.model
self.Ms = sample.Ms
def sum(self, cycl, selfdensity, density):
"""
Method for summing two Cycle objects.
Only sums Mt & Ml.
The density is the weight factor for the second cycle.
"""
try:
if not (self.data[:, 0] == cycl.data[:, 0]).all():
self.logger.error("The cycles does not have the same field values.")
self.logger.warn("Summing anyway.")
except AttributeError as err:
self.logger.error("Error when summing the cycles. The data does not have the same shape.\n{}".format(err))
self.logger.critical("Stopping the program.")
sys.exit(0)
# Summing Mt & Ml
self.data[:, 1:3] = self.data[:, 1:3] * selfdensity + cycl.data[:, 1:3] * density
# Erasing the rest
self.data[:, 3:] = None
def calc_properties(self):
"""
        Calculate, from self.data :
        — Hc1, Hc2 : coercive fields
        — Mr1, Mr2 : remanent magnetizations
        — max(|Mt|) (decreasing and increasing field)
        H is the field, M the magnetic moment.
        Zeros are located by linear interpolation between neighbouring points.
Store :
— H_coer : two coercive fields (left, right)
— Mr : remanent magnetization
— Mt_max : max of transverse
"""
# Properties array
self.H_coer = np.zeros(2)
self.Mr = np.zeros(2)
self.Mt_max = np.zeros(2)
# Making views of the data
H = self.data[:, 0]
Mt = self.data[:, 1]
Ml = self.data[:, 2]
# Initial sign of magnetization
sign_Ml = (Ml[0] > 0)
sign_H = (H[0] > 0)
# Loop over the field
for i in np.arange(1, H.size):
# if change of sign for Ml -> coercive field
if (Ml[i] > 0) != sign_Ml:
sign_Ml = not sign_Ml
self.H_coer[sign_Ml] = H[i-1] - (H[i] - H[i-1])/(Ml[i] - Ml[i-1]) * Ml[i-1]
# if change of sign for H -> remanent magnetization moment
if (H[i] > 0) != sign_H:
sign_H = not sign_H
self.Mr[sign_H] = Ml[i-1] - (Ml[i] - Ml[i-1])/(H[i] - H[i-1]) * H[i-1]
        # Maximum of |Mt| on the decreasing and increasing branches (should be symmetric)
half = H.size / 2
self.Mt_max[0] = np.amax(np.absolute(Mt[:half]))
self.Mt_max[1] = np.amax(np.absolute(Mt[half:]))
def plot(self, path):
"""
Trace et export le cycle Mt(H) et Ml(H)
"""
## Cycle figure
# Creating figure
fig = pl.figure()
fig.set_size_inches(18.5,10.5)
fig.suptitle("Model : {}, T = {}K, phi= {}deg".format(
self.model, self.T, np.degrees(self.phi)
))
# Initiating axis
ax = fig.add_subplot(111)
ax.grid(True)
# ax.set_title("Model : {}, T = {}K, phi= {}deg".format(self.model, self.T, np.degrees(self.phi)))
ax.set_ylabel("Mag. Moment (A.m²)")
# First axis
ax.plot(self.data[:, 0], self.data[:, 2], 'ro-', label='Ml')
ax.plot(self.data[:, 0], self.data[:, 1], 'go-', label='Mt')
ax.set_xlabel('Mag. Field H (A/m)')
ax.legend()
# Second axis (magnetic induction B)
x1, x2 = ax.get_xlim()
ax2 = ax.twiny()
ax2.set_xlim(mu_0 * x1 * 1e3, mu_0 * x2 * 1e3)
ax2.set_xlabel('Mag. Induction B (mT)')
# New y axis
ax3 = ax.twinx()
y1, y2 = ax.get_ylim()
ax3.set_ylim(y1 * 1e3 * 1e9, y2 * 1e3 * 1e9)
ax3.set_ylabel('Mag. Moment (micro emu)')
        # Displaying mu_0 Hc1 and mu_0 Hc2 in millitesla
Bc = np.abs((self.H_coer[1] - self.H_coer[0]) / 2) * mu_0
Be = (self.H_coer[1] + self.H_coer[0]) / 2 * mu_0
y_lim = ax.get_ylim()
x_lim = ax.get_xlim()
x_text = (x_lim[1] - x_lim[0]) * 0.15 + x_lim[0]
y_text = (y_lim[1] - y_lim[0]) * 0.8 + y_lim[0]
ax.text(
x_text, y_text,
"Hc = {:.3}mT\nHe = {:.3}mT".format(Bc * 1e3, Be * 1e3),
style='italic',
bbox={'facecolor': 'white', 'alpha': 1, 'pad': 10}
)
# Exporting graph as pdf
file = "{0}/cycle_{1}_T{2}_phi{3}.pdf".format(
path,
self.model,
round(self.T, 2),
round(np.degrees(self.phi), 2)
)
pl.savefig(file, dpi=100)
# Not forgetting to close figure (saves memory)
pl.close(fig)
def plot_energyPath(self, path):
"""
Plotting the path taken by the magnetic moments depending on H.
"""
# Create the PdfPages object to which we will save the pages:
# The with statement makes sure that the PdfPages object is closed properly at
# the end of the block, even if an Exception occurs.
pdfFile = "{}/path_T{}_phi{}.pdf".format(path, self.T, np.round(np.degrees(self.phi), 2))
with PdfPages(pdfFile) as pdf:
# Determine if the energy landscape is 2D or 1d
if len(self.energy[0].shape) == 2: # 2D
## Plotting (theta, alpha) path
Alpha = np.degrees(self.data[:, 4])
Theta = np.degrees(self.data[:, 3])
Num = Alpha.size/2
# Creating figure, with title
fig = pl.figure()
fig.set_size_inches(18.5,10.5)
fig.suptitle("Model : {}, T = {}K, phi= {}deg".format(self.model, np.round(self.T, 2), np.degrees(self.phi)))
# Axis
ax = fig.add_subplot(121, aspect='equal')
ax.plot(Theta[:Num], Alpha[:Num], '-ro', label="Decreasing H")
ax.plot(Theta[Num:], Alpha[Num:], '-bo', label="Increasing H")
ax.set_xlabel("Theta M_f(deg)")
ax.set_ylabel("Alpha M_af(deg)")
ax.set_xlim(0, 360)
ax.set_ylim(0, 360)
ax.legend()
ax.grid()
# Independant angles
ax2 = fig.add_subplot(122)
ax2.plot(Theta, '-ro', label='θ Mf')
ax2.plot(Alpha, '-bo', label='α Maf')
ax2.set_xlabel("Field index")
ax2.set_ylabel("Angle (deg)")
ax2.grid()
ax2.legend()
# Saving the figure
pdf.savefig()
# Closing
pl.close()
elif len(self.energy[0].shape) == 1:
# Resizing the energy table (in order to have a 2D-array)
# Transform a 1Darray of 1Darrays in a single 2D array
E = np.ma.zeros((self.energy.size, self.energy[0].size))
for i, val in enumerate(self.energy):
E[i, :] = val
# Field
Hmax = self.data[0, 0]
# Number of step
Num = self.data[:, 0].size
# Creating figure, with title
fig = pl.figure()
fig.set_size_inches(25, 10.5)
fig.suptitle("Model : {}, T = {}K, phi= {}deg".format(self.model, np.round(self.T, 2) , np.round(np.degrees(self.phi), 2)))
## Plotting energy : increasing field
ax = fig.add_subplot(121, aspect='equal')
# All energy states with transparency
cax_all = ax.imshow(E[:Num/2, :].data, alpha=0.5, label="Reachable states", interpolation = 'nearest', origin='upper', extent=(0, 360, -Hmax, Hmax), aspect='auto')
# Reachable states
cax_reach = ax.imshow(E[:Num/2, :], label="Reachable states", interpolation = 'nearest', origin='upper', extent=(0, 360, -Hmax, Hmax), aspect='auto')
cbar = fig.colorbar(cax_reach)
C = ax.contour(E[:Num/2, :].data, 10, colors='black', linewidth=.5, extent=(0, 360, Hmax, -Hmax))
#ax.clabel(C, inline=1, fontsize=10)
ax.plot(np.degrees(self.data[:Num/2, 3]), self.data[:Num/2, 0], 'ro', label='Eq.')
#ax.set_title("id:{}".format(HIdx))
ax.set_xlabel("Theta M_f(deg)")
ax.set_ylabel("H")
## Plotting energy : decreasing field
ax = fig.add_subplot(122, aspect='equal')
# All energy states with transparency
cax_all = ax.imshow(E[Num/2:, :].data, alpha=0.5, label="Reachable states", interpolation = 'nearest', origin='upper', extent=(0, 360, Hmax, -Hmax), aspect='auto')
# Reachable states
cax_reach = ax.imshow(E[Num/2:, :], label="Reachable states", interpolation = 'nearest', origin='upper', extent=(0, 360, Hmax, -Hmax), aspect='auto')
cbar = fig.colorbar(cax_reach)
C = ax.contour(E[Num/2:, :].data, 10, colors='black', linewidth=.5, extent=(0, 360, -Hmax, Hmax))
#ax.clabel(C, inline=1, fontsize=10)
#ax.imshow(E[Num/2:, :].mask, label="Reachable states", interpolation = 'nearest', origin='upper', extent=(0, 360, Hmax, -Hmax), aspect='auto')
ax.plot(np.degrees(self.data[Num/2:, 3]), self.data[Num/2:, 0], 'ro', label='Eq.')
#ax.set_title("id:{}".format(HIdx))
ax.set_xlabel("Theta M_f(deg)")
ax.set_ylabel("H")
# Saving the figure
pdf.savefig()
# Closing
pl.close()
else:
self.logger.warn("Cannot plot energy landscape.")
def plot_energyLandscape(self, path):
"""
Plotting the energy landscape of the cycles.
If the landscape has more than 2 variables (H, theta), plot the landscape for each H value.
"""
# Create the PdfPages object to which we will save the pages:
# The with statement makes sure that the PdfPages object is closed properly at
# the end of the block, even if an Exception occurs.
pdfFile = "{}/landscape_T{}_phi{}.pdf".format(path, self.T, np.round(np.degrees(self.phi), 2))
with PdfPages(pdfFile) as pdf:
# Determine if the energy landscape is 2D or 1d
if len(self.energy[0].shape) == 2: # 2D
for i, line in enumerate(self.data):
H = line[0] #field
E = self.energy[i] #2D array
theta = np.degrees(line[3])
alpha = np.degrees(line[4])
# Creating figure, with title
fig = pl.figure()
fig.set_size_inches(18.5,10.5)
fig.suptitle("Model : {}, T = {}K, phi= {}deg, H = {} Oe".format(self.model, self.T, np.degrees(self.phi), np.round(convert_field(H, 'cgs'), 2)))
# Axis
ax = fig.add_subplot(111, aspect='equal')
cax1 = ax.imshow(E.data, label="Energy landscape", interpolation = 'nearest', origin='upper', alpha=0.6, extent=(0, 360, 360, 0))
cax2 = ax.imshow(E, label="Reachable states", interpolation = 'nearest', origin='upper', extent=(0, 360, 360, 0))
cbar1 = fig.colorbar(cax1)
cbar2 = fig.colorbar(cax2)
C = ax.contour(E.data, 10, colors='black', linewidth=.5, extent=(0, 360, 0, 360))
#ax.clabel(C, inline=1, fontsize=10)
ax.plot(theta, alpha, 'ro', label='Eq.')
#ax.set_title("id:{}".format(HIdx))
ax.set_xlabel("Theta M_f(deg)")
ax.set_ylabel("Alpha M_af(deg)")
# Saving the figure
pdf.savefig()
# Closing
pl.close()
elif len(self.energy[0].shape) == 1:
Num = self.energy[0].size
step = 360.0 / Num # step in degrees (float division)
Theta = np.arange(Num) * step
for i, line in enumerate(self.data):
H = line[0] #field
E = self.energy[i] #1D array
theta = np.degrees(line[3])
# Creating figure, with title
fig = pl.figure()
fig.set_size_inches(18.5,10.5)
fig.suptitle("Model : {}, T = {}K, phi= {}deg, H = {} Oe".format(self.model, self.T, np.degrees(self.phi), np.round(convert_field(H, 'cgs'), 2)))
# Axis
ax = fig.add_subplot(111)
# Energy landscape
ax.plot(Theta, E.data, 'bo', label="Energy")
# Accessible states
ax.plot(Theta, E, 'go', label="Accessible")
# Position of the local minimum
idx_eq = int(theta / 360.0 * Num) # nearest grid index of the equilibrium angle
ax.plot(theta, E[idx_eq], 'ro', label='Eq.')
# Thermal energy from the equilibrium position
ax.plot(Theta, np.zeros(Num) + E[idx_eq] + np.log(f0 * tau_mes) * k_B * self.T, '-y', label='25k_B T')
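# The '25k_B T' reference line is the Neel-Arrhenius activation level:
# assuming a typical attempt frequency f0 ~ 1e9-1e10 Hz and a measurement
# time tau_mes of a few tens of seconds, ln(f0 * tau_mes) is roughly 25,
# the usual superparamagnetic stability criterion; the actual f0 and
# tau_mes values are taken from the surrounding module.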
# Legend / Axis
ax.set_xlabel("Theta M_f(deg)")
ax.set_ylabel("E (J)")
ax.legend()
pl.grid()
# Saving and closing
pdf.savefig()
pl.close()
else:
self.logger.warn("Cannot plot energy landscape.")
def export(self, path):
"""
Export the data cycle to the path.
"""
# Define the filename
file = "{0}/cycle_{1}_T{2}_phi{3}.dat".format(path, self.model, round(self.T, 3), round(np.degrees(self.phi), 3) )
# Info
self.logger.info("Exporting cycle data : {}".format(file))
# Exporting
header = "Model = {0} \nT = {1}K \nphi = {2} deg \nMs = {3} A/m\n\n".format(self.model, self.T, np.degrees(self.phi), self.Ms)
header +="H (A/m) \t\tMt (Am**2) \t\tMl (Am**2) \t\t(theta, alpha, ...) (rad)"
np.savetxt(file, self.data, delimiter='\t', header=header, comments='# ')
def import_data(self, path):
"""
Import the data cycle from the given path.
"""
# Define the filename
file = "{0}/cycle_{1}_T{2}_phi{3}.dat".format(
path,
self.model,
round(self.T, 3),
round(np.degrees(self.phi), 3)
)
# Info
self.logger.debug("Importing cycle data : {}".format(file))
# Exporting
self.data = np.loadtxt(file)
class Rotation(object):
def __init__(self, phiTab):
"""
Initiate the rotation.
Contains all the cycles.
Arguments :
— phiTab : phi array
self.data : [cycle.phi, Hc, He, Mr1, Mr2, Mt1, Mt2]
"""
# Creating logger for all children classes
self.logger = init_log(__name__)
# Storing phiTab
self.phi = phiTab
# Creating the cycles array
self.cycles = np.empty(phiTab.size, dtype='object')
def info(self, sample):
"""
Save cycles conditions : T, model, ...
"""
self.T = sample.T
self.model = sample.model
self.Ms = sample.Ms
def sum(self, rot, selfdensity, density):
"""
Method for summing two Rotation objects.
"""
print("rot sum", density)
# Checking if the Rotations are at the same temperature
if (self.T != rot.T):
self.logger.warn("Sum two rotations with different temperature.")
try:
if not (self.phi == rot.phi).all():
self.logger.error("The rotations does not have the same phi values.")
self.logger.warn("Summing anyway.")
except AttributeError as err:
self.logger.error("The rotations does not seem tohave the same phi array.\n{}".format(err))
#if self is rot:
#print("same rot")
# Summing
for i, cycle in enumerate(self.cycles):
cycle.sum(rot.cycles[i], selfdensity, density)
def plot(
self,
path, plot_azimuthal=True,
plot_cycles=True,
plot_energyPath=True,
plot_energyLandscape=True
):
"""
Plot the azimuthal properties.
"""
self.logger.info("Plotting rotationnal data in {} folder".format(path))
# Plotting cycles graph
for i, cycle in enumerate(self.cycles):
# Cycle
if plot_cycles:
self.logger.info("Plotting cycles...")
cycle.plot(path)
# Plotting path taken by the magnetization
if plot_energyPath:
self.logger.info("Plotting energy path...")
cycle.plot_energyPath(path)
# Energy landscape
if plot_energyLandscape:
self.logger.info("Plotting energy landscape...")
cycle.plot_energyLandscape(path)
# Freeing memory
if hasattr(cycle, 'energy') and not plot_energyPath:
self.logger.debug("Freeing memory.")
del(cycle.energy)
# Plotting azimuthal
if plot_azimuthal:
# Plotting azimuthal data
file = "{0}/azimuthal_{1}_T{2}.pdf".format(
path, self.model, round(self.T, 0)
)
data = self.data
fig = pl.figure()
fig.set_size_inches(18.5, 18.5)
coer = fig.add_subplot(221, polar=True)
coer.grid(True)
coer.plot(
data[:, 0], np.abs(data[:, 1]) * mu_0 * 1e3,
'ro-', label='Bc (mT)'
)
coer.legend()
ex = fig.add_subplot(222, polar=True)
ex.grid(True)
ex.plot(
data[:, 0], np.abs(data[:, 2]) * mu_0 * 1e3,
'bo-', label='Be (mT)'
)
ex.legend()
rem = fig.add_subplot(224, polar=True)
rem.grid(True)
rem.plot(
data[:, 0], np.abs(data[:, 3] / self.Ms),
'mo-', label='Mr_1 / Ms'
)
rem.plot(
data[:, 0], np.abs(data[:, 4] / self.Ms),
'co-', label='Mr_2 / Ms'
)
rem.legend()
trans = fig.add_subplot(223, polar=True)
trans.grid(True)
trans.plot(
data[:, 0], np.abs(data[:, 5] / self.Ms),
'go-', label='max(Mt)1 (A m**2)'
)
trans.plot(
data[:, 0], np.abs(data[:, 6] / self.Ms),
'yo-', label='max(Mt)2 (A m**2)'
)
trans.legend()
# Plot and export the figure
self.logger.info("Plotting azimuthal graph : {}".format(file))
pl.savefig(file, dpi=100)
pl.close(fig)
def process(self):
"""
Calculate the properties for each cycle of the rotation.
The results are stored in self.data
"""
# Data rotation (phi, Hc, He, Mr1, Mr2, Mt_max, Mt_min)
self.data = np.zeros((self.phi.size, 7))
self.data[:, 0] = self.phi
for i, cycle in enumerate(self.cycles):
cycle.calc_properties()
# Storing cycles properties in rotation object
Hc = (cycle.H_coer[1] - cycle.H_coer[0])/2
He = (cycle.H_coer[1] + cycle.H_coer[0])/2
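# Hc is half the loop width along H and He its horizontal shift (exchange
# bias). With made-up coercive fields of -40 A/m and +60 A/m this gives
# Hc = 50 A/m and He = +10 A/m.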
Mr1 = cycle.Mr[0]
Mr2 = cycle.Mr[1]
Mt1 = cycle.Mt_max[0]
Mt2 = cycle.Mt_max[1]
self.data[i] = np.array([cycle.phi, Hc, He, Mr1, Mr2, Mt1, Mt2])
def export(self, path):
"""
Export the data.
"""
# Exporting the cycles
for i, cycle in enumerate(self.cycles):
cycle.export(path)
# Exporting the azimuthal data
file = "{}/azimuthal_{}_T{}.dat".format(
path,
self.model,
#round(self.T, 0)
self.T
)
# Verbose
self.logger.info("Exporting azimuthal data : {}".format(file))
# Export
header = "Model = {0} \nT = {1}K \nMs = {2} A/m\n".format(
self.model,
self.T,
self.Ms
)
header += "phi(rad) \t\tHc (A/m) \t\tHe (A/m) \t\tMr1 (Am**2) \t\t\
Mr2(Am**2) \t\tMt1 (Am**2) \t\tMt2 (Am**2)\n"
np.savetxt(
file, self.data,
delimiter='\t',
header=header,
comments='# '
)
def import_data(self, path):
"""
Import a data file
"""
# Importing the cycles
for i, cycle in enumerate(self.cycles):
cycle.import_data(path)
# Importing the azimuthal data
file = "{}/azimuthal_{}_T{:.1f}.dat".format(
# rounded to 1 dec.
path,
self.model,
# round(self.T, 0) # old rounded number
self.T
)
# Verbose
self.logger.debug("Importing azimuthal data : {}".format(file))
# Import
self.data = np.loadtxt(file)
class Tevol(object):
"""
Class containing the thermal evolution of the different properties.
This class is associated with a sample.
data : (T, phi, [cycle.phi, Hc, He, Mr1, Mr2, Mt1, Mt2])
"""
def __init__(self, vsm):
"""
Initiating the data
"""
# Creating logger for all children classes
self.logger = init_log(__name__)
# Attributes
self.T = vsm.T
self.data = np.zeros((vsm.T.size, vsm.phi.size, 7))
self.model = vsm.sample.model
def extract_data(self, rotations):
# Loop over T
for k, rot in enumerate(rotations):
self.data[k, :, :] = rot.data
def plot_H(self, path):
"""
Plotting the T evolution of Hc and He for each field direction
"""
# Create the PdfPages object to which we will save the pages:
# The with statement makes sure that the PdfPages object is closed properly at
# the end of the block, even if an Exception occurs.
pdfFile = "{}/Tevol_HcHe.pdf".format(path)
with PdfPages(pdfFile) as pdf:
# Loop over phi
for k, phi in enumerate(self.data[0, :, 0]):
# Creating figure
fig = pl.figure()
fig.set_size_inches(25,10.5)
fig.suptitle("Model : {}, phi= {}deg".format(self.model, np.round(np.degrees(phi), 2) ))
# Hc
ax = fig.add_subplot(121)
#ax.plot(self.T, convert_field(self.data[:, k, 1], 'cgs'), 'ro', label="Hc")
ax.plot(self.T, self.data[:, k, 1] * mu_0 * 1e3, 'ro', label="Hc")
ax.set_xlabel("T (K)")
ax.set_ylabel("Hc (mT)")
ax.legend()
ax.grid()
# He
ax2 = fig.add_subplot(122)
#ax2.plot(self.T, np.round(convert_field(self.data[:, k, 2], 'cgs'), 2), 'go', label="He")
ax2.plot(self.T, self.data[:, k, 2] * mu_0 * 1e3, 'go', label="He")
ax2.set_xlabel("T (K)")
ax2.set_ylabel("He (mT)")
ax2.legend()
ax2.grid()
# Saving and closing
pdf.savefig()
pl.close()
def plot_M(self, path):
"""
Plotting the T evolution of Mr and Mt for each field direction
"""
# Create the PdfPages object to which we will save the pages:
# The with statement makes sure that the PdfPages object is closed properly at
# the end of the block, even if an Exception occurs.
pdfFile = "{}/Tevol_MrMt.pdf".format(path)
with PdfPages(pdfFile) as pdf:
# Loop over phi
for k, phi in enumerate(self.data[0, :, 0]):
# Creating figure
fig = pl.figure()
fig.set_size_inches(25,10.5)
fig.suptitle("Model : {}, phi= {}deg".format(self.model, np.round(np.degrees(phi), 2) ))
# Hc
ax = fig.add_subplot(121)
ax.set_title("Mr")
ax.plot(self.T, np.abs(self.data[:, k, 3]), 'ro', label="|Mr1|")
ax.plot(self.T, np.abs(self.data[:, k, 4]), 'go', label="|Mr2|")
ax.set_xlabel("T (K)")
ax.set_ylabel("Mr (A.m²)")
ax.legend()
ax.grid()
# He
ax2 = fig.add_subplot(122)
ax2.set_title("Mt")
ax2.plot(self.T, np.abs(self.data[:, k, 5]), 'ro', label="Mt1")
ax2.plot(self.T, np.abs(self.data[:, k, 6]), 'go', label="Mt2")
ax2.set_xlabel("T (K)")
ax2.set_ylabel("Mt (A.m²)")
ax2.legend()
ax2.grid()
# Saving and closing
pdf.savefig()
pl.close()
def export(self, path):
"""
Exporting the temperature evolution for each field direction
"""
self.logger.info("Exporting temperature evolution.")
# Loop over phi
for k, phi in enumerate(self.data[0, :, 0]):
# Verbose
self.logger.info("\t phi = {} deg".format(np.degrees(phi)))
# Filename
file = "{0}/Tevol_{1}_phi{2}.dat".format(path, self.model, round(np.degrees(phi), 1))
# Joining data
T_vert = self.T.reshape((self.T.size, 1))
tab = np.hstack((T_vert, self.data[:, k, 1:]))
# Exporting
header = "Model = {0} \nphi = {1} deg\n".format(self.model, np.degrees(phi))
header +="T (K) \t\t\tHc (A/m) \t\t\tHe (A/m) \t\t\tMr1 (Am**2) \t\t\tMr2 (Am**2) \t\t\tMt1 (Am**2) \t\t\tMt2 (Am**2)\n"
np.savetxt(file, tab, delimiter='\t', header=header, comments='# ')
| gpl-3.0 |
kushalbhola/MyStuff | Practice/PythonApplication/env/Lib/site-packages/pandas/core/indexes/accessors.py | 2 | 10714 | """
datetimelike delegation
"""
import numpy as np
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_datetime_arraylike,
is_integer_dtype,
is_list_like,
is_period_arraylike,
is_timedelta64_dtype,
)
from pandas.core.dtypes.generic import ABCSeries
from pandas.core.accessor import PandasDelegate, delegate_names
from pandas.core.algorithms import take_1d
from pandas.core.arrays import DatetimeArray, PeriodArray, TimedeltaArray
from pandas.core.base import NoNewAttributesMixin, PandasObject
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.core.indexes.timedeltas import TimedeltaIndex
class Properties(PandasDelegate, PandasObject, NoNewAttributesMixin):
def __init__(self, data, orig):
if not isinstance(data, ABCSeries):
raise TypeError(
"cannot convert an object of type {0} to a "
"datetimelike index".format(type(data))
)
self._parent = data
self.orig = orig
self.name = getattr(data, "name", None)
self._freeze()
def _get_values(self):
data = self._parent
if is_datetime64_dtype(data.dtype):
return DatetimeIndex(data, copy=False, name=self.name)
elif is_datetime64tz_dtype(data.dtype):
return DatetimeIndex(data, copy=False, name=self.name)
elif is_timedelta64_dtype(data.dtype):
return TimedeltaIndex(data, copy=False, name=self.name)
else:
if is_period_arraylike(data):
# TODO: use to_period_array
return PeriodArray(data, copy=False)
if is_datetime_arraylike(data):
return DatetimeIndex(data, copy=False, name=self.name)
raise TypeError(
"cannot convert an object of type {0} to a "
"datetimelike index".format(type(data))
)
def _delegate_property_get(self, name):
from pandas import Series
values = self._get_values()
result = getattr(values, name)
# maybe need to upcast (ints)
if isinstance(result, np.ndarray):
if is_integer_dtype(result):
result = result.astype("int64")
elif not is_list_like(result):
return result
result = np.asarray(result)
# blow up if we operate on categories
if self.orig is not None:
result = take_1d(result, self.orig.cat.codes)
index = self.orig.index
else:
index = self._parent.index
# return the result as a Series, which is by definition a copy
result = Series(result, index=index, name=self.name)
# setting this object will show a SettingWithCopyWarning/Error
result._is_copy = (
"modifications to a property of a datetimelike "
"object are not supported and are discarded. "
"Change values on the original."
)
return result
def _delegate_property_set(self, name, value, *args, **kwargs):
raise ValueError(
"modifications to a property of a datetimelike "
"object are not supported. Change values on the "
"original."
)
def _delegate_method(self, name, *args, **kwargs):
from pandas import Series
values = self._get_values()
method = getattr(values, name)
result = method(*args, **kwargs)
if not is_list_like(result):
return result
result = Series(result, index=self._parent.index, name=self.name)
# setting this object will show a SettingWithCopyWarning/Error
result._is_copy = (
"modifications to a method of a datetimelike "
"object are not supported and are discarded. "
"Change values on the original."
)
return result
@delegate_names(
delegate=DatetimeArray, accessors=DatetimeArray._datetimelike_ops, typ="property"
)
@delegate_names(
delegate=DatetimeArray, accessors=DatetimeArray._datetimelike_methods, typ="method"
)
class DatetimeProperties(Properties):
"""
Accessor object for datetimelike properties of the Series values.
Examples
--------
>>> s.dt.hour
>>> s.dt.second
>>> s.dt.quarter
Returns a Series indexed like the original Series.
Raises TypeError if the Series does not contain datetimelike values.
"""
def to_pydatetime(self):
"""
Return the data as an array of native Python datetime objects.
Timezone information is retained if present.
.. warning::
Python's datetime uses microsecond resolution, which is lower than
pandas (nanosecond). The values are truncated.
Returns
-------
numpy.ndarray
Object dtype array containing native Python datetime objects.
See Also
--------
datetime.datetime : Standard library value for a datetime.
Examples
--------
>>> s = pd.Series(pd.date_range('20180310', periods=2))
>>> s
0 2018-03-10
1 2018-03-11
dtype: datetime64[ns]
>>> s.dt.to_pydatetime()
array([datetime.datetime(2018, 3, 10, 0, 0),
datetime.datetime(2018, 3, 11, 0, 0)], dtype=object)
pandas' nanosecond precision is truncated to microseconds.
>>> s = pd.Series(pd.date_range('20180310', periods=2, freq='ns'))
>>> s
0 2018-03-10 00:00:00.000000000
1 2018-03-10 00:00:00.000000001
dtype: datetime64[ns]
>>> s.dt.to_pydatetime()
array([datetime.datetime(2018, 3, 10, 0, 0),
datetime.datetime(2018, 3, 10, 0, 0)], dtype=object)
"""
return self._get_values().to_pydatetime()
@property
def freq(self):
return self._get_values().inferred_freq
@delegate_names(
delegate=TimedeltaArray, accessors=TimedeltaArray._datetimelike_ops, typ="property"
)
@delegate_names(
delegate=TimedeltaArray,
accessors=TimedeltaArray._datetimelike_methods,
typ="method",
)
class TimedeltaProperties(Properties):
"""
Accessor object for datetimelike properties of the Series values.
Examples
--------
>>> s.dt.hours
>>> s.dt.seconds
Returns a Series indexed like the original Series.
Raises TypeError if the Series does not contain datetimelike values.
"""
def to_pytimedelta(self):
"""
Return an array of native `datetime.timedelta` objects.
Python's standard `datetime` library uses a different representation
for timedeltas. This method converts a Series of pandas Timedeltas
to `datetime.timedelta` format with the same length as the original
Series.
Returns
-------
a : numpy.ndarray
1D array containing data with `datetime.timedelta` type.
See Also
--------
datetime.timedelta
Examples
--------
>>> s = pd.Series(pd.to_timedelta(np.arange(5), unit='d'))
>>> s
0 0 days
1 1 days
2 2 days
3 3 days
4 4 days
dtype: timedelta64[ns]
>>> s.dt.to_pytimedelta()
array([datetime.timedelta(0), datetime.timedelta(1),
datetime.timedelta(2), datetime.timedelta(3),
datetime.timedelta(4)], dtype=object)
"""
return self._get_values().to_pytimedelta()
@property
def components(self):
"""
Return a Dataframe of the components of the Timedeltas.
Returns
-------
DataFrame
Examples
--------
>>> s = pd.Series(pd.to_timedelta(np.arange(5), unit='s'))
>>> s
0 00:00:00
1 00:00:01
2 00:00:02
3 00:00:03
4 00:00:04
dtype: timedelta64[ns]
>>> s.dt.components
days hours minutes seconds milliseconds microseconds nanoseconds
0 0 0 0 0 0 0 0
1 0 0 0 1 0 0 0
2 0 0 0 2 0 0 0
3 0 0 0 3 0 0 0
4 0 0 0 4 0 0 0
""" # noqa: E501
return self._get_values().components.set_index(self._parent.index)
@property
def freq(self):
return self._get_values().inferred_freq
@delegate_names(
delegate=PeriodArray, accessors=PeriodArray._datetimelike_ops, typ="property"
)
@delegate_names(
delegate=PeriodArray, accessors=PeriodArray._datetimelike_methods, typ="method"
)
class PeriodProperties(Properties):
"""
Accessor object for datetimelike properties of the Series values.
Examples
--------
>>> s.dt.hour
>>> s.dt.second
>>> s.dt.quarter
Returns a Series indexed like the original Series.
Raises TypeError if the Series does not contain datetimelike values.
"""
class CombinedDatetimelikeProperties(
DatetimeProperties, TimedeltaProperties, PeriodProperties
):
def __new__(cls, data):
# CombinedDatetimelikeProperties isn't really instantiated. Instead
# we need to choose which parent (datetime or timedelta) is
# appropriate. Since we're checking the dtypes anyway, we'll just
# do all the validation here.
from pandas import Series
if not isinstance(data, Series):
raise TypeError(
"cannot convert an object of type {0} to a "
"datetimelike index".format(type(data))
)
orig = data if is_categorical_dtype(data) else None
if orig is not None:
data = Series(orig.values.categories, name=orig.name, copy=False)
try:
if is_datetime64_dtype(data.dtype):
return DatetimeProperties(data, orig)
elif is_datetime64tz_dtype(data.dtype):
return DatetimeProperties(data, orig)
elif is_timedelta64_dtype(data.dtype):
return TimedeltaProperties(data, orig)
elif is_period_arraylike(data):
return PeriodProperties(data, orig)
elif is_datetime_arraylike(data):
return DatetimeProperties(data, orig)
except Exception:
pass # we raise an attribute error anyway
raise AttributeError("Can only use .dt accessor with datetimelike " "values")
| apache-2.0 |
andyraib/data-storage | python_scripts/env/lib/python3.6/site-packages/matplotlib/axes/_subplots.py | 10 | 8310 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import map
from matplotlib.gridspec import GridSpec, SubplotSpec
from matplotlib import docstring
import matplotlib.artist as martist
from matplotlib.axes._axes import Axes
import warnings
from matplotlib.cbook import mplDeprecation
class SubplotBase(object):
"""
Base class for subplots, which are :class:`Axes` instances with
additional methods to facilitate generating and manipulating a set
of :class:`Axes` within a figure.
"""
def __init__(self, fig, *args, **kwargs):
"""
*fig* is a :class:`matplotlib.figure.Figure` instance.
*args* is the tuple (*numRows*, *numCols*, *plotNum*), where
the array of subplots in the figure has dimensions *numRows*,
*numCols*, and where *plotNum* is the number of the subplot
being created. *plotNum* starts at 1 in the upper left
corner and increases to the right.
If *numRows* <= *numCols* <= *plotNum* < 10, *args* can be the
decimal integer *numRows* * 100 + *numCols* * 10 + *plotNum*.
"""
self.figure = fig
if len(args) == 1:
if isinstance(args[0], SubplotSpec):
self._subplotspec = args[0]
else:
try:
s = str(int(args[0]))
rows, cols, num = list(map(int, s))
except ValueError:
raise ValueError(
'Single argument to subplot must be a 3-digit '
'integer')
self._subplotspec = GridSpec(rows, cols)[num - 1]
# num - 1 for converting from MATLAB to python indexing
elif len(args) == 3:
rows, cols, num = args
rows = int(rows)
cols = int(cols)
if isinstance(num, tuple) and len(num) == 2:
num = [int(n) for n in num]
self._subplotspec = GridSpec(rows, cols)[num[0] - 1:num[1]]
else:
if num < 1 or num > rows*cols:
raise ValueError(
"num must be 1 <= num <= {maxn}, not {num}".format(
maxn=rows*cols, num=num))
self._subplotspec = GridSpec(rows, cols)[int(num) - 1]
# num - 1 for converting from MATLAB to python indexing
else:
raise ValueError('Illegal argument(s) to subplot: %s' % (args,))
self.update_params()
# _axes_class is set in the subplot_class_factory
self._axes_class.__init__(self, fig, self.figbox, **kwargs)
def __reduce__(self):
# get the first axes class which does not
# inherit from a subplotbase
def not_subplotbase(c):
return issubclass(c, Axes) and not issubclass(c, SubplotBase)
axes_class = [c for c in self.__class__.mro()
if not_subplotbase(c)][0]
r = [_PicklableSubplotClassConstructor(),
(axes_class,),
self.__getstate__()]
return tuple(r)
def get_geometry(self):
"""get the subplot geometry, e.g., 2,2,3"""
rows, cols, num1, num2 = self.get_subplotspec().get_geometry()
return rows, cols, num1 + 1 # for compatibility
# COVERAGE NOTE: Never used internally or from examples
def change_geometry(self, numrows, numcols, num):
"""change subplot geometry, e.g., from 1,1,1 to 2,2,3"""
self._subplotspec = GridSpec(numrows, numcols)[num - 1]
self.update_params()
self.set_position(self.figbox)
def get_subplotspec(self):
"""get the SubplotSpec instance associated with the subplot"""
return self._subplotspec
def set_subplotspec(self, subplotspec):
"""set the SubplotSpec instance associated with the subplot"""
self._subplotspec = subplotspec
def update_params(self):
"""update the subplot position from fig.subplotpars"""
self.figbox, self.rowNum, self.colNum, self.numRows, self.numCols = \
self.get_subplotspec().get_position(self.figure,
return_all=True)
def is_first_col(self):
return self.colNum == 0
def is_first_row(self):
return self.rowNum == 0
def is_last_row(self):
return self.rowNum == self.numRows - 1
def is_last_col(self):
return self.colNum == self.numCols - 1
# COVERAGE NOTE: Never used internally or from examples
def label_outer(self):
"""
set the visible property on ticklabels so xticklabels are
visible only if the subplot is in the last row and yticklabels
are visible only if the subplot is in the first column
"""
lastrow = self.is_last_row()
firstcol = self.is_first_col()
for label in self.get_xticklabels():
label.set_visible(lastrow)
for label in self.get_yticklabels():
label.set_visible(firstcol)
def _make_twin_axes(self, *kl, **kwargs):
"""
make a twinx axes of self. This is used for twinx and twiny.
"""
from matplotlib.projections import process_projection_requirements
kl = (self.get_subplotspec(),) + kl
projection_class, kwargs, key = process_projection_requirements(
self.figure, *kl, **kwargs)
ax2 = subplot_class_factory(projection_class)(self.figure,
*kl, **kwargs)
self.figure.add_subplot(ax2)
return ax2
_subplot_classes = {}
def subplot_class_factory(axes_class=None):
# This makes a new class that inherits from SubplotBase and the
# given axes_class (which is assumed to be a subclass of Axes).
# This is perhaps a little bit roundabout to make a new class on
# the fly like this, but it means that a new Subplot class does
# not have to be created for every type of Axes.
if axes_class is None:
axes_class = Axes
new_class = _subplot_classes.get(axes_class)
if new_class is None:
new_class = type(str("%sSubplot") % (axes_class.__name__),
(SubplotBase, axes_class),
{'_axes_class': axes_class})
_subplot_classes[axes_class] = new_class
return new_class
# This is provided for backward compatibility
Subplot = subplot_class_factory()
class _PicklableSubplotClassConstructor(object):
"""
This stub class exists to return the appropriate subplot
class when __call__-ed with an axes class. This is purely to
allow Pickling of Axes and Subplots.
"""
def __call__(self, axes_class):
# create a dummy object instance
subplot_instance = _PicklableSubplotClassConstructor()
subplot_class = subplot_class_factory(axes_class)
# update the class to the desired subplot class
subplot_instance.__class__ = subplot_class
return subplot_instance
docstring.interpd.update(Axes=martist.kwdoc(Axes))
docstring.interpd.update(Subplot=martist.kwdoc(Axes))
"""
# this is some discarded code I was using to find the minimum positive
# data point for some log scaling fixes. I realized there was a
# cleaner way to do it, but am keeping this around as an example for
# how to get the data out of the axes. Might want to make something
# like this a method one day, or better yet make get_verts an Artist
# method
minx, maxx = self.get_xlim()
if minx<=0 or maxx<=0:
# find the min pos value in the data
xs = []
for line in self.lines:
xs.extend(line.get_xdata(orig=False))
for patch in self.patches:
xs.extend([x for x,y in patch.get_verts()])
for collection in self.collections:
xs.extend([x for x,y in collection.get_verts()])
posx = [x for x in xs if x>0]
if len(posx):
minx = min(posx)
maxx = max(posx)
# warning, probably breaks inverted axis
self.set_xlim((0.1*minx, maxx))
"""
| apache-2.0 |
niliafsari/KSP-SN | Gemini_N2997.py | 1 | 2977 | import os
import glob
import subprocess
import commands
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from matplotlib.ticker import AutoMinorLocator
from dophot import *
from findSN import *
from astropy.time import Time
from moon import *
import csv
import sys
sys.path.insert(0, '/home/afsari/')
from SNAP import Astrometry
from SNAP.Analysis import *
current_path=os.path.dirname(os.path.abspath(__file__))
matplotlib.rcParams.update({'font.size': 18})
matplotlib.rcParams['axes.linewidth'] = 1.5 #set the value globally
matplotlib.rcParams['xtick.major.size'] = 5
matplotlib.rcParams['xtick.major.width'] = 2
matplotlib.rcParams['xtick.minor.size'] = 2
matplotlib.rcParams['xtick.minor.width'] = 1.5
matplotlib.rcParams['ytick.major.size'] = 5
matplotlib.rcParams['ytick.major.width'] = 2
matplotlib.rcParams['ytick.minor.size'] = 2
matplotlib.rcParams['ytick.minor.width'] = 1.5
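# Two candidate sets of extinction coefficients are listed below; the second
# assignment overwrites the first, so only the 4.315/3.315/1.940/2.086
# values are actually used in this script.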
coef = {'B': 3.626, 'V': 2.742, 'I': 1.505, 'i': 1.698}
coef = {'B': 4.315, 'V': 3.315, 'I': 1.940, 'i': 2.086}
data_B = np.genfromtxt(current_path+ '/phot_csv/N2997-B_v1.csv', delimiter=',')
data_V = np.genfromtxt(current_path+'/phot_csv//N2997-V_v1.csv', delimiter=',')
data_I = np.genfromtxt(current_path+'/phot_csv//N2997-I_v1.csv', delimiter=',')
#data_I[data_I[:,9] < 18.3,:]=[]
data_I=np.delete(data_I, np.where(data_I[:,9] < 18.3), axis=0)
ax = plt.subplot(111)
u=np.argmin(data_B[:,4])
plt.errorbar(data_B[:,4][(data_B[:,9] < data_B[:,11])]-data_B[u,4],data_B[:,9][(data_B[:,9] < data_B[:,11])],yerr=data_B[:,10][(data_B[:,9] < data_B[:,11])],color='blue',label='B-band',fmt='o',markersize=10,markeredgecolor='black',
markeredgewidth=1.2)
plt.errorbar(data_V[:,4][(data_V[:,9] < data_V[:,11])]-data_B[u,4],data_V[:,9][(data_V[:,9] < data_V[:,11])],yerr=data_V[:,10][(data_V[:,9] < data_V[:,11])],color='green',label='V-band',fmt='o',markersize=10,markeredgecolor='black',
markeredgewidth=1.2)
plt.errorbar(data_I[:,4][(data_I[:,9] < data_I[:,11])]-data_B[u,4],data_I[:,9][(data_I[:,9] < data_I[:,11])],yerr=data_I[:,10][(data_I[:,9] < data_I[:,11])],color='red',label='I-band',fmt='o',markersize=10,markeredgecolor='black',
markeredgewidth=1.2)
#plt.axis([720-365,855-365,18,23.2])
ax.set_xlim([-1, 10])
ax.set_ylim([18, 20.7])
plt.gca().invert_yaxis()
plt.xlabel('Time [days]')
plt.ylabel('Apparent Magnitude (Vega)')
ax.legend(loc='best',ncol=1, fancybox=True,fontsize=14, frameon=False)
#ax1.spines[side].set_linewidth(size)
ax.yaxis.set_minor_locator(AutoMinorLocator(10))
ax.xaxis.set_minor_locator(AutoMinorLocator(10))
ax.xaxis.set_tick_params(width=1.5)
ax.yaxis.set_tick_params(width=1.5)
plt.xticks(np.arange(-1, 10, 1.0))
y=np.arange(18, 21,0.2)
ax.fill_betweenx(y,x1=4,x2=5, color='y',alpha=0.5,zorder=0)
plt.annotate('KSP-N2997-1_2018oh', xy=(0.3, 0.05), xycoords='axes fraction')
ax.text(4.4, 20, '2018A', fontsize=17, rotation='vertical',multialignment='center',va='center')
plt.tight_layout()
plt.show() | bsd-3-clause |
mpanteli/music-outliers | scripts/load_dataset.py | 1 | 6893 | # -*- coding: utf-8 -*-
"""
Created on Wed Mar 15 22:52:57 2017
@author: mariapanteli
"""
import os
import numpy as np
import pandas as pd
import pickle
from sklearn.model_selection import train_test_split
import extract_primary_features
import load_features
import util_filter_dataset
WIN_SIZE = 8
DATA_DIR = 'data'
METADATA_FILE = os.path.join(DATA_DIR, 'metadata.csv')
OUTPUT_FILES = [os.path.join(DATA_DIR, 'train_data_'+str(WIN_SIZE)+'.pickle'),
os.path.join(DATA_DIR, 'val_data_'+str(WIN_SIZE)+'.pickle'),
os.path.join(DATA_DIR, 'test_data_'+str(WIN_SIZE)+'.pickle')]
def get_train_val_test_idx(X, Y, seed=None):
""" Split in train, validation, test sets.
Parameters
----------
X : np.array
Data or indices.
Y : np.array
Class labels for data in X.
seed: int
Random seed.
Returns
-------
(X_train, Y_train) : tuple
Data X and labels y for the train set
(X_val, Y_val) : tuple
Data X and labels y for the validation set
(X_test, Y_test) : tuple
Data X and labels y for the test set
"""
X_train, X_val_test, Y_train, Y_val_test = train_test_split(X, Y, train_size=0.6, random_state=seed, stratify=Y)
X_val, X_test, Y_val, Y_test = train_test_split(X_val_test, Y_val_test, train_size=0.5, random_state=seed, stratify=Y_val_test)
return (X_train, Y_train), (X_val, Y_val), (X_test, Y_test)
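# Hedged usage sketch (hypothetical arrays): with X = np.arange(100) and Y
# holding 10 balanced classes, get_train_val_test_idx(X, Y, seed=0) returns
# stratified 60/20/20 splits, i.e. len(X_train) == 60, len(X_val) == 20 and
# len(X_test) == 20.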
def subset_labels(Y, N_min=10, N_max=100, seed=None):
""" Subset dataset to contain minimum N_min and maximum N_max instances
per class. Return indices for this subset.
Parameters
----------
Y : np.array
Class labels
N_min : int
Minimum instances per class
N_max : int
Maximum instances per class
seed: int
Random seed.
Returns
-------
subset_idx : np.array
Indices for a subset with classes of size bounded by N_min, N_max
"""
np.random.seed(seed=seed)
subset_idx = []
labels = np.unique(Y)
for label in labels:
label_idx = np.where(Y==label)[0]
counts = len(label_idx)
if counts>=N_max:
subset_idx.append(np.random.choice(label_idx, N_max, replace=False))
elif counts>=N_min and counts<N_max:
subset_idx.append(label_idx)
else:
# not enough samples for this class, skip
print("Found only %s samples from class %s (minimum %s)" % (counts, label, N_min))
continue
if len(subset_idx)>0:
subset_idx = np.concatenate(subset_idx, axis=0)
return subset_idx
else:
raise ValueError('No classes found with minimum %s samples' % N_min)
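# Sketch of the expected behaviour (hypothetical labels): for
# Y = np.array(['a']*5 + ['b']*50 + ['c']*200), subset_labels(Y) skips class
# 'a' (fewer than N_min=10 samples), keeps all 50 'b' indices and randomly
# samples 100 of the 'c' indices, so subset_idx has length 150.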
def check_extract_primary_features(df):
""" Check if csv files for melspectrograms, chromagrams, melodia, speech/music segmentation exist,
if the csv files don't exist, extract them.
Parameters
----------
df : pd.DataFrame
Metadata including class label and path to audio, melspec, chroma
"""
extract_melspec, extract_chroma, extract_melodia, extract_speech = False, False, False, False
# TODO: Check for each file in df, instead of just the first one
if os.path.exists(df['Audio'].iloc[0]):
if not os.path.exists(df['Melspec'].iloc[0]):
extract_melspec = True
if not os.path.exists(df['Chroma'].iloc[0]):
extract_chroma = True
if not os.path.exists(df['Melodia'].iloc[0]):
extract_melodia = True
if not os.path.exists(df['Speech'].iloc[0]):
extract_speech = True
else:
print("Audio file %s does not exist. Primary features will not be extracted." % df['Audio'].iloc[0])
extract_primary_features.extract_features_for_file_list(df,
melspec=extract_melspec, chroma=extract_chroma, melodia=extract_melodia, speech=extract_speech)
def extract_features(df, win2sec=8.0):
""" Extract features from melspec and chroma.
Parameters
----------
df : pd.DataFrame
Metadata including class label and path to audio, melspec, chroma
win2sec : float
The window size for the second frame decomposition of the features
Returns
-------
X : np.array
The concatenated features for every frame of every audio file in the dataset
Y : np.array
The class labels for every frame in the dataset
Y_audio : np.array
The audio labels
"""
feat_loader = load_features.FeatureLoader(win2sec=win2sec)
frames_rhy, frames_mfcc, frames_chroma, frames_mel, Y_df, Y_audio_df = feat_loader.get_features(df, precomp_melody=False)
print frames_rhy.shape, frames_mel.shape, frames_mfcc.shape, frames_chroma.shape
X = np.concatenate((frames_rhy, frames_mel, frames_mfcc, frames_chroma), axis=1)
Y = Y_df.get_values()
Y_audio = Y_audio_df.get_values()
return X, Y, Y_audio
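# X stacks the per-frame rhythm, melody, MFCC and chroma descriptors
# column-wise, so X has shape (total number of frames, sum of the four
# descriptor dimensionalities); Y repeats each recording's country label
# once per frame and Y_audio keeps the matching audio identifiers.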
def sample_dataset(df):
""" Select min 10 - max 100 recs from each country.
Parameters
----------
df : pd.DataFrame
The metadata (including country) of the tracks.
Returns
-------
df : pd.DataFrame
The metadata for the selected subset of tracks.
"""
df = util_filter_dataset.remove_missing_data(df)
subset_idx = subset_labels(df['Country'].get_values())
df = df.iloc[subset_idx, :]
return df
def features_for_train_test_sets(df, write_output=False):
"""Split in train/val/test sets, extract features and write output files.
Parameters
-------
df : pd.DataFrame
The metadata for the selected subset of tracks.
write_output : boolean
Whether to write files with the extracted features for train/val/test sets.
"""
X_idx, Y = np.arange(len(df)), df['Country'].get_values()
extract_features(df.iloc[np.array([0])], win2sec=WIN_SIZE)
train_set, val_set, test_set = get_train_val_test_idx(X_idx, Y)
X_train, Y_train, Y_audio_train = extract_features(df.iloc[train_set[0]], win2sec=WIN_SIZE)
X_val, Y_val, Y_audio_val = extract_features(df.iloc[val_set[0]], win2sec=WIN_SIZE)
X_test, Y_test, Y_audio_test = extract_features(df.iloc[test_set[0]], win2sec=WIN_SIZE)
train = [X_train, Y_train, Y_audio_train]
val = [X_val, Y_val, Y_audio_val]
test = [X_test, Y_test, Y_audio_test]
if write_output:
with open(OUTPUT_FILES[0], 'wb') as f:
pickle.dump(train, f)
with open(OUTPUT_FILES[1], 'wb') as f:
pickle.dump(val, f)
with open(OUTPUT_FILES[2], 'wb') as f:
pickle.dump(test, f)
return train, val, test
if __name__ == '__main__':
# load dataset
df = pd.read_csv(METADATA_FILE)
check_extract_primary_features(df)
df = sample_dataset(df)
train, val, test = features_for_train_test_sets(df, write_output=True)
| mit |
yaroslavvb/tensorflow | tensorflow/contrib/learn/python/learn/preprocessing/tests/categorical_test.py | 137 | 2219 | # encoding: utf-8
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Categorical tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.learn.python.learn.learn_io import HAS_PANDAS
from tensorflow.contrib.learn.python.learn.preprocessing import categorical
from tensorflow.python.platform import test
class CategoricalTest(test.TestCase):
"""Categorical tests."""
def testSingleCategoricalProcessor(self):
cat_processor = categorical.CategoricalProcessor(min_frequency=1)
x = cat_processor.fit_transform([["0"], [1], [float("nan")], ["C"], ["C"],
[1], ["0"], [np.nan], [3]])
self.assertAllEqual(list(x), [[2], [1], [0], [3], [3], [1], [2], [0], [0]])
def testSingleCategoricalProcessorPandasSingleDF(self):
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
cat_processor = categorical.CategoricalProcessor()
data = pd.DataFrame({"Gender": ["Male", "Female", "Male"]})
x = list(cat_processor.fit_transform(data))
self.assertAllEqual(list(x), [[1], [2], [1]])
def testMultiCategoricalProcessor(self):
cat_processor = categorical.CategoricalProcessor(
min_frequency=0, share=False)
x = cat_processor.fit_transform([["0", "Male"], [1, "Female"],
["3", "Male"]])
self.assertAllEqual(list(x), [[1, 1], [2, 2], [3, 1]])
if __name__ == "__main__":
test.main()
| apache-2.0 |
albertoferna/compmech | compmech/panels/kpanels/kpanelt/kpanelt.py | 1 | 54711 | from __future__ import division
import gc
import os
import sys
import traceback
from collections import Iterable
import time
import cPickle
import __main__
import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse.linalg import eigsh
from scipy.optimize import leastsq
from numpy import linspace, cos, sin, deg2rad
import compmech.composite.laminate as laminate
from compmech.analysis import Analysis
from compmech.logger import msg, warn
from compmech.constants import DOUBLE
from compmech.sparse import (make_symmetric, solve, remove_null_cols,
is_symmetric)
import modelDB
def load(name):
if '.KPanelT' in name:
return cPickle.load(open(name, 'rb'))
else:
return cPickle.load(open(name + '.KPanelT', 'rb'))
class KPanelT(object):
r"""Conical (Konus) panel using trigonometric series
The approximation functions for the displacement field are:
.. math::
\begin{tabular}{l c r}
CLPT & FSDT \\
\hline
$u$ & $u$ \\
$v$ & $v$ \\
$w$ & $w$ \\
$NA$ & $\phi_x$ \\
$NA$ & $\phi_\theta$ \\
\end{tabular}
with:
.. math::
u = \sum_{i_1=0}^{m_1}{\sum_{j_1=0}^{n_1}{f}}
\\
v = \sum_{i_1=0}^{m_1}{\sum_{j_1=0}^{n_1}{f}}
\\
w = \sum_{i_1=0}^{m_1}{\sum_{j_1=0}^{n_1}{f}}
\\
\phi_x = \sum_{i_1=0}^{m_1}{\sum_{j_1=0}^{n_1}{f}}
\\
\phi_\theta = \sum_{i_1=0}^{m_1}{\sum_{j_1=0}^{n_1}{f}}
\\
f = cos(i_1 \pi b_x)cos(j_1 \pi b_\theta)
\\
b_x = \frac{x + \frac{L}{2}}{L}
\\
b_\theta = \frac{\theta - \theta_{min}}{\theta_{max}-\theta_{min}}
"""
def __init__(self):
self.name = ''
self.alphadeg = 0.
self.alpharad = 0.
self.is_cylinder = None
# boundary conditions
self.inf = 1.e8 # used to define high stiffnesses
self.zero = 0. # used to define zero stiffnesses
self.bc = None
self.kuBot = self.inf
self.kvBot = self.inf
self.kwBot = self.inf
self.kphixBot = 0.
self.kphitBot = 0.
self.kuTop = self.inf
self.kvTop = self.inf
self.kwTop = self.inf
self.kphixTop = 0.
self.kphitTop = 0.
self.kuLeft = self.inf
self.kvLeft = self.inf
self.kwLeft = self.inf
self.kphixLeft = 0.
self.kphitLeft = 0.
self.kuRight = self.inf
self.kvRight = self.inf
self.kwRight = self.inf
self.kphixRight = 0.
self.kphitRight = 0.
# default equations
self.model = 'clpt_donnell_bc4'
# approximation series
self.m1 = 40
self.n1 = 40
# numerical integration
self.nx = 160
self.nt = 160
self.ni_num_cores = 4
self.ni_method = 'trapz2d'
# analytical integration for cones
self.s = 79
# loads
self.Fx = None
self.Ft = None
self.Fxt = None
self.Ftx = None
self.NxxTop = None
self.NxtTop = None
self.NttLeft = None
self.NtxLeft = None
self.Fx_inc = None
self.Ft_inc = None
self.Fxt_inc = None
self.Ftx_inc = None
self.NxxTop_inc = None
self.NxtTop_inc = None
self.NttLeft_inc = None
self.NtxLeft_inc = None
self.forces = []
self.forces_inc = []
# initial imperfection
self.c0 = None
self.m0 = 0
self.n0 = 0
self.funcnum = 2
self.r1 = None
self.r2 = None
self.L = None
self.tmindeg = None
self.tmaxdeg = None
self.tminrad = None
self.tmaxrad = None
self.K = 5/6.
self.sina = None
self.cosa = None
# material
self.laminaprop = None
self.plyt = None
self.laminaprops = []
self.stack = []
self.plyts = []
# constitutive law
self.F = None
self.force_orthotropic_laminate = False
# eigenvalue analysis
self.num_eigvalues = 50
self.num_eigvalues_print = 5
# output queries
self.out_num_cores = 4
# analysis
self.analysis = Analysis(self.calc_fext, self.calc_k0, self.calc_fint,
self.calc_kT)
# outputs
self.increments = None
self.cs = None
self.eigvecs = None
self.eigvals = None
self._clear_matrices()
def _clear_matrices(self):
self.k0 = None
self.kT = None
self.kG0 = None
self.kG0_Fx = None
self.kG0_Ft = None
self.kG0_Fxt = None
self.kG0_Ftx = None
self.kG = None
self.kL = None
self.lam = None
self.u = None
self.v = None
self.w = None
self.phix = None
self.phit = None
self.Xs = None
self.Ts = None
gc.collect()
def _rebuild(self):
if not self.name:
try:
self.name = os.path.basename(__main__.__file__).split('.py')[0]
except AttributeError:
warn('KPanelT name unchanged')
self.model = self.model.lower()
valid_models = sorted(modelDB.db.keys())
if not self.model in valid_models:
raise ValueError('ERROR - valid models are:\n ' +
'\n '.join(valid_models))
# boundary conditions
inf = self.inf
zero = self.zero
if inf > 1.e8:
warn('inf reduced to 1.e8 due to the verified ' +
'numerical instability for higher values', level=2)
inf = 1.e8
if self.bc is not None:
bc = self.bc.lower()
if '_' in bc:
# different bc for Bot, Top, Left and Right
bc_Bot, bc_Top, bc_Left, bc_Right = self.bc.split('_')
elif '-' in bc:
# different bc for Bot, Top, Left and Right
bc_Bot, bc_Top, bc_Left, bc_Right = self.bc.split('-')
else:
bc_Bot = bc_Top = bc_Left = bc_Right = bc
bcs = dict(bc_Bot=bc_Bot, bc_Top=bc_Top,
bc_Left=bc_Left, bc_Right=bc_Right)
for k in bcs.keys():
sufix = k.split('_')[1] # Bot, Top, Left or Right
if bcs[k] == 'ss1':
setattr(self, 'ku' + sufix, inf)
setattr(self, 'kv' + sufix, inf)
setattr(self, 'kw' + sufix, inf)
setattr(self, 'kphix' + sufix, zero)
setattr(self, 'kphit' + sufix, zero)
elif bcs[k] == 'ss2':
setattr(self, 'ku' + sufix, zero)
setattr(self, 'kv' + sufix, inf)
setattr(self, 'kw' + sufix, inf)
setattr(self, 'kphix' + sufix, zero)
setattr(self, 'kphit' + sufix, zero)
elif bcs[k] == 'ss3':
setattr(self, 'ku' + sufix, inf)
setattr(self, 'kv' + sufix, zero)
setattr(self, 'kw' + sufix, inf)
setattr(self, 'kphix' + sufix, zero)
setattr(self, 'kphit' + sufix, zero)
elif bcs[k] == 'ss4':
setattr(self, 'ku' + sufix, zero)
setattr(self, 'kv' + sufix, zero)
setattr(self, 'kw' + sufix, inf)
setattr(self, 'kphix' + sufix, zero)
setattr(self, 'kphit' + sufix, zero)
elif bcs[k] == 'cc1':
setattr(self, 'ku' + sufix, inf)
setattr(self, 'kv' + sufix, inf)
setattr(self, 'kw' + sufix, inf)
setattr(self, 'kphix' + sufix, inf)
setattr(self, 'kphit' + sufix, inf)
elif bcs[k] == 'cc2':
setattr(self, 'ku' + sufix, zero)
setattr(self, 'kv' + sufix, inf)
setattr(self, 'kw' + sufix, inf)
setattr(self, 'kphix' + sufix, inf)
setattr(self, 'kphit' + sufix, inf)
elif bcs[k] == 'cc3':
setattr(self, 'ku' + sufix, inf)
setattr(self, 'kv' + sufix, zero)
setattr(self, 'kw' + sufix, inf)
setattr(self, 'kphix' + sufix, inf)
setattr(self, 'kphit' + sufix, inf)
elif bcs[k] == 'cc4':
setattr(self, 'ku' + sufix, zero)
setattr(self, 'kv' + sufix, zero)
setattr(self, 'kw' + sufix, inf)
setattr(self, 'kphix' + sufix, inf)
setattr(self, 'kphit' + sufix, inf)
elif bcs[k] == 'free':
setattr(self, 'ku' + sufix, zero)
setattr(self, 'kv' + sufix, zero)
setattr(self, 'kw' + sufix, zero)
setattr(self, 'kphix' + sufix, zero)
setattr(self, 'kphit' + sufix, zero)
else:
txt = '"{}" is not a valid boundary condition!'.format(bc)
raise ValueError(txt)
self.tminrad = deg2rad(self.tmindeg)
self.tmaxrad = deg2rad(self.tmaxdeg)
self.alpharad = deg2rad(self.alphadeg)
self.sina = sin(self.alpharad)
self.cosa = cos(self.alpharad)
if self.L is None:
raise ValueError('The length L must be specified')
if not self.r2:
if not self.r1:
raise ValueError('Radius r1 or r2 must be specified')
else:
self.r2 = self.r1 - self.L*self.sina
else:
self.r1 = self.r2 + self.L*self.sina
if not self.laminaprops:
self.laminaprops = [self.laminaprop for i in self.stack]
if not self.plyts:
self.plyts = [self.plyt for i in self.stack]
if self.alpharad == 0:
self.is_cylinder = True
else:
self.is_cylinder = False
def check_load(load, size):
if load is not None:
check = False
if isinstance(load, np.ndarray):
if load.ndim == 1:
assert load.shape[0] == size
return load
elif type(load) in (int, float):
newload = np.zeros(size, dtype=DOUBLE)
newload[0] = load
return newload
if not check:
raise ValueError('Invalid load input')
else:
return np.zeros(size, dtype=DOUBLE)
# axial load
size = self.n1+1
self.NxxTop = check_load(self.NxxTop, size)
self.NxxTop_inc = check_load(self.NxxTop_inc, size)
# shear xt
self.NxtTop = check_load(self.NxtTop, size)
self.NxtTop_inc = check_load(self.NxtTop_inc, size)
# circumferential load
size = self.m1+1
self.NttLeft = check_load(self.NttLeft, size)
self.NttLeft_inc = check_load(self.NttLeft_inc, size)
# shear tx
self.NtxLeft = check_load(self.NtxLeft, size)
self.NtxLeft_inc = check_load(self.NtxLeft_inc, size)
# defining load components from force vectors
tmin = self.tminrad
tmax = self.tmaxrad
if self.Fx is not None:
self.NxxTop[0] = self.Fx/((tmax - tmin)*self.r2)
msg('NxxTop[0] calculated from Fx', level=2)
if self.Fx_inc is not None:
self.NxxTop_inc[0] = self.Fx_inc/((tmax - tmin)*self.r2)
msg('NxxTop_inc[0] calculated from Fx_inc', level=2)
if self.Fxt is not None:
self.NxtTop[0] = self.Fxt/((tmax - tmin)*self.r2)
msg('NxtTop[0] calculated from Fxt', level=2)
if self.Fxt_inc is not None:
self.NxtTop_inc[0] = self.Fxt_inc/((tmax - tmin)*self.r2)
msg('NxtTop_inc[0] calculated from Fxt_inc', level=2)
if self.Ft is not None:
self.NttLeft[0] = self.Ft/self.L
msg('NttLeft[0] calculated from Ft', level=2)
if self.Ft_inc is not None:
self.NttLeft_inc[0] = self.Ft_inc/self.L
msg('NttLeft_inc[0] calculated from Ft_inc', level=2)
if self.Ftx is not None:
self.NtxLeft[0] = self.Ftx/self.L
msg('NtxLeft[0] calculated from Ftx', level=2)
if self.Ftx_inc is not None:
self.NtxLeft_inc[0] = self.Ftx_inc/self.L
msg('NtxLeft_inc[0] calculated from Ftx_inc', level=2)
if self.laminaprop is None:
raise ValueError('laminaprop must be defined')
def get_size(self):
r"""Calculates the size of the stiffness matrices
The size of the stiffness matrices can be interpreted as the number of
rows or columns, recalling that this will be the size of the Ritz
constants' vector `\{c\}`, the internal force vector `\{F_{int}\}` and
the external force vector `\{F_{ext}\}`.
Returns
-------
size : int
The size of the stiffness matrices.
"""
num0 = modelDB.db[self.model]['num0']
num1 = modelDB.db[self.model]['num1']
self.size = num0 + num1*self.m1*self.n1
return self.size
def _default_field(self, xs, ts, gridx, gridt):
if xs is None or ts is None:
xs = linspace(-self.L/2., self.L/2, gridx)
ts = linspace(self.tminrad, self.tmaxrad, gridt)
xs, ts = np.meshgrid(xs, ts, copy=False)
xs = np.atleast_1d(np.array(xs, dtype=DOUBLE))
ts = np.atleast_1d(np.array(ts, dtype=DOUBLE))
xshape = xs.shape
tshape = ts.shape
if xshape != tshape:
raise ValueError('Arrays xs and ts must have the same shape')
self.Xs = xs
self.Ts = ts
xs = xs.ravel()
ts = ts.ravel()
return xs, ts, xshape, tshape
def calc_linear_matrices(self, combined_load_case=None):
self._rebuild()
msg('Calculating linear matrices... ', level=2)
fk0, fk0_cyl, fkG0, fkG0_cyl, k0edges = modelDB.get_linear_matrices(
self, combined_load_case)
model = self.model
alpharad = self.alpharad
cosa = self.cosa
r1 = self.r1
r2 = self.r2
L = self.L
tminrad = self.tminrad
tmaxrad = self.tmaxrad
m1 = self.m1
n1 = self.n1
s = self.s
laminaprops = self.laminaprops
plyts = self.plyts
stack = self.stack
Fx = self.NxxTop[0]*((tmaxrad-tminrad)*r2)
Ft = self.NttLeft[0]*L
Fxt = self.NxtTop[0]*((tmaxrad-tminrad)*r2)
Ftx = self.NtxLeft[0]*L
if stack != []:
lam = laminate.read_stack(stack, plyts=plyts,
laminaprops=laminaprops)
if 'clpt' in model:
if lam is not None:
F = lam.ABD
elif 'fsdt' in model:
if lam is not None:
F = lam.ABDE
F[6:, 6:] *= self.K
if self.force_orthotropic_laminate:
msg('')
msg('Forcing orthotropic laminate...', level=2)
F[0, 2] = 0. # A16
F[1, 2] = 0. # A26
F[2, 0] = 0. # A61
F[2, 1] = 0. # A62
F[0, 5] = 0. # B16
F[5, 0] = 0. # B61
F[1, 5] = 0. # B26
F[5, 1] = 0. # B62
F[3, 2] = 0. # B16
F[2, 3] = 0. # B61
F[4, 2] = 0. # B26
F[2, 4] = 0. # B62
F[3, 5] = 0. # D16
F[4, 5] = 0. # D26
F[5, 3] = 0. # D61
F[5, 4] = 0. # D62
if F.shape[0] == 8:
F[6, 7] = 0. # A45
F[7, 6] = 0. # A54
self.lam = lam
self.F = F
if self.is_cylinder:
k0 = fk0_cyl(r1, L, tminrad, tmaxrad, F, m1, n1)
if not combined_load_case:
kG0 = fkG0_cyl(Fx, Ft, Fxt, Ftx, r1, L, tminrad, tmaxrad, m1,
n1)
else:
kG0_Fx = fkG0_cyl(Fx, 0, 0, 0, r1, L, tminrad, tmaxrad, m1, n1)
kG0_Ft = fkG0_cyl(0, Ft, 0, 0, r1, L, tminrad, tmaxrad, m1, n1)
kG0_Fxt = fkG0_cyl(0, 0, Fxt, 0, r1, L, tminrad, tmaxrad,
m1, n1)
kG0_Ftx = fkG0_cyl(0, 0, 0, Ftx, r1, L, tminrad, tmaxrad,
m1, n1)
else:
k0 = fk0(r1, L, tminrad, tmaxrad, F, m1, n1, alpharad, s)
if not combined_load_case:
kG0 = fkG0(Fx, Ft, Fxt, Ftx, r1, L, tminrad, tmaxrad, m1, n1,
alpharad, s)
else:
kG0_Fx = fkG0(Fx, 0, 0, 0, r1, L, tminrad, tmaxrad, m1, n1,
alpharad, s)
kG0_Ft = fkG0(0, Ft, 0, 0, r1, L, tminrad, tmaxrad, m1, n1,
alpharad, s)
kG0_Fxt = fkG0(0, 0, Fxt, 0, r1, L, tminrad, tmaxrad, m1, n1,
alpharad, s)
kG0_Ftx = fkG0(0, 0, 0, Ftx, r1, L, tminrad, tmaxrad, m1, n1,
alpharad, s)
# performing checks for the linear stiffness matrices
assert np.any(np.isnan(k0.data)) == False
assert np.any(np.isinf(k0.data)) == False
k0 = csr_matrix(make_symmetric(k0))
if k0edges is not None:
assert np.any((np.isnan(k0edges.data)
| np.isinf(k0edges.data))) == False
k0edges = csr_matrix(make_symmetric(k0edges))
if k0edges is not None:
msg('Applying elastic constraints!', level=3)
k0 = k0 + k0edges
self.k0 = k0
if not combined_load_case:
assert np.any((np.isnan(kG0.data) | np.isinf(kG0.data))) == False
kG0 = csr_matrix(make_symmetric(kG0))
self.kG0 = kG0
else:
assert np.any((np.isnan(kG0_Fx.data)
| np.isinf(kG0_Fx.data))) == False
assert np.any((np.isnan(kG0_Ft.data)
| np.isinf(kG0_Ft.data))) == False
assert np.any((np.isnan(kG0_Fxt.data)
| np.isinf(kG0_Fxt.data))) == False
assert np.any((np.isnan(kG0_Ftx.data)
| np.isinf(kG0_Ftx.data))) == False
kG0_Fx = csr_matrix(make_symmetric(kG0_Fx))
kG0_Ft = csr_matrix(make_symmetric(kG0_Ft))
kG0_Fxt = csr_matrix(make_symmetric(kG0_Fxt))
kG0_Ftx = csr_matrix(make_symmetric(kG0_Ftx))
self.kG0_Fx = kG0_Fx
self.kG0_Ft = kG0_Ft
self.kG0_Fxt = kG0_Fxt
self.kG0_Ftx = kG0_Ftx
#NOTE forcing Python garbage collector to clean the memory
# it DOES make a difference! There is a memory leak not
# identified, probably in the csr_matrix process
gc.collect()
msg('finished!', level=2)
def lb(self, tol=0, combined_load_case=None, remove_null_i1_j1=False,
sparse_solver=True):
"""Performs a linear buckling analysis
The following parameters of the ``KPanelT`` object will affect the
linear buckling analysis:
======================= =====================================
parameter description
======================= =====================================
``num_eigvalues`` Number of eigenvalues to be extracted
``num_eigvalues_print`` Number of eigenvalues to print after
the analysis is completed
======================= =====================================
Parameters
----------
combined_load_case : int, optional
It tells whether the linear buckling analysis must be computed
considering combined load cases, each value will tell
the algorithm to rearrange the linear matrices in a different
way. The valid values are ``1``, ``2``, ``3`` or ``4``, where:
- ``1`` : find the critical Fx for a fixed Fxt
- ``2`` : find the critical Fx for a fixed Ft
- ``3`` : find the critical Ft for a fixed Ftx
- ``4`` : find the critical Ft for a fixed Fx
remove_null_i1_j1 : bool, optional
It was observed that the eigenvectors can be described using only
the homogeneous part of the approximation functions, which are
obtained with `i_1 > 0` and `j_1 > 0`. Therefore, the terms with
`i_1 = 0` and `j_1 = 0` can be ignored.
sparse_solver : bool, optional
Tells if solver :func:`scipy.linalg.eigh` or
:func:`scipy.sparse.linalg.eigsh` should be used.
Notes
-----
The extracted eigenvalues are stored in the ``eigvals`` parameter
of the ``KPanelT`` object and the `i^{th}` eigenvector in the
``eigvecs[i-1, :]`` parameter.
"""
if not modelDB.db[self.model]['linear buckling']:
msg('________________________________________________')
msg('')
warn('Model {} cannot be used in linear buckling analysis!'.
format(self.model))
msg('________________________________________________')
msg('Running linear buckling analysis...')
self.calc_linear_matrices(combined_load_case=combined_load_case)
msg('Eigenvalue solver... ', level=2)
if not combined_load_case:
M = self.k0
A = self.kG0
elif combined_load_case == 1:
M = self.k0 + self.kG0_Fxt
A = self.kG0_Fx
elif combined_load_case == 2:
M = self.k0 + self.kG0_Ft
A = self.kG0_Fx
elif combined_load_case == 3:
M = self.k0 + self.kG0_Ftx
A = self.kG0_Ft
elif combined_load_case == 4:
M = self.k0 + self.kG0_Fx
A = self.kG0_Ft
if remove_null_i1_j1:
msg('removing rows and columns for i1=0 and j1=0 ...', level=3)
db = modelDB.db
num0 = db[self.model]['num0']
num1 = db[self.model]['num1']
dofs = db[self.model]['dofs']
valid = []
removed = []
for i1 in range(self.m1):
for j1 in range(self.n1):
col = num0 + num1*((j1)*self.m1 + (i1))
if i1 == 0 or j1 == 0:
for dof in range(dofs):
removed.append(col+dof)
else:
for dof in range(dofs):
valid.append(col+dof)
valid.sort()
removed.sort()
A = A[:, valid][valid, :]
M = M[:, valid][valid, :]
msg('finished!', level=3)
if sparse_solver:
mode = 'cayley'
try:
msg('eigsh() solver...', level=3)
eigvals, eigvecs = eigsh(A=A, k=self.num_eigvalues,
which='SM', M=M, tol=tol, sigma=1., mode=mode)
msg('finished!', level=3)
except Exception, e:
warn(str(e), level=4)
msg('aborted!', level=3)
size22 = A.shape[0]
M, A, used_cols = remove_null_cols(M, A)
msg('eigsh() solver...', level=3)
eigvals, peigvecs = eigsh(A=A, k=self.num_eigvalues,
which='SM', M=M, tol=tol, sigma=1., mode=mode)
msg('finished!', level=3)
eigvecs = np.zeros((size22, self.num_eigvalues), dtype=DOUBLE)
eigvecs[used_cols, :] = peigvecs
else:
from scipy.linalg import eigh
size22 = A.shape[0]
M, A, used_cols = remove_null_cols(M, A)
M = M.toarray()
A = A.toarray()
msg('eigh() solver...', level=3)
eigvals, peigvecs = eigh(a=A, b=M)
msg('finished!', level=3)
eigvecs = np.zeros((size22, self.num_eigvalues), dtype=DOUBLE)
eigvecs[used_cols, :] = peigvecs[:, :self.num_eigvalues]
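        # NOTE (explanatory; formulation assumed from the matrices used above):
        # the solvers are fed A (geometric stiffness) and M (elastic stiffness),
        # i.e. they solve kG0*x = mu*k0*x, whereas the physical buckling problem
        # is (k0 + lambda*kG0)*x = 0, so the critical load multipliers follow as
        # lambda = -1/mu, applied next.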
eigvals = -1./eigvals
if remove_null_i1_j1:
eigvecsALL = np.zeros((self.get_size(), self.num_eigvalues),
dtype=DOUBLE)
eigvecsALL[valid, :] = eigvecs
else:
eigvecsALL = eigvecs
self.eigvals = eigvals
self.eigvecs = eigvecsALL
msg('finished!', level=2)
msg('first {} eigenvalues:'.format(self.num_eigvalues_print), level=1)
for eig in eigvals[:self.num_eigvalues_print]:
msg('{}'.format(eig), level=2)
self.analysis.last_analysis = 'lb'
def calc_NL_matrices(self, c, num_cores=None):
r"""Calculates the non-linear stiffness matrices
Parameters
----------
c : np.ndarray
Ritz constants representing the current state to calculate the
stiffness matrices.
num_cores : int, optional
Number of CPU cores used by the algorithm.
Notes
-----
        Nothing is returned; the calculated matrices are stored in the
        ``kT``, ``kL`` and ``kG`` attributes of the ``KPanelT`` object.
"""
c = np.ascontiguousarray(c, dtype=DOUBLE)
if num_cores is None:
num_cores = self.ni_num_cores
if self.k0 is None:
self.calc_linear_matrices()
msg('Calculating non-linear matrices...', level=2)
alpharad = self.alpharad
r1 = self.r1
L = self.L
tminrad = self.tminrad
tmaxrad = self.tmaxrad
F = self.F
m1 = self.m1
n1 = self.n1
c0 = self.c0
m0 = self.m0
n0 = self.n0
funcnum = self.funcnum
nlmodule = modelDB.db[self.model]['non-linear']
if nlmodule:
calc_k0L = nlmodule.calc_k0L
calc_kG = nlmodule.calc_kG
calc_kLL = nlmodule.calc_kLL
ni_method = self.ni_method
nx = self.nx
nt = self.nt
kG = calc_kG(c, alpharad, r1, L, tminrad, tmaxrad, F, m1, n1,
nx=nx, nt=nt, num_cores=num_cores, method=ni_method,
c0=c0, m0=m0, n0=n0)
k0L = calc_k0L(c, alpharad, r1, L, tminrad, tmaxrad, F, m1, n1,
nx=nx, nt=nt, num_cores=num_cores, method=ni_method,
c0=c0, m0=m0, n0=n0)
kLL = calc_kLL(c, alpharad, r1, L, tminrad, tmaxrad, F, m1, n1,
nx=nx, nt=nt, num_cores=num_cores, method=ni_method,
c0=c0, m0=m0, n0=n0)
else:
raise ValueError(
'Non-Linear analysis not implemented for model {0}'.format(
self.model))
kL0 = k0L.T
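        # The tangent stiffness gathers the linear (k0), the linear/non-linear
        # coupling (k0L and its transpose kL0), the purely non-linear (kLL) and
        # the geometric (kG) contributions, following the naming used above.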
#TODO maybe slow...
self.kT = self.k0 + k0L + kL0 + kLL + kG
#NOTE intended for non-linear eigenvalue analyses
self.kL = self.k0 + k0L + kL0 + kLL
self.kG = kG
msg('finished!', level=2)
def uvw(self, c, xs=None, ts=None, gridx=300, gridt=300):
r"""Calculates the displacement field
For a given full set of Ritz constants ``c``, the displacement
field is calculated and stored in the parameters
``u``, ``v``, ``w``, ``phix``, ``phit`` of the ``KPanelT`` object.
Parameters
----------
        c : np.ndarray
The full set of Ritz constants
xs : np.ndarray
The `x` positions where to calculate the displacement field.
Default is ``None`` and the method ``_default_field`` is used.
ts : np.ndarray
The ``theta`` positions where to calculate the displacement field.
Default is ``None`` and the method ``_default_field`` is used.
gridx : int
Number of points along the `x` axis where to calculate the
displacement field.
gridt : int
Number of points along the `theta` where to calculate the
displacement field.
Returns
-------
out : tuple
A tuple of ``np.ndarrays`` containing
``(u, v, w, phix, phit)``.
Notes
-----
        The returned values ``u``, ``v``, ``w``, ``phix``, ``phit`` are
stored as parameters with the same name in the ``KPanelT`` object.
"""
c = np.ascontiguousarray(c, dtype=DOUBLE)
xs, ts, xshape, tshape = self._default_field(xs, ts, gridx, gridt)
alpharad = self.alpharad
m1 = self.m1
n1 = self.n1
r1 = self.r1
L = self.L
tminrad = self.tminrad
tmaxrad = self.tmaxrad
model = self.model
fuvw = modelDB.db[model]['commons'].fuvw
us, vs, ws, phixs, phits = fuvw(c, m1, n1, L, tminrad,
tmaxrad, xs, ts, r1, alpharad, self.out_num_cores)
self.u = us.reshape(xshape)
self.v = vs.reshape(xshape)
self.w = ws.reshape(xshape)
self.phix = phixs.reshape(xshape)
self.phit = phits.reshape(xshape)
return self.u, self.v, self.w, self.phix, self.phit
def strain(self, c, xs=None, ts=None, gridx=300, gridt=300):
r"""Calculates the strain field
Parameters
----------
c : np.ndarray
The Ritz constants vector to be used for the strain field
calculation.
xs : np.ndarray, optional
The `x` coordinates where to calculate the strains.
ts : np.ndarray, optional
The `\theta` coordinates where to calculate the strains, must
have the same shape as ``xs``.
gridx : int, optional
When ``xs`` and ``ts`` are not supplied, ``gridx`` and ``gridt``
are used.
gridt : int, optional
When ``xs`` and ``ts`` are not supplied, ``gridx`` and ``gridt``
are used.
"""
c = np.ascontiguousarray(c, dtype=DOUBLE)
xs, ts, xshape, tshape = self._default_field(xs, ts, gridx, gridt)
alpharad = self.alpharad
r1 = self.r1
L = self.L
tminrad = self.tminrad
tmaxrad = self.tmaxrad
sina = self.sina
cosa = self.cosa
m1 = self.m1
n1 = self.n1
c0 = self.c0
m0 = self.m0
n0 = self.n0
funcnum = self.funcnum
model = self.model
NL_kinematics = model.split('_')[1]
fstrain = modelDB.db[model]['commons'].fstrain
e_num = modelDB.db[model]['e_num']
if 'donnell' in NL_kinematics:
int_NL_kinematics = 0
elif 'sanders' in NL_kinematics:
int_NL_kinematics = 1
else:
raise NotImplementedError(
'{} is not a valid NL_kinematics option'.format(NL_kinematics))
es = fstrain(c, sina, cosa, xs, ts, r1, L, tminrad, tmaxrad, m1, n1,
c0, m0, n0, funcnum, int_NL_kinematics, self.out_num_cores)
return es.reshape((xshape + (e_num,)))
def stress(self, c, xs=None, ts=None, gridx=300, gridt=300):
r"""Calculates the stress field
Parameters
----------
c : np.ndarray
            The Ritz constants vector to be used for the stress field
            calculation.
        xs : np.ndarray, optional
            The `x` coordinates where to calculate the stresses.
        ts : np.ndarray, optional
            The `\theta` coordinates where to calculate the stresses, must
            have the same shape as ``xs``.
gridx : int, optional
When ``xs`` and ``ts`` are not supplied, ``gridx`` and ``gridt``
are used.
gridt : int, optional
When ``xs`` and ``ts`` are not supplied, ``gridx`` and ``gridt``
are used.
"""
c = np.ascontiguousarray(c, dtype=DOUBLE)
xs, ts, xshape, tshape = self._default_field(xs, ts, gridx, gridt)
F = self.F
alpharad = self.alpharad
r1 = self.r1
L = self.L
tminrad = self.tminrad
tmaxrad = self.tmaxrad
sina = self.sina
cosa = self.cosa
m1 = self.m1
n1 = self.n1
c0 = self.c0
m0 = self.m0
n0 = self.n0
funcnum = self.funcnum
model = self.model
NL_kinematics = model.split('_')[1]
fstress = modelDB.db[model]['commons'].fstress
e_num = modelDB.db[model]['e_num']
if 'donnell' in NL_kinematics:
int_NL_kinematics = 0
elif 'sanders' in NL_kinematics:
int_NL_kinematics = 1
else:
raise NotImplementedError(
'{} is not a valid NL_kinematics option'.format(
NL_kinematics))
Ns = fstress(c, F, sina, cosa, xs, ts, r1, L, tminrad, tmaxrad, m1,
n1, c0, m0, n0, funcnum, int_NL_kinematics,
self.out_num_cores)
return Ns.reshape((xshape + (e_num,)))
def add_SPL(self, PL, pt=0.5, theta=0., cte=True):
"""Add a Single Perturbation Load `\{{F_{PL}}_i\}`
        The perturbation load is a particular case of the punctual load which
        has only the normal component (along the `z` axis).
Parameters
----------
PL : float
The perturbation load value.
pt : float, optional
            The normalized meridional position at which the new SPL will be applied.
theta : float, optional
The angular position in radians.
cte : bool, optional
Constant forces are not incremented during the non-linear
analysis.
Notes
-----
Each single perturbation load is added to the ``forces`` parameter of
the ``KPanelT`` object if ``cte=True``, or to the ``forces_inc``
parameter if ``cte=False``, which may be changed by the analyst at any
time.
"""
self._rebuild()
if cte:
self.forces.append([pt*self.L, theta, 0., 0., PL])
else:
self.forces_inc.append([pt*self.L, theta, 0., 0., PL])
def add_force(self, x, theta, fx, ftheta, fz, cte=True):
r"""Add a punctual force with three components
Parameters
----------
x : float
The `x` position.
theta : float
The `\theta` position in radians.
fx : float
The `x` component of the force vector.
ftheta : float
The `\theta` component of the force vector.
fz : float
The `z` component of the force vector.
cte : bool, optional
Constant forces are not incremented during the non-linear
analysis.
"""
if cte:
self.forces.append([x, theta, fx, ftheta, fz])
else:
self.forces_inc.append([x, theta, fx, ftheta, fz])
def calc_fext(self, inc=1., silent=False):
"""Calculates the external force vector `\{F_{ext}\}`
Recall that:
.. math::
\{F_{ext}\}=\{{F_{ext}}_0\} + \{{F_{ext}}_\lambda\}
such that the terms in `\{{F_{ext}}_0\}` are constant and the terms in
`\{{F_{ext}}_\lambda\}` will be scaled by the parameter ``inc``.
Parameters
----------
inc : float, optional
Since this function is called during the non-linear analysis,
``inc`` will multiply the terms `\{{F_{ext}}_\lambda\}`.
silent : bool, optional
A boolean to tell whether the log messages should be printed.
Returns
-------
fext : np.ndarray
The external force vector
"""
self._rebuild()
msg('Calculating external forces...', level=2, silent=silent)
sina = self.sina
cosa = self.cosa
r2 = self.r2
L = self.L
tminrad = self.tminrad
tmaxrad = self.tmaxrad
m1 = self.m1
n1 = self.n1
model = self.model
if not model in modelDB.db.keys():
raise ValueError(
'{} is not a valid model option'.format(model))
db = modelDB.db
num0 = db[model]['num0']
num1 = db[model]['num1']
dofs = db[model]['dofs']
fg = db[model]['commons'].fg
size = self.get_size()
g = np.zeros((dofs, size), dtype=DOUBLE)
fext = np.zeros(size, dtype=DOUBLE)
# non-incrementable punctual forces
for i, force in enumerate(self.forces):
x, theta, fx, ftheta, fz = force
fg(g, m1, n1, x, theta, L, tminrad, tmaxrad)
if dofs == 3:
fpt = np.array([[fx, ftheta, fz]])
elif dofs == 5:
fpt = np.array([[fx, ftheta, fz, 0, 0]])
fext += fpt.dot(g).ravel()
# incrementable punctual forces
for i, force in enumerate(self.forces_inc):
x, theta, fx, ftheta, fz = force
fg(g, m1, n1, x, theta, L, tminrad, tmaxrad)
if dofs == 3:
fpt = np.array([[fx, ftheta, fz]])*inc
elif dofs == 5:
fpt = np.array([[fx, ftheta, fz, 0, 0]])*inc
fext += fpt.dot(g).ravel()
# NxxTop
NxxTop = self.NxxTop
NttLeft = self.NttLeft
NxxTop += inc*self.NxxTop_inc
NttLeft += inc*self.NttLeft_inc
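        # Project the prescribed edge loads onto the generalized coordinates.
        # The (-1)**i1 factor is assumed to come from evaluating the axial
        # approximation functions at the loaded edge, and the extra 1/2 for
        # j1 > 0 from the orthogonality of the circumferential harmonics.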
Nxx0 = NxxTop[0]
for j1 in range(n1):
if j1 > 0:
Nxxj = NxxTop[j1]
for i1 in range(m1):
col = num1*((j1)*m1 + (i1))
if j1 == 0:
fext[col+0] += (-1)**i1*Nxx0*r2*(tmaxrad - tminrad)
else:
fext[col+0] += 1/2.*(-1)**i1*Nxxj*r2*(tmaxrad - tminrad)
msg('finished!', level=2, silent=silent)
if np.all(fext==0):
raise ValueError('No load was applied!')
return fext
def calc_k0(self):
self.calc_linear_matrices()
return self.k0
def calc_fint(self, c, inc=1., m=1):
r"""Calculates the internal force vector `\{F_{int}\}`
The following attributes affect the numerical integration:
================= ================================================
Attribute Description
================= ================================================
``ni_num_cores`` ``int``, number of cores used for the numerical
integration
``ni_method`` ``str``, integration method:
- ``'trapz2d'`` for 2-D Trapezoidal's rule
- ``'simps2d'`` for 2-D Simpsons' rule
``nx`` ``int``, number of integration points along the
`x` coordinate
``nt`` ``int``, number of integration points along the
`\theta` coordinate
================= ================================================
Parameters
----------
c : np.ndarray
The Ritz constants that will be used to compute the internal
forces.
inc : float, optional
A load multiplier only needed to fit the correct function
signature.
m : integer, optional
A multiplier to the number of integration points if one wishes to
use more integration points to calculate `\{F_{int}\}` than to
calculate `[K_T]`.
Returns
-------
fint : np.ndarray
The internal force vector.
"""
ni_num_cores = self.ni_num_cores
ni_method = self.ni_method
nlmodule = modelDB.db[self.model]['non-linear']
nx = self.nx*m
nt = self.nt*m
fint = nlmodule.calc_fint_0L_L0_LL(c, self.alpharad, self.r1, self.L,
self.tminrad, self.tmaxrad, self.F, self.m1, self.n1, nx, nt,
ni_num_cores, ni_method, self.c0, self.m0, self.n0)
fint += self.k0*c
return fint
def calc_kT(self, c, inc=1.):
r"""Calculates the tangent stiffness matrix
The following attributes affect the numerical integration:
================= ================================================
Attribute Description
================= ================================================
``ni_num_cores`` ``int``, number of cores used for the numerical
integration
``ni_method`` ``str``, integration method:
- ``'trapz2d'`` for 2-D Trapezoidal's rule
- ``'simps2d'`` for 2-D Simpsons' rule
``nx`` ``int``, number of integration points along the
`x` coordinate
``nt`` ``int``, number of integration points along the
`\theta` coordinate
================= ================================================
Parameters
----------
c : np.ndarray
The Ritz constant vector of the current state.
inc : float, optional
A load multiplier only needed to fit the correct function
signature.
Returns
-------
kT : sparse matrix
The tangent stiffness matrix.
"""
self.calc_NL_matrices(c)
return self.kT
def static(self, NLgeom=False, silent=False):
"""Static analysis for cones and cylinders
The analysis can be linear or geometrically non-linear. See
:class:`.Analysis` for further details about the parameters
controlling the non-linear analysis.
Parameters
----------
NLgeom : bool
Flag to indicate whether a linear or a non-linear analysis is to
be performed.
silent : bool, optional
A boolean to tell whether the log messages should be printed.
Returns
-------
cs : list
A list containing the Ritz constants for each load increment of
the static analysis. The list will have only one entry in case
of a linear analysis.
Notes
-----
The returned ``cs`` is stored in ``self.analysis.cs``. The actual
increments used in the non-linear analysis are stored in the
``self.analysis.increments`` parameter.
"""
if self.c0 is not None:
self.analysis.kT_initial_state = True
else:
self.analysis.kT_initial_state = False
if NLgeom and not modelDB.db[self.model]['non-linear static']:
msg('________________________________________________',
silent=silent)
msg('', silent=silent)
warn('Model {} cannot be used in non-linear static analysis!'.
format(self.model), silent=silent)
msg('________________________________________________',
silent=silent)
            raise ValueError('Model {} cannot be used in non-linear '
                             'static analysis!'.format(self.model))
elif not NLgeom and not modelDB.db[self.model]['linear static']:
msg('________________________________________________',
level=1, silent=silent)
msg('', level=1, silent=silent)
warn('Model {} cannot be used in linear static analysis!'.
format(self.model), level=1, silent=silent)
msg('________________________________________________',
level=1, silent=silent)
            raise ValueError('Model {} cannot be used in linear '
                             'static analysis!'.format(self.model))
self.analysis.static(NLgeom=NLgeom, silent=silent)
self.cs = self.analysis.cs
self.increments = self.analysis.increments
return self.analysis.cs
def plot(self, c, invert_theta=False, plot_type=1, vec='w',
deform_u=False, deform_u_sf=100.,
filename='',
ax=None, figsize=(3.5, 2.), save=True,
add_title=False, title='',
colorbar=False, cbar_nticks=2, cbar_format=None,
cbar_title='', cbar_fontsize=10,
aspect='equal', clean=True, dpi=400,
texts=[], xs=None, ts=None, gridx=300, gridt=300,
num_levels=400, vecmin=None, vecmax=None):
r"""Contour plot for a Ritz constants vector.
Parameters
----------
c : np.ndarray
The Ritz constants that will be used to compute the field contour.
vec : str, optional
Can be one of the components:
- Displacement: ``'u'``, ``'v'``, ``'w'``, ``'phix'``, ``'phit'``
- Strain: ``'exx'``, ``'ett'``, ``'gxt'``, ``'kxx'``, ``'ktt'``,
``'kxt'``, ``'gtz'``, ``'gxz'``
- Stress: ``'Nxx'``, ``'Ntt'``, ``'Nxt'``, ``'Mxx'``, ``'Mtt'``,
``'Mxt'``, ``'Qt'``, ``'Qx'``
deform_u : bool, optional
If ``True`` the contour plot will look deformed.
deform_u_sf : float, optional
The scaling factor used to deform the contour.
invert_theta : bool, optional
Inverts the `\theta` axis of the plot. It may be used to match
the coordinate system of the finite element models created
using the ``desicos.abaqus`` module.
plot_type : int, optional
For cylinders only ``4`` and ``5`` are valid.
For cones all the following types can be used:
- ``1``: concave up (with ``invert_theta=False``) (default)
- ``2``: concave down (with ``invert_theta=False``)
- ``3``: stretched closed
- ``4``: stretched opened (`r \times \theta` vs. `L`)
- ``5``: stretched opened (`\theta` vs. `L`)
save : bool, optional
Flag telling whether the contour should be saved to an image file.
dpi : int, optional
Resolution of the saved file in dots per inch.
filename : str, optional
The file name for the generated image file. If no value is given,
the `name` parameter of the ``KPanelT`` object will be used.
ax : AxesSubplot, optional
When ``ax`` is given, the contour plot will be created inside it.
figsize : tuple, optional
The figure size given by ``(width, height)``.
add_title : bool, optional
If a title should be added to the figure.
title : str, optional
If any string is given ``add_title`` will be ignored and the given
title added to the contour plot.
colorbar : bool, optional
If a colorbar should be added to the contour plot.
cbar_nticks : int, optional
Number of ticks added to the colorbar.
cbar_format : [ None | format string | Formatter object ], optional
See the ``matplotlib.pyplot.colorbar`` documentation.
cbar_fontsize : int, optional
Fontsize of the colorbar labels.
cbar_title : str, optional
Colorbar title. If ``cbar_title == ''`` no title is added.
aspect : str, optional
String that will be passed to the ``AxesSubplot.set_aspect()``
method.
clean : bool, optional
Clean axes ticks, grids, spines etc.
xs : np.ndarray, optional
The `x` positions where to calculate the displacement field.
Default is ``None`` and the method ``_default_field`` is used.
ts : np.ndarray, optional
The ``theta`` positions where to calculate the displacement field.
Default is ``None`` and the method ``_default_field`` is used.
gridx : int, optional
Number of points along the `x` axis where to calculate the
displacement field.
gridt : int, optional
Number of points along the `theta` where to calculate the
displacement field.
num_levels : int, optional
Number of contour levels (higher values make the contour smoother).
vecmin : float, optional
Minimum value for the contour scale (useful to compare with other
results). If not specified it will be taken from the calculated
field.
vecmax : float, optional
Maximum value for the contour scale.
Returns
-------
ax : matplotlib.axes.Axes
The Matplotlib object that can be used to modify the current plot
if needed.
"""
msg('Plotting contour...')
ubkp, vbkp, wbkp, phixbkp, phitbkp = (self.u, self.v, self.w,
self.phix, self.phit)
import matplotlib.pyplot as plt
import matplotlib
msg('Computing field variables...', level=1)
displs = ['u', 'v', 'w', 'phix', 'phit']
strains = ['exx', 'ett', 'gxt', 'kxx', 'ktt', 'kxt', 'gtz', 'gxz']
stresses = ['Nxx', 'Ntt', 'Nxt', 'Mxx', 'Mtt', 'Mxt', 'Qt', 'Qx']
if vec in displs:
self.uvw(c, xs=xs, ts=ts, gridx=gridx, gridt=gridt)
field = getattr(self, vec)
elif vec in strains:
es = self.strain(c, xs=xs, ts=ts,
gridx=gridx, gridt=gridt)
field = es[..., strains.index(vec)]
elif vec in stresses:
Ns = self.stress(c, xs=xs, ts=ts,
gridx=gridx, gridt=gridt)
field = Ns[..., stresses.index(vec)]
else:
raise ValueError(
'{0} is not a valid vec parameter value!'.format(vec))
msg('Finished!', level=1)
Xs = self.Xs
Ts = self.Ts
if vecmin is None:
vecmin = field.min()
if vecmax is None:
vecmax = field.max()
levels = linspace(vecmin, vecmax, num_levels)
if ax is None:
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111)
else:
if isinstance(ax, matplotlib.axes.Axes):
ax = ax
fig = ax.figure
save = False
else:
raise ValueError('ax must be an Axes object')
def r(x):
return self.r1 - self.sina*(x + self.L/2.)
if self.is_cylinder:
plot_type = 4
if plot_type == 1:
r_plot = self.r2/self.sina + self.L/2.-Xs
r_plot_max = self.r2/self.sina + self.L
y = r_plot_max - r_plot*cos(Ts*self.sina)
x = r_plot*sin(Ts*self.sina)
elif plot_type == 2:
r_plot = self.r2/self.sina + self.L/2.-Xs
y = r_plot*cos(Ts*self.sina)
x = r_plot*sin(Ts*self.sina)
elif plot_type == 3:
r_plot = self.r2/self.sina + self.L/2.-Xs
r_plot_max = self.r2/self.sina + self.L
y = r_plot_max - r_plot*cos(Ts)
x = r_plot*sin(Ts)
elif plot_type == 4:
x = r(Xs)*Ts
y = Xs
elif plot_type == 5:
x = Ts
y = Xs
if deform_u:
if vec in displs:
pass
else:
self.uvw(c, xs=xs, ts=ts, gridx=gridx, gridt=gridt)
field_u = self.u
field_v = self.v
y -= deform_u_sf*field_u
x += deform_u_sf*field_v
contour = ax.contourf(x, y, field, levels=levels)
if colorbar:
from mpl_toolkits.axes_grid1 import make_axes_locatable
fsize = cbar_fontsize
divider = make_axes_locatable(ax)
cax = divider.append_axes('right', size='5%', pad=0.05)
cbarticks = linspace(vecmin, vecmax, cbar_nticks)
cbar = plt.colorbar(contour, ticks=cbarticks, format=cbar_format,
cax=cax)
if cbar_title:
cax.text(0.5, 1.05, cbar_title, horizontalalignment='center',
verticalalignment='bottom', fontsize=fsize)
cbar.outline.remove()
cbar.ax.tick_params(labelsize=fsize, pad=0., tick2On=False)
if invert_theta == True:
ax.invert_yaxis()
ax.invert_xaxis()
if title != '':
ax.set_title(str(title))
elif add_title:
if self.analysis.last_analysis == 'static':
ax.set_title('$m_1, n_1={0}, {1}$'.format(self.m1, self.n1))
elif self.analysis.last_analysis == 'lb':
ax.set_title(
                    r'$m_1, n_1={0}, {1}$, $\lambda_{{CR}}={2:1.3e}$'.format(self.m1,
self.n1, self.eigvals[0]))
fig.tight_layout()
ax.set_aspect(aspect)
ax.grid(False)
ax.set_frame_on(False)
if clean:
ax.xaxis.set_ticks_position('none')
ax.yaxis.set_ticks_position('none')
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
else:
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
for kwargs in texts:
ax.text(transform=ax.transAxes, **kwargs)
if save:
if not filename:
filename = 'test.png'
fig.savefig(filename, transparent=True,
bbox_inches='tight', pad_inches=0.05, dpi=dpi)
plt.close()
if ubkp is not None:
self.u = ubkp
if vbkp is not None:
self.v = vbkp
if wbkp is not None:
self.w = wbkp
if phixbkp is not None:
self.phix = phixbkp
if phitbkp is not None:
self.phit = phitbkp
msg('finished!')
return ax
def save(self):
"""Save the ``KPanelT`` object using ``cPickle``
Notes
-----
The pickled file will have the name stored in ``KPanelT.name``
followed by a ``'.KPanelT'`` extension.
"""
name = self.name + '.KPanelT'
msg('Saving KPanelT to {}'.format(name))
self._clear_matrices()
with open(name, 'wb') as f:
cPickle.dump(self, f, protocol=cPickle.HIGHEST_PROTOCOL)
| bsd-3-clause |
yanlend/scikit-learn | examples/covariance/plot_mahalanobis_distances.py | 348 | 6232 | r"""
================================================================
Robust covariance estimation and Mahalanobis distances relevance
================================================================
An example to show covariance estimation with the Mahalanobis
distances on Gaussian distributed data.
For Gaussian distributed data, the distance of an observation
:math:`x_i` to the mode of the distribution can be computed using its
Mahalanobis distance: :math:`d_{(\mu,\Sigma)}(x_i)^2 = (x_i -
\mu)'\Sigma^{-1}(x_i - \mu)` where :math:`\mu` and :math:`\Sigma` are
the location and the covariance of the underlying Gaussian
distribution.
In practice, :math:`\mu` and :math:`\Sigma` are replaced by some
estimates. The usual covariance maximum likelihood estimate is very
sensitive to the presence of outliers in the data set and therefore so are
the corresponding Mahalanobis distances. It is better to use a robust
estimator of covariance to guarantee that the estimation is
resistant to "erroneous" observations in the data set and that the
associated Mahalanobis distances accurately reflect the true
organisation of the observations.
The Minimum Covariance Determinant estimator is a robust,
high-breakdown point (i.e. it can be used to estimate the covariance
matrix of highly contaminated datasets, up to
:math:`\frac{n_\text{samples}-n_\text{features}-1}{2}` outliers)
estimator of covariance. The idea is to find
:math:`\frac{n_\text{samples}+n_\text{features}+1}{2}`
observations whose empirical covariance has the smallest determinant,
yielding a "pure" subset of observations from which to compute
standards estimates of location and covariance.
The Minimum Covariance Determinant estimator (MCD) has been introduced
by P. J. Rousseeuw in [1].
This example illustrates how the Mahalanobis distances are affected by
outlying data: observations drawn from a contaminating distribution
are not distinguishable from the observations coming from the real,
Gaussian distribution that one may want to work with. Using MCD-based
Mahalanobis distances, the two populations become
distinguishable. Associated applications are outliers detection,
observations ranking, clustering, ...
For visualization purposes, the cube root of the Mahalanobis distances
is represented in the boxplot, as Wilson and Hilferty suggest [2].
[1] P. J. Rousseeuw. Least median of squares regression. J. Am
Stat Ass, 79:871, 1984.
[2] Wilson, E. B., & Hilferty, M. M. (1931). The distribution of chi-square.
Proceedings of the National Academy of Sciences of the United States
of America, 17, 684-688.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.covariance import EmpiricalCovariance, MinCovDet
n_samples = 125
n_outliers = 25
n_features = 2
# generate data
gen_cov = np.eye(n_features)
gen_cov[0, 0] = 2.
X = np.dot(np.random.randn(n_samples, n_features), gen_cov)
# add some outliers
outliers_cov = np.eye(n_features)
outliers_cov[np.arange(1, n_features), np.arange(1, n_features)] = 7.
X[-n_outliers:] = np.dot(np.random.randn(n_outliers, n_features), outliers_cov)
# fit a Minimum Covariance Determinant (MCD) robust estimator to data
robust_cov = MinCovDet().fit(X)
# compare estimators learnt from the full data set with true parameters
emp_cov = EmpiricalCovariance().fit(X)
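# Illustrative check (added; not part of the original example): the Mahalanobis
# distance defined above, d^2 = (x - mu)' Sigma^{-1} (x - mu), can be
# reproduced by hand from the fitted MLE estimator.
x0 = X[0]
delta = x0 - emp_cov.location_
d2_manual = delta.dot(np.linalg.inv(emp_cov.covariance_)).dot(delta)
d2_estimator = emp_cov.mahalanobis(X[:1])[0]  # equal up to numerical precision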
###############################################################################
# Display results
fig = plt.figure()
plt.subplots_adjust(hspace=-.1, wspace=.4, top=.95, bottom=.05)
# Show data set
subfig1 = plt.subplot(3, 1, 1)
inlier_plot = subfig1.scatter(X[:, 0], X[:, 1],
color='black', label='inliers')
outlier_plot = subfig1.scatter(X[:, 0][-n_outliers:], X[:, 1][-n_outliers:],
color='red', label='outliers')
subfig1.set_xlim(subfig1.get_xlim()[0], 11.)
subfig1.set_title("Mahalanobis distances of a contaminated data set:")
# Show contours of the distance functions
xx, yy = np.meshgrid(np.linspace(plt.xlim()[0], plt.xlim()[1], 100),
np.linspace(plt.ylim()[0], plt.ylim()[1], 100))
zz = np.c_[xx.ravel(), yy.ravel()]
mahal_emp_cov = emp_cov.mahalanobis(zz)
mahal_emp_cov = mahal_emp_cov.reshape(xx.shape)
emp_cov_contour = subfig1.contour(xx, yy, np.sqrt(mahal_emp_cov),
cmap=plt.cm.PuBu_r,
linestyles='dashed')
mahal_robust_cov = robust_cov.mahalanobis(zz)
mahal_robust_cov = mahal_robust_cov.reshape(xx.shape)
robust_contour = subfig1.contour(xx, yy, np.sqrt(mahal_robust_cov),
cmap=plt.cm.YlOrBr_r, linestyles='dotted')
subfig1.legend([emp_cov_contour.collections[1], robust_contour.collections[1],
inlier_plot, outlier_plot],
['MLE dist', 'robust dist', 'inliers', 'outliers'],
loc="upper right", borderaxespad=0)
plt.xticks(())
plt.yticks(())
# Plot the scores for each point
emp_mahal = emp_cov.mahalanobis(X - np.mean(X, 0)) ** (0.33)
subfig2 = plt.subplot(2, 2, 3)
subfig2.boxplot([emp_mahal[:-n_outliers], emp_mahal[-n_outliers:]], widths=.25)
subfig2.plot(1.26 * np.ones(n_samples - n_outliers),
emp_mahal[:-n_outliers], '+k', markeredgewidth=1)
subfig2.plot(2.26 * np.ones(n_outliers),
emp_mahal[-n_outliers:], '+k', markeredgewidth=1)
subfig2.axes.set_xticklabels(('inliers', 'outliers'), size=15)
subfig2.set_ylabel(r"$\sqrt[3]{\rm{(Mahal. dist.)}}$", size=16)
subfig2.set_title("1. from non-robust estimates\n(Maximum Likelihood)")
plt.yticks(())
robust_mahal = robust_cov.mahalanobis(X - robust_cov.location_) ** (0.33)
subfig3 = plt.subplot(2, 2, 4)
subfig3.boxplot([robust_mahal[:-n_outliers], robust_mahal[-n_outliers:]],
widths=.25)
subfig3.plot(1.26 * np.ones(n_samples - n_outliers),
robust_mahal[:-n_outliers], '+k', markeredgewidth=1)
subfig3.plot(2.26 * np.ones(n_outliers),
robust_mahal[-n_outliers:], '+k', markeredgewidth=1)
subfig3.axes.set_xticklabels(('inliers', 'outliers'), size=15)
subfig3.set_ylabel(r"$\sqrt[3]{\rm{(Mahal. dist.)}}$", size=16)
subfig3.set_title("2. from robust estimates\n(Minimum Covariance Determinant)")
plt.yticks(())
plt.show()
| bsd-3-clause |
pyrocko/pyrocko | test/base/test_gshhg.py | 1 | 7321 | from __future__ import division, print_function, absolute_import
import os
import unittest
import numpy as num
from numpy.testing import assert_array_less
from pyrocko.dataset import gshhg
from pyrocko import util
plot = int(os.environ.get('MPL_SHOW', 0))
class BB(object):
west = -10.
east = 20.
south = 35.
north = 55.
tpl = (west, east, south, north)
class BBLakes(object):
west = 4.41
east = 13.14
south = 42.15
north = 49.32
tpl = (west, east, south, north)
class BBPonds(object):
west, east, south, north = (
304.229444, 306.335556, -3.228889, -1.1358329999999999)
tpl = (west, east, south, north)
class BBOtherSide(object):
west = -5.
east = 5.
south = 30.
north = 50.
tpl = (west, east, south, north)
class GSHHGTest(unittest.TestCase):
def setUp(self):
self.gshhg = gshhg.GSHHG.intermediate()
def test_polygon_loading_points(self):
for ipoly in range(10):
self.gshhg.polygons[ipoly].points
def slow_bb_ranges(self):
for gs in [
gshhg.GSHHG.crude(),
gshhg.GSHHG.low(),
gshhg.GSHHG.intermediate(),
gshhg.GSHHG.high(),
gshhg.GSHHG.full()]:
for ipoly, poly in enumerate(gs.polygons):
assert gshhg.is_valid_polygon(poly.points)
assert gshhg.is_valid_bounding_box(poly.get_bounding_box())
assert gshhg.is_polygon_in_bounding_box(
poly.points, poly.get_bounding_box())
def test_polygon_contains_point(self):
poly = self.gshhg.polygons[-1]
p_within = num.array(
[poly.south + (poly.north - poly.south) / 2,
poly.west + (poly.east - poly.west) / 2])
p_outside = num.array(
[poly.north + (poly.north - poly.south) / 2,
poly.east + (poly.east - poly.west) / 2])
if plot:
import matplotlib.pyplot as plt
ax = plt.axes()
self.plot_polygons([poly], ax)
ax.scatter(p_within[1], p_within[0], s=10.)
ax.scatter(p_outside[1], p_outside[0], s=10.)
plt.show()
assert poly.contains_point(p_within) is True
assert poly.contains_point(p_outside) is False
def test_latlon(self):
p = self.gshhg.polygons[0]
assert_array_less(num.zeros_like(p.lons) - 0.001, p.lons)
assert_array_less(p.lats, num.ones_like(p.lats) * 90 + 0.001)
def test_polygon_level_selection(self):
for p in self.gshhg.polygons:
p.is_land()
p.is_island_in_lake()
p.is_pond_in_island_in_lake()
p.is_antarctic_icefront()
p.is_antarctic_grounding_line()
def test_polygon_contains_points(self):
poly = self.gshhg.polygons[0]
points = num.array(
[num.random.uniform(BB.south, BB.north, size=100),
num.random.uniform(BB.east, BB.west, size=100)]).T
pts = poly.contains_points(points)
if plot:
import matplotlib.pyplot as plt
points = points[pts]
colors = num.ones(points.shape[0]) * pts[pts]
ax = plt.axes()
self.plot_polygons([poly], ax)
ax.scatter(points[:, 1], points[:, 0], c=colors, s=.5)
plt.show()
def test_bounding_box_select(self):
p = self.gshhg.get_polygons_within(*BB.tpl)
if plot:
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
ax = plt.axes()
self.plot_polygons(p, ax)
ax.add_patch(
Rectangle([BB.west, BB.south],
width=BB.east-BB.west, height=BB.north-BB.south,
alpha=.2))
plt.show()
def test_mask_land(self):
poly = self.gshhg.get_polygons_within(*BBPonds.tpl)
points = num.array(
[num.random.uniform(BBPonds.south, BBPonds.north, size=10000),
num.random.uniform(BBPonds.east, BBPonds.west, size=10000)]).T
pts = self.gshhg.get_land_mask(points)
if plot:
import matplotlib.pyplot as plt
points = points
colors = num.ones(points.shape[0]) * pts
# colors = num.ones(points.shape[0]) * pts
ax = plt.axes()
self.plot_polygons(poly, ax)
ax.scatter(points[:, 1], points[:, 0], c=colors, s=.5, zorder=2)
plt.show()
for is_land, point in zip(pts[:20], points[:20]):
is_land2 = self.gshhg.is_point_on_land(*point)
assert is_land == is_land2
def test_is_point_on_land(self):
point = (46.455289, 6.494283)
p = self.gshhg.get_polygons_at(*point)
assert self.gshhg.is_point_on_land(*point) is False
if plot:
import matplotlib.pyplot as plt
ax = plt.axes()
self.plot_polygons(p, ax)
ax.scatter(point[1], point[0])
plt.show()
@staticmethod
def plot_polygons(polygons, ax, **kwargs):
# from matplotlib.patches import Polygon
from pyrocko.plot import mpl_color
args = {
'edgecolor': 'red',
}
args.update(kwargs)
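        # One color per GSHHG polygon level (level_no 1..6): land, lake,
        # island-in-lake, pond-in-island-in-lake, Antarctic ice front and
        # Antarctic grounding line (ordering assumed from the GSHHG spec).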
colormap = [
mpl_color('aluminium2'),
mpl_color('skyblue1'),
mpl_color('aluminium4'),
mpl_color('skyblue2'),
mpl_color('white'),
mpl_color('aluminium1')]
for p in polygons:
ax.plot(
p.points[:, 1], p.points[:, 0],
color=colormap[p.level_no-1])
# map(ax.add_patch, [Polygon(num.fliplr(p.points),
# facecolor=colormap[p.level_no-1],
# **args)
# for p in polygons])
def test_pond(self):
for poly in self.gshhg.polygons:
if poly.is_pond_in_island_in_lake():
(w, e, s, n) = poly.get_bounding_box()
# print (w-1., e+1, s-1., n+1)
polys2 = self.gshhg.get_polygons_within(w-1., e+1, s-1., n+1)
if plot:
import matplotlib.pyplot as plt
ax = plt.axes()
self.plot_polygons(polys2, ax)
ax.autoscale_view()
plt.show()
def test_other_side(self):
bb = BBOtherSide
poly = self.gshhg.get_polygons_within(*bb.tpl)
points = num.array(
[num.random.uniform(bb.south, bb.north, size=1000),
num.random.uniform(bb.east, bb.west, size=1000)]).T
pts = self.gshhg.get_land_mask(points)
if plot:
import matplotlib.pyplot as plt
points = points
colors = num.ones(points.shape[0]) * pts
# colors = num.ones(points.shape[0]) * pts
ax = plt.axes()
self.plot_polygons(poly, ax)
ax.scatter(points[:, 1], points[:, 0], c=colors, s=.5, zorder=2)
plt.show()
if __name__ == "__main__":
plot = False
util.setup_logging('test_gshhg', 'debug')
unittest.main(exit=False)
| gpl-3.0 |
mixturemodel-flow/tensorflow | tensorflow/tools/dist_test/python/census_widendeep.py | 42 | 11900 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Distributed training and evaluation of a wide and deep model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import json
import os
import sys
from six.moves import urllib
import tensorflow as tf
from tensorflow.contrib.learn.python.learn import learn_runner
from tensorflow.contrib.learn.python.learn.estimators import run_config
# Constants: Data download URLs
TRAIN_DATA_URL = "http://mlr.cs.umass.edu/ml/machine-learning-databases/adult/adult.data"
TEST_DATA_URL = "http://mlr.cs.umass.edu/ml/machine-learning-databases/adult/adult.test"
# Define features for the model
def census_model_config():
"""Configuration for the census Wide & Deep model.
Returns:
columns: Column names to retrieve from the data source
label_column: Name of the label column
wide_columns: List of wide columns
deep_columns: List of deep columns
categorical_column_names: Names of the categorical columns
continuous_column_names: Names of the continuous columns
"""
# 1. Categorical base columns.
gender = tf.contrib.layers.sparse_column_with_keys(
column_name="gender", keys=["female", "male"])
race = tf.contrib.layers.sparse_column_with_keys(
column_name="race",
keys=["Amer-Indian-Eskimo",
"Asian-Pac-Islander",
"Black",
"Other",
"White"])
education = tf.contrib.layers.sparse_column_with_hash_bucket(
"education", hash_bucket_size=1000)
marital_status = tf.contrib.layers.sparse_column_with_hash_bucket(
"marital_status", hash_bucket_size=100)
relationship = tf.contrib.layers.sparse_column_with_hash_bucket(
"relationship", hash_bucket_size=100)
workclass = tf.contrib.layers.sparse_column_with_hash_bucket(
"workclass", hash_bucket_size=100)
occupation = tf.contrib.layers.sparse_column_with_hash_bucket(
"occupation", hash_bucket_size=1000)
native_country = tf.contrib.layers.sparse_column_with_hash_bucket(
"native_country", hash_bucket_size=1000)
# 2. Continuous base columns.
age = tf.contrib.layers.real_valued_column("age")
age_buckets = tf.contrib.layers.bucketized_column(
age, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
education_num = tf.contrib.layers.real_valued_column("education_num")
capital_gain = tf.contrib.layers.real_valued_column("capital_gain")
capital_loss = tf.contrib.layers.real_valued_column("capital_loss")
hours_per_week = tf.contrib.layers.real_valued_column("hours_per_week")
wide_columns = [
gender, native_country, education, occupation, workclass,
marital_status, relationship, age_buckets,
tf.contrib.layers.crossed_column([education, occupation],
hash_bucket_size=int(1e4)),
tf.contrib.layers.crossed_column([native_country, occupation],
hash_bucket_size=int(1e4)),
tf.contrib.layers.crossed_column([age_buckets, race, occupation],
hash_bucket_size=int(1e6))]
deep_columns = [
tf.contrib.layers.embedding_column(workclass, dimension=8),
tf.contrib.layers.embedding_column(education, dimension=8),
tf.contrib.layers.embedding_column(marital_status, dimension=8),
tf.contrib.layers.embedding_column(gender, dimension=8),
tf.contrib.layers.embedding_column(relationship, dimension=8),
tf.contrib.layers.embedding_column(race, dimension=8),
tf.contrib.layers.embedding_column(native_country, dimension=8),
tf.contrib.layers.embedding_column(occupation, dimension=8),
age, education_num, capital_gain, capital_loss, hours_per_week]
# Define the column names for the data sets.
columns = ["age", "workclass", "fnlwgt", "education", "education_num",
"marital_status", "occupation", "relationship", "race", "gender",
"capital_gain", "capital_loss", "hours_per_week",
"native_country", "income_bracket"]
label_column = "label"
categorical_columns = ["workclass", "education", "marital_status",
"occupation", "relationship", "race", "gender",
"native_country"]
continuous_columns = ["age", "education_num", "capital_gain",
"capital_loss", "hours_per_week"]
return (columns, label_column, wide_columns, deep_columns,
categorical_columns, continuous_columns)
class CensusDataSource(object):
"""Source of census data."""
def __init__(self, data_dir, train_data_url, test_data_url,
columns, label_column,
categorical_columns, continuous_columns):
"""Constructor of CensusDataSource.
Args:
data_dir: Directory to save/load the data files
train_data_url: URL from which the training data can be downloaded
test_data_url: URL from which the test data can be downloaded
columns: Columns to retrieve from the data files (A list of strings)
label_column: Name of the label column
categorical_columns: Names of the categorical columns (A list of strings)
continuous_columns: Names of the continuous columns (A list of strings)
"""
# Retrieve data from disk (if available) or download from the web.
train_file_path = os.path.join(data_dir, "adult.data")
if os.path.isfile(train_file_path):
print("Loading training data from file: %s" % train_file_path)
train_file = open(train_file_path)
else:
      urllib.request.urlretrieve(train_data_url, train_file_path)
      train_file = open(train_file_path)
test_file_path = os.path.join(data_dir, "adult.test")
if os.path.isfile(test_file_path):
print("Loading test data from file: %s" % test_file_path)
test_file = open(test_file_path)
else:
      urllib.request.urlretrieve(test_data_url, test_file_path)
      test_file = open(test_file_path)
# Read the training and testing data sets into Pandas DataFrame.
import pandas # pylint: disable=g-import-not-at-top
self._df_train = pandas.read_csv(train_file, names=columns,
skipinitialspace=True)
self._df_test = pandas.read_csv(test_file, names=columns,
skipinitialspace=True, skiprows=1)
# Remove the NaN values in the last rows of the tables
self._df_train = self._df_train[:-1]
self._df_test = self._df_test[:-1]
# Apply the threshold to get the labels.
income_thresh = lambda x: ">50K" in x
self._df_train[label_column] = (
self._df_train["income_bracket"].apply(income_thresh)).astype(int)
self._df_test[label_column] = (
self._df_test["income_bracket"].apply(income_thresh)).astype(int)
self.label_column = label_column
self.categorical_columns = categorical_columns
self.continuous_columns = continuous_columns
def input_train_fn(self):
return self._input_fn(self._df_train)
def input_test_fn(self):
return self._input_fn(self._df_test)
# TODO(cais): Turn into minibatch feeder
def _input_fn(self, df):
"""Input data function.
Creates a dictionary mapping from each continuous feature column name
(k) to the values of that column stored in a constant Tensor.
Args:
df: data feed
Returns:
feature columns and labels
"""
continuous_cols = {k: tf.constant(df[k].values)
for k in self.continuous_columns}
# Creates a dictionary mapping from each categorical feature column name (k)
# to the values of that column stored in a tf.SparseTensor.
categorical_cols = {
k: tf.SparseTensor(
indices=[[i, 0] for i in range(df[k].size)],
values=df[k].values,
dense_shape=[df[k].size, 1])
for k in self.categorical_columns}
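    # For a column holding e.g. ['Private', 'Self-emp', ...] this yields a
    # SparseTensor of dense_shape [n_rows, 1] with one string value per row,
    # which is the layout the sparse/embedding feature columns above expect.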
# Merges the two dictionaries into one.
    feature_cols = dict(list(continuous_cols.items()) +
                        list(categorical_cols.items()))
# Converts the label column into a constant Tensor.
label = tf.constant(df[self.label_column].values)
# Returns the feature columns and the label.
return feature_cols, label
def _create_experiment_fn(output_dir): # pylint: disable=unused-argument
"""Experiment creation function."""
(columns, label_column, wide_columns, deep_columns, categorical_columns,
continuous_columns) = census_model_config()
census_data_source = CensusDataSource(FLAGS.data_dir,
TRAIN_DATA_URL, TEST_DATA_URL,
columns, label_column,
categorical_columns,
continuous_columns)
os.environ["TF_CONFIG"] = json.dumps({
"cluster": {
tf.contrib.learn.TaskType.PS: ["fake_ps"] *
FLAGS.num_parameter_servers
},
"task": {
"index": FLAGS.worker_index
}
})
config = run_config.RunConfig(master=FLAGS.master_grpc_url)
estimator = tf.contrib.learn.DNNLinearCombinedClassifier(
model_dir=FLAGS.model_dir,
linear_feature_columns=wide_columns,
dnn_feature_columns=deep_columns,
dnn_hidden_units=[5],
config=config)
return tf.contrib.learn.Experiment(
estimator=estimator,
train_input_fn=census_data_source.input_train_fn,
eval_input_fn=census_data_source.input_test_fn,
train_steps=FLAGS.train_steps,
eval_steps=FLAGS.eval_steps
)
def main(unused_argv):
print("Worker index: %d" % FLAGS.worker_index)
learn_runner.run(experiment_fn=_create_experiment_fn,
output_dir=FLAGS.output_dir,
schedule=FLAGS.schedule)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument(
"--data_dir",
type=str,
default="/tmp/census-data",
help="Directory for storing the cesnsus data"
)
parser.add_argument(
"--model_dir",
type=str,
default="/tmp/census_wide_and_deep_model",
help="Directory for storing the model"
)
parser.add_argument(
"--output_dir",
type=str,
default="",
help="Base output directory."
)
parser.add_argument(
"--schedule",
type=str,
default="local_run",
help="Schedule to run for this experiment."
)
parser.add_argument(
"--master_grpc_url",
type=str,
default="",
help="URL to master GRPC tensorflow server, e.g.,grpc://127.0.0.1:2222"
)
parser.add_argument(
"--num_parameter_servers",
type=int,
default=0,
help="Number of parameter servers"
)
parser.add_argument(
"--worker_index",
type=int,
default=0,
help="Worker index (>=0)"
)
parser.add_argument(
"--train_steps",
type=int,
default=1000,
help="Number of training steps"
)
parser.add_argument(
"--eval_steps",
type=int,
default=1,
help="Number of evaluation steps"
)
global FLAGS # pylint:disable=global-at-module-level
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
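# Example invocation (illustrative; the address and step counts are
# placeholders):
#   python census_widendeep.py --master_grpc_url=grpc://127.0.0.1:2222 \
#       --num_parameter_servers=2 --worker_index=0 \
#       --train_steps=1000 --eval_steps=1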
| apache-2.0 |
Weihonghao/ECM | Vpy34/lib/python3.5/site-packages/tensorflow/contrib/learn/python/learn/dataframe/dataframe.py | 85 | 4704 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A DataFrame is a container for ingesting and preprocessing data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from .series import Series
from .transform import Transform
class DataFrame(object):
"""A DataFrame is a container for ingesting and preprocessing data."""
def __init__(self):
self._columns = {}
def columns(self):
"""Set of the column names."""
return frozenset(self._columns.keys())
def __len__(self):
"""The number of columns in the DataFrame."""
return len(self._columns)
def assign(self, **kwargs):
"""Adds columns to DataFrame.
Args:
**kwargs: assignments of the form key=value where key is a string
        and value is an `inflow.Series`, a zero-input `inflow.Transform`, or
        `None` (which removes the column).
Raises:
TypeError: keys are not strings.
      TypeError: values are not `inflow.Series`, zero-input `inflow.Transform`,
        or `None`.
TODO(jamieas): pandas assign method returns a new DataFrame. Consider
switching to this behavior, changing the name or adding in_place as an
argument.
"""
for k, v in kwargs.items():
if not isinstance(k, str):
raise TypeError("The only supported type for keys is string; got %s" %
type(k))
if v is None:
del self._columns[k]
elif isinstance(v, Series):
self._columns[k] = v
elif isinstance(v, Transform) and v.input_valency() == 0:
self._columns[k] = v()
else:
raise TypeError(
"Column in assignment must be an inflow.Series, inflow.Transform,"
" or None; got type '%s'." % type(v).__name__)
def select_columns(self, keys):
"""Returns a new DataFrame with a subset of columns.
Args:
keys: A list of strings. Each should be the name of a column in the
DataFrame.
Returns:
A new DataFrame containing only the specified columns.
"""
result = type(self)()
for key in keys:
result[key] = self._columns[key]
return result
def exclude_columns(self, exclude_keys):
"""Returns a new DataFrame with all columns not excluded via exclude_keys.
Args:
exclude_keys: A list of strings. Each should be the name of a column in
the DataFrame. These columns will be excluded from the result.
Returns:
A new DataFrame containing all columns except those specified.
"""
result = type(self)()
for key, value in self._columns.items():
if key not in exclude_keys:
result[key] = value
return result
def __getitem__(self, key):
"""Indexing functionality for DataFrames.
Args:
key: a string or an iterable of strings.
Returns:
A Series or list of Series corresponding to the given keys.
"""
if isinstance(key, str):
return self._columns[key]
elif isinstance(key, collections.Iterable):
for i in key:
if not isinstance(i, str):
raise TypeError("Expected a String; entry %s has type %s." %
(i, type(i).__name__))
return [self.__getitem__(i) for i in key]
raise TypeError(
"Invalid index: %s of type %s. Only strings or lists of strings are "
"supported." % (key, type(key)))
def __setitem__(self, key, value):
if isinstance(key, str):
key = [key]
if isinstance(value, Series):
value = [value]
self.assign(**dict(zip(key, value)))
def __delitem__(self, key):
if isinstance(key, str):
key = [key]
value = [None for _ in key]
self.assign(**dict(zip(key, value)))
def build(self, **kwargs):
# We do not allow passing a cache here, because that would encourage
# working around the rule that DataFrames cannot be expected to be
# synced with each other (e.g., they shuffle independently).
cache = {}
tensors = {name: c.build(cache, **kwargs)
for name, c in self._columns.items()}
return tensors
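# Hedged usage sketch (illustrative only; ``some_series`` stands for any
# pre-built `Series`, whose construction lives outside this module):
#
#   df = DataFrame()
#   df["age"] = some_series           # same as df.assign(age=some_series)
#   subset = df.select_columns(["age"])
#   tensors = subset.build()          # {"age": <output of some_series.build()>}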
| agpl-3.0 |
phiros/nepi | examples/omf/vod_exp/demo_plot.py | 1 | 10514 | import matplotlib
matplotlib.use('GTK')
import matplotlib.pyplot as plt
import numpy as np
import os
import time
import subprocess
##### Parsing Argument to Plot #####
from optparse import OptionParser
usage = ("usage: %prog -p <type-of-plot> -d <type-of-packets> -f <folder-with-stats>")
parser = OptionParser(usage = usage)
parser.add_option("-p", "--plot", dest="plot",
help="Type of Plot : vod_broad_cli | vod_broad_wlan | vod_broad_eth | broad_all | vod_all", type="string")
parser.add_option("-d", "--packet", dest="packet",
help="Packet to use for the plot : frames | bytes", type="string")
parser.add_option("-f", "--folder", dest="folder",
help="Folder with the statistics ", type="string")
(options, args) = parser.parse_args()
plot = options.plot
packet = options.packet
folder = options.folder
##### Initialize the data #####
overall_stats_broad = {}
overall_stats_vod = {}
for i in [1, 3, 5]:
overall_stats_broad[i] = {}
overall_stats_broad[i]['eth'] = []
overall_stats_broad[i]['wlan'] = []
overall_stats_broad[i]['cli'] = []
overall_stats_vod[i] = {}
overall_stats_vod[i]['eth'] = []
overall_stats_vod[i]['wlan'] = []
overall_stats_vod[i]['cli'] = []
all_broad_folders = os.listdir(folder + 'demo_openlab_traces/broadcast')
all_vod_folders = os.listdir(folder + 'demo_openlab_traces/vod')
data_broad_folders = list()
data_vod_folders = list()
# Loop to take only the wanted folder
for f in all_broad_folders :
if f.startswith('s_'):
data_broad_folders.append(f)
for f in all_vod_folders :
if f.startswith('s_'):
data_vod_folders.append(f)
##### For Broadcast #####
stats_broad_wlan = list()
stats_broad_eth = list()
stats_broad_cli = list()
# Write the wanted statistics into a file
for exp in data_broad_folders :
broad_file = os.listdir(folder + 'demo_openlab_traces/broadcast/'+exp)
for f in broad_file :
dest = folder + "demo_openlab_traces/broadcast/" + exp + "/stats_" + f + ".txt"
command = "tshark -r " + folder + "demo_openlab_traces/broadcast/" + exp + "/" + f + " -z io,phs > " + dest
if f.startswith('capwificen_wlan'):
p = subprocess.Popen(command , shell=True)
p.wait()
stats_broad_wlan.append(dest)
if f.startswith('capwificen_eth'):
p = subprocess.Popen(command , shell=True)
p.wait()
stats_broad_eth.append(dest)
if f.startswith('capcli'):
p = subprocess.Popen(command , shell=True)
p.wait()
stats_broad_cli.append(dest)
# Numer of client that was used
def nb_client(s):
elt = s.split('_')
if elt[-2] == '1':
return 1
if elt[-2] == '3':
return 3
if elt[-2] == '5':
return 5
# Return the value wanted
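# The tshark "-z io,phs" protocol-hierarchy report contains lines such as
# (format assumed from typical tshark output):
#     udp                                  frames:1185 bytes:155349
# so splitting on ':' puts the frame count at the start of field 1 and the
# byte count in field 2, which is what the parsers below rely on.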
def get_broad_values(list_files, type_file):
for s in list_files:
nb = nb_client(s)
o = open(s, 'r')
for l in o:
if 'udp' in l:
row = l.split(':')
f = row[1].split(' ')
frame = int(f[0])
byte = int(row[2])
res = {}
res['frames'] = frame
res['bytes'] = byte
if frame < 20 :
continue
overall_stats_broad[nb][type_file].append(res)
o.close()
get_broad_values(stats_broad_wlan, 'wlan')
get_broad_values(stats_broad_eth, 'eth')
get_broad_values(stats_broad_cli, 'cli')
#print overall_stats_broad
##### For VOD #####
stats_vod_wlan = list()
stats_vod_eth = list()
stats_vod_cli = list()
# Write the wanted statistics into a file
for exp in data_vod_folders :
vod_file = os.listdir(folder + 'demo_openlab_traces/vod/'+exp)
for f in vod_file :
dest = folder + "/demo_openlab_traces/vod/" + exp + "/stats_" + f + ".txt"
command = "tshark -r " + folder + "demo_openlab_traces/vod/" + exp + "/" + f + " -z io,phs > " + dest
if f.startswith('capwificen_wlan'):
p = subprocess.Popen(command , shell=True)
p.wait()
stats_vod_wlan.append(dest)
if f.startswith('capwificen_eth'):
p = subprocess.Popen(command , shell=True)
p.wait()
stats_vod_eth.append(dest)
if f.startswith('capcli'):
p = subprocess.Popen(command , shell=True)
p.wait()
stats_vod_cli.append(dest)
# Return the value wanted
def get_vod_values(list_files, type_file):
for s in list_files:
nb = nb_client(s)
o = open(s, 'r')
for l in o:
if 'udp' in l:
row = l.split(':')
f = row[1].split(' ')
frame = int(f[0])
byte = int(row[2])
res = {}
res['frames'] = frame
res['bytes'] = byte
if frame < 100 :
continue
overall_stats_vod[nb][type_file].append(res)
o.close()
get_vod_values(stats_vod_wlan, 'wlan')
get_vod_values(stats_vod_eth, 'eth')
get_vod_values(stats_vod_cli, 'cli')
#print overall_stats_vod
##### For Plotting #####
if plot != "vod_all":
means_broad_cli = list()
std_broad_cli = list()
means_broad_wlan = list()
std_broad_wlan = list()
means_broad_eth = list()
std_broad_eth = list()
for i in [1, 3, 5]:
data_cli = list()
for elt in overall_stats_broad[i]['cli']:
            data_cli.append(elt[packet])
samples = np.array(data_cli)
m = samples.mean()
std = np.std(data_cli)
means_broad_cli.append(m)
std_broad_cli.append(std)
data_wlan = list()
for elt in overall_stats_broad[i]['wlan']:
            data_wlan.append(elt[packet])
samples = np.array(data_wlan)
m = samples.mean()
std = np.std(data_wlan)
means_broad_wlan.append(m)
std_broad_wlan.append(std)
data_eth = list()
for elt in overall_stats_broad[i]['eth']:
            data_eth.append(elt[packet])
samples = np.array(data_eth)
m = samples.mean()
std = np.std(data_eth)
means_broad_eth.append(m)
std_broad_eth.append(std)
if plot != "broad_all":
means_vod_cli = list()
std_vod_cli = list()
means_vod_wlan = list()
std_vod_wlan = list()
means_vod_eth = list()
std_vod_eth = list()
for i in [1, 3, 5]:
data_cli = list()
for elt in overall_stats_vod[i]['cli']:
            data_cli.append(elt[packet])
samples = np.array(data_cli)
m = samples.mean()
std = np.std(data_cli)
means_vod_cli.append(m)
std_vod_cli.append(std)
data_wlan = list()
for elt in overall_stats_vod[i]['wlan']:
            data_wlan.append(elt[packet])
samples = np.array(data_wlan)
m = samples.mean()
std = np.std(data_wlan)
means_vod_wlan.append(m)
std_vod_wlan.append(std)
data_eth = list()
for elt in overall_stats_vod[i]['eth']:
            data_eth.append(elt[packet])
samples = np.array(data_eth)
m = samples.mean()
std = np.std(data_eth)
means_vod_eth.append(m)
std_vod_eth.append(std)
### To plot ###
n_groups = 3
#Put the right numbers
if plot == "broad_all":
means_bars1 = tuple(means_broad_cli)
std_bars1 = tuple(std_broad_cli)
means_bars2 = tuple(means_broad_wlan)
std_bars2 = tuple(std_broad_wlan)
means_bars3 = tuple(means_broad_eth)
std_bars3 = tuple(std_broad_eth)
if plot == "vod_all":
means_bars1 = tuple(means_vod_cli)
std_bars1 = tuple(std_vod_cli)
means_bars2 = tuple(means_vod_wlan)
std_bars2 = tuple(std_vod_wlan)
means_bars3 = tuple(means_vod_eth)
std_bars3 = tuple(std_vod_eth)
if plot == "vod_broad_cli":
means_bars1 = tuple(means_broad_cli)
std_bars1 = tuple(std_broad_cli)
means_bars2 = tuple(means_vod_cli)
std_bars2 = tuple(std_vod_cli)
if plot == "vod_broad_wlan":
means_bars1 = tuple(means_broad_wlan)
std_bars1 = tuple(std_broad_wlan)
means_bars2 = tuple(means_vod_wlan)
std_bars2 = tuple(std_vod_wlan)
if plot == "vod_broad_eth":
means_bars1 = tuple(means_broad_eth)
std_bars1 = tuple(std_broad_eth)
means_bars2 = tuple(means_vod_eth)
std_bars2 = tuple(std_vod_eth)
fig, ax = plt.subplots()
index = np.arange(n_groups)
bar_width = 0.3
opacity = 0.4
error_config = {'ecolor': '0.3'}
if plot == "vod_all" or plot == "broad_all" :
rects1 = plt.bar(index, means_bars1, bar_width,
alpha=opacity,
color='y',
yerr=std_bars1,
error_kw=error_config,
label='Client')
rects2 = plt.bar(index + bar_width, means_bars2, bar_width,
alpha=opacity,
color='g',
yerr=std_bars2,
error_kw=error_config,
label='Wlan')
rects3 = plt.bar(index + 2*bar_width, means_bars3, bar_width,
alpha=opacity,
color='r',
yerr=std_bars3,
error_kw=error_config,
label='Eth')
else :
rects1 = plt.bar(index, means_bars1, bar_width,
alpha=opacity,
color='y',
yerr=std_bars1,
error_kw=error_config,
label='Broadcast')
rects2 = plt.bar(index + bar_width, means_bars2, bar_width,
alpha=opacity,
color='g',
yerr=std_bars2,
error_kw=error_config,
label='VOD')
plt.xlabel('Number of Client')
if packet == "frames" :
plt.ylabel('Frames sent over UDP')
if packet == "bytes" :
plt.ylabel('Bytes sent over UDP')
if plot == "broad_all":
plt.title('Packet sent by number of client in broadcast mode')
if plot == "vod_all":
plt.title('Packet sent by number of client in VOD mode')
if plot == "vod_broad_cli":
plt.title('Packet received in average by client in broadcast and vod mode')
if plot == "vod_broad_wlan":
plt.title('Packet sent in average to the clients in broadcast and vod mode')
if plot == "vod_broad_eth":
plt.title('Packet received in average by the wifi center in broadcast and vod mode')
plt.xticks(index + bar_width, ('1', '3', '5'))
plt.legend()
#plt.tight_layout()
plt.show()
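# --- Hedged refactoring sketch (not used by the script above) -----------------
# The repeated mean/std loops over overall_stats_broad / overall_stats_vod could
# be collapsed into one helper.  This assumes the layout used in this file,
# i.e. stats[n_clients]['cli'|'wlan'|'eth'] is a list of {'frames': .., 'bytes': ..}
# dicts, and that numpy is already imported as np at the top of the script.
def aggregate_stats(stats, iface, key='frames', client_counts=(1, 3, 5)):
    """Return (means, stds) of `key` for each client count on interface `iface`."""
    means, stds = [], []
    for n in client_counts:
        samples = np.array([elt[key] for elt in stats[n][iface]])
        means.append(samples.mean())
        stds.append(samples.std())
    return means, stds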
| gpl-3.0 |
BioMedIA/IRTK | wrapping/cython/irtk/ext/Show3D.py | 5 | 3940 | __all__ = [ "render" ]
import vtk
import numpy as np
import sys
from scipy.stats.mstats import mquantiles
import scipy.interpolate
from vtk.util.vtkConstants import *
import matplotlib.pyplot as plt
def numpy2VTK(img,spacing=[1.0,1.0,1.0]):
# evolved from code from Stou S.,
# on http://www.siafoo.net/snippet/314
importer = vtk.vtkImageImport()
img_data = img.astype('uint8')
img_string = img_data.tostring() # type short
dim = img.shape
importer.CopyImportVoidPointer(img_string, len(img_string))
importer.SetDataScalarType(VTK_UNSIGNED_CHAR)
importer.SetNumberOfScalarComponents(1)
extent = importer.GetDataExtent()
importer.SetDataExtent(extent[0], extent[0] + dim[2] - 1,
extent[2], extent[2] + dim[1] - 1,
extent[4], extent[4] + dim[0] - 1)
importer.SetWholeExtent(extent[0], extent[0] + dim[2] - 1,
extent[2], extent[2] + dim[1] - 1,
extent[4], extent[4] + dim[0] - 1)
importer.SetDataSpacing( spacing[0], spacing[1], spacing[2])
importer.SetDataOrigin( 0,0,0 )
return importer
def volumeRender(img, tf=[],spacing=[1.0,1.0,1.0]):
importer = numpy2VTK(img,spacing)
# Transfer Functions
opacity_tf = vtk.vtkPiecewiseFunction()
color_tf = vtk.vtkColorTransferFunction()
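    # each transfer-function row is [voxel value, r, g, b, opacity]; when no rows
    # are given, fall back to mapping img.min() to transparent black and img.max()
    # to opaque white (see the default appended just below)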
if len(tf) == 0:
tf.append([img.min(),0,0,0,0])
tf.append([img.max(),1,1,1,1])
for p in tf:
color_tf.AddRGBPoint(p[0], p[1], p[2], p[3])
opacity_tf.AddPoint(p[0], p[4])
volMapper = vtk.vtkGPUVolumeRayCastMapper()
volMapper.SetInputConnection(importer.GetOutputPort())
# The property describes how the data will look
volProperty = vtk.vtkVolumeProperty()
volProperty.SetColor(color_tf)
volProperty.SetScalarOpacity(opacity_tf)
volProperty.ShadeOn()
volProperty.SetInterpolationTypeToLinear()
vol = vtk.vtkVolume()
vol.SetMapper(volMapper)
vol.SetProperty(volProperty)
return [vol]
def vtk_basic( actors ):
"""
    Create a window, renderer and interactor, add the actors, and start the interaction loop
Parameters
----------
actors : list of vtkActors
Returns
-------
nothing
"""
# create a rendering window and renderer
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
renWin.SetSize(600,600)
# ren.SetBackground( 1, 1, 1)
# create a renderwindowinteractor
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
for a in actors:
# assign actor to the renderer
ren.AddActor(a )
# render
renWin.Render()
# enable user interface interactor
iren.Initialize()
iren.Start()
def render( img, tf=None, colors=None, opacity=None ):
"""
    tf: transfer function of the form [[voxel_value, r, g, b, opacity]]
"""
data = img.rescale().get_data(dtype='uint8')
if colors is not None:
tf = plt.cm.get_cmap(colors, 256)(range(256))*255
tf = np.hstack( (np.arange(256).reshape(256,1), tf[:,:3]) )
if opacity is not None:
opacity = np.array(opacity)
x = opacity[:,0]
y = opacity[:,1]
f = scipy.interpolate.interp1d(x, y, kind='linear')
tf = np.hstack( (tf, f(range(256)).reshape((256,1)) ) )
else:
tf = np.hstack( (tf,
np.linspace(0,1,len(tf)).reshape(len(tf),1),
np.linspace(0,1,len(tf)).reshape(len(tf),1) ) )
if tf is None:
q = mquantiles(data.flatten(),[0.5,0.98])
q[0]=max(q[0],1)
q[1] = max(q[1],1)
tf=[[0,0,0,0,0],[q[0],0,0,0,0],[q[1],1,1,1,0.5],[data.max(),1,1,1,1]]
actor_list = volumeRender(data, tf=tf, spacing=img.header['pixelSize'][:3])
vtk_basic(actor_list)
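# --- Hedged usage sketch (not part of the module) -----------------------------
# Given an irtk image `img`, calls along these lines are expected to work; the
# colormap name and the opacity breakpoints below are illustrative assumptions:
#
#     render(img, colors='jet', opacity=[[0, 0.0], [128, 0.1], [255, 1.0]])
#
# or with an explicit transfer function of [voxel_value, r, g, b, opacity] rows:
#
#     render(img, tf=[[0, 0, 0, 0, 0.0], [255, 1, 1, 1, 1.0]])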
| apache-2.0 |
jayhetee/dask | dask/array/learn.py | 10 | 3061 | from .core import names
from .. import threaded
from toolz import merge, partial
def _partial_fit(model, x, y, kwargs=None):
kwargs = kwargs or dict()
model.partial_fit(x, y, **kwargs)
return model
def fit(model, x, y, get=threaded.get, **kwargs):
""" Fit scikit learn model against dask arrays
Model must support the ``partial_fit`` interface for online or batch
learning.
This method will be called on dask arrays in sequential order. Ideally
your rows are independent and identically distributed.
Parameters
----------
model: sklearn model
Any model supporting partial_fit interface
x: dask Array
Two dimensional array, likely tall and skinny
y: dask Array
One dimensional array with same chunks as x's rows
kwargs:
options to pass to partial_fit
Example
-------
>>> import dask.array as da
>>> X = da.random.random((10, 3), chunks=(5, 3))
>>> y = da.random.random(10, chunks=(5,))
>>> from sklearn.linear_model import SGDClassifier
>>> sgd = SGDClassifier()
>>> sgd = da.learn.fit(sgd, X, y, classes=[1, 0])
>>> sgd # doctest: +SKIP
SGDClassifier(alpha=0.0001, class_weight=None, epsilon=0.1, eta0=0.0,
fit_intercept=True, l1_ratio=0.15, learning_rate='optimal',
loss='hinge', n_iter=5, n_jobs=1, penalty='l2', power_t=0.5,
random_state=None, shuffle=False, verbose=0, warm_start=False)
This passes all of X and y through the classifier sequentially. We can use
the classifier as normal on in-memory data
>>> import numpy as np
>>> sgd.predict(np.random.random((4, 3))) # doctest: +SKIP
array([1, 0, 0, 1])
Or predict on a larger dataset
>>> z = da.random.random((400, 3), chunks=(100, 3))
>>> da.learn.predict(sgd, z) # doctest: +SKIP
dask.array<x_11, shape=(400,), chunks=((100, 100, 100, 100),), dtype=int64>
"""
assert x.ndim == 2
assert y.ndim == 1
assert x.chunks[0] == y.chunks[0]
assert hasattr(model, 'partial_fit')
if len(x.chunks[1]) > 1:
x = x.reblock(chunks=(x.chunks[0], sum(x.chunks[1])))
nblocks = len(x.chunks[0])
name = next(names)
dsk = {(name, -1): model}
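    # build a linear task graph: block i's _partial_fit consumes the model produced
    # by block i-1, starting from the untrained model stored at key (name, -1)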
dsk.update(dict(((name, i), (_partial_fit, (name, i - 1),
(x.name, i, 0),
(y.name, i),
kwargs))
for i in range(nblocks)))
return get(merge(x.dask, y.dask, dsk), (name, nblocks - 1))
def _predict(model, x):
return model.predict(x)[:, None]
def predict(model, x):
""" Predict with a scikit learn model
Parameters
----------
model: scikit learn classifier
x: dask Array
See docstring for ``da.learn.fit``
"""
assert x.ndim == 2
if len(x.chunks[1]) > 1:
x = x.reblock(chunks=(x.chunks[0], sum(x.chunks[1])))
func = partial(_predict, model)
return x.map_blocks(func, chunks=(x.chunks[0], (1,))).squeeze()
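# Hedged usage note (not part of the module): the array returned by `predict` is
# lazy like any dask array; call `.compute()` on the result to obtain a concrete
# numpy array, e.g. `predict(sgd, z).compute()`.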
| bsd-3-clause |
aabadie/scikit-learn | sklearn/linear_model/stochastic_gradient.py | 20 | 51086 | # Authors: Peter Prettenhofer <[email protected]> (main author)
# Mathieu Blondel (partial_fit support)
#
# License: BSD 3 clause
"""Classification and regression using Stochastic Gradient Descent (SGD)."""
import numpy as np
from abc import ABCMeta, abstractmethod
from ..externals.joblib import Parallel, delayed
from .base import LinearClassifierMixin, SparseCoefMixin
from .base import make_dataset
from ..base import BaseEstimator, RegressorMixin
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import (check_array, check_random_state, check_X_y,
deprecated)
from ..utils.extmath import safe_sparse_dot
from ..utils.multiclass import _check_partial_fit_first_call
from ..utils.validation import check_is_fitted
from ..externals import six
from .sgd_fast import plain_sgd, average_sgd
from ..utils.fixes import astype
from ..utils import compute_class_weight
from .sgd_fast import Hinge
from .sgd_fast import SquaredHinge
from .sgd_fast import Log
from .sgd_fast import ModifiedHuber
from .sgd_fast import SquaredLoss
from .sgd_fast import Huber
from .sgd_fast import EpsilonInsensitive
from .sgd_fast import SquaredEpsilonInsensitive
LEARNING_RATE_TYPES = {"constant": 1, "optimal": 2, "invscaling": 3,
"pa1": 4, "pa2": 5}
PENALTY_TYPES = {"none": 0, "l2": 2, "l1": 1, "elasticnet": 3}
DEFAULT_EPSILON = 0.1
# Default value of ``epsilon`` parameter.
class BaseSGD(six.with_metaclass(ABCMeta, BaseEstimator, SparseCoefMixin)):
"""Base class for SGD classification and regression."""
def __init__(self, loss, penalty='l2', alpha=0.0001, C=1.0,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=0.1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
warm_start=False, average=False):
self.loss = loss
self.penalty = penalty
self.learning_rate = learning_rate
self.epsilon = epsilon
self.alpha = alpha
self.C = C
self.l1_ratio = l1_ratio
self.fit_intercept = fit_intercept
self.n_iter = n_iter
self.shuffle = shuffle
self.random_state = random_state
self.verbose = verbose
self.eta0 = eta0
self.power_t = power_t
self.warm_start = warm_start
self.average = average
self._validate_params()
self.coef_ = None
if self.average > 0:
self.standard_coef_ = None
self.average_coef_ = None
# iteration count for learning rate schedule
# must not be int (e.g. if ``learning_rate=='optimal'``)
self.t_ = None
def set_params(self, *args, **kwargs):
super(BaseSGD, self).set_params(*args, **kwargs)
self._validate_params()
return self
@abstractmethod
def fit(self, X, y):
"""Fit model."""
def _validate_params(self):
"""Validate input params. """
if not isinstance(self.shuffle, bool):
raise ValueError("shuffle must be either True or False")
if self.n_iter <= 0:
raise ValueError("n_iter must be > zero")
if not (0.0 <= self.l1_ratio <= 1.0):
raise ValueError("l1_ratio must be in [0, 1]")
if self.alpha < 0.0:
raise ValueError("alpha must be >= 0")
if self.learning_rate in ("constant", "invscaling"):
if self.eta0 <= 0.0:
raise ValueError("eta0 must be > 0")
if self.learning_rate == "optimal" and self.alpha == 0:
raise ValueError("alpha must be > 0 since "
"learning_rate is 'optimal'. alpha is used "
"to compute the optimal learning rate.")
# raises ValueError if not registered
self._get_penalty_type(self.penalty)
self._get_learning_rate_type(self.learning_rate)
if self.loss not in self.loss_functions:
raise ValueError("The loss %s is not supported. " % self.loss)
def _get_loss_function(self, loss):
"""Get concrete ``LossFunction`` object for str ``loss``. """
try:
loss_ = self.loss_functions[loss]
loss_class, args = loss_[0], loss_[1:]
if loss in ('huber', 'epsilon_insensitive',
'squared_epsilon_insensitive'):
args = (self.epsilon, )
return loss_class(*args)
except KeyError:
raise ValueError("The loss %s is not supported. " % loss)
def _get_learning_rate_type(self, learning_rate):
try:
return LEARNING_RATE_TYPES[learning_rate]
except KeyError:
raise ValueError("learning rate %s "
"is not supported. " % learning_rate)
def _get_penalty_type(self, penalty):
penalty = str(penalty).lower()
try:
return PENALTY_TYPES[penalty]
except KeyError:
raise ValueError("Penalty %s is not supported. " % penalty)
def _validate_sample_weight(self, sample_weight, n_samples):
"""Set the sample weight array."""
if sample_weight is None:
# uniform sample weights
sample_weight = np.ones(n_samples, dtype=np.float64, order='C')
else:
# user-provided array
sample_weight = np.asarray(sample_weight, dtype=np.float64,
order="C")
if sample_weight.shape[0] != n_samples:
raise ValueError("Shapes of X and sample_weight do not match.")
return sample_weight
def _allocate_parameter_mem(self, n_classes, n_features, coef_init=None,
intercept_init=None):
"""Allocate mem for parameters; initialize if provided."""
if n_classes > 2:
# allocate coef_ for multi-class
if coef_init is not None:
coef_init = np.asarray(coef_init, order="C")
if coef_init.shape != (n_classes, n_features):
raise ValueError("Provided ``coef_`` does not match "
"dataset. ")
self.coef_ = coef_init
else:
self.coef_ = np.zeros((n_classes, n_features),
dtype=np.float64, order="C")
# allocate intercept_ for multi-class
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, order="C")
if intercept_init.shape != (n_classes, ):
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init
else:
self.intercept_ = np.zeros(n_classes, dtype=np.float64,
order="C")
else:
# allocate coef_ for binary problem
if coef_init is not None:
coef_init = np.asarray(coef_init, dtype=np.float64,
order="C")
coef_init = coef_init.ravel()
if coef_init.shape != (n_features,):
raise ValueError("Provided coef_init does not "
"match dataset.")
self.coef_ = coef_init
else:
self.coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
# allocate intercept_ for binary problem
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, dtype=np.float64)
if intercept_init.shape != (1,) and intercept_init.shape != ():
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init.reshape(1,)
else:
self.intercept_ = np.zeros(1, dtype=np.float64, order="C")
# initialize average parameters
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = np.zeros(self.coef_.shape,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(self.standard_intercept_.shape,
dtype=np.float64,
order="C")
def _prepare_fit_binary(est, y, i):
"""Initialization for fit_binary.
    Returns y, coef, intercept, average_coef, average_intercept.
"""
y_i = np.ones(y.shape, dtype=np.float64, order="C")
y_i[y != est.classes_[i]] = -1.0
average_intercept = 0
average_coef = None
if len(est.classes_) == 2:
if not est.average:
coef = est.coef_.ravel()
intercept = est.intercept_[0]
else:
coef = est.standard_coef_.ravel()
intercept = est.standard_intercept_[0]
average_coef = est.average_coef_.ravel()
average_intercept = est.average_intercept_[0]
else:
if not est.average:
coef = est.coef_[i]
intercept = est.intercept_[i]
else:
coef = est.standard_coef_[i]
intercept = est.standard_intercept_[i]
average_coef = est.average_coef_[i]
average_intercept = est.average_intercept_[i]
return y_i, coef, intercept, average_coef, average_intercept
def fit_binary(est, i, X, y, alpha, C, learning_rate, n_iter,
pos_weight, neg_weight, sample_weight):
"""Fit a single binary classifier.
The i'th class is considered the "positive" class.
"""
# if average is not true, average_coef, and average_intercept will be
# unused
y_i, coef, intercept, average_coef, average_intercept = \
_prepare_fit_binary(est, y, i)
assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0]
dataset, intercept_decay = make_dataset(X, y_i, sample_weight)
penalty_type = est._get_penalty_type(est.penalty)
learning_rate_type = est._get_learning_rate_type(learning_rate)
# XXX should have random_state_!
random_state = check_random_state(est.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if not est.average:
return plain_sgd(coef, intercept, est.loss_function,
penalty_type, alpha, C, est.l1_ratio,
dataset, n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle), seed,
pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_, intercept_decay)
else:
standard_coef, standard_intercept, average_coef, \
average_intercept = average_sgd(coef, intercept, average_coef,
average_intercept,
est.loss_function, penalty_type,
alpha, C, est.l1_ratio, dataset,
n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle),
seed, pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_,
intercept_decay,
est.average)
if len(est.classes_) == 2:
est.average_intercept_[0] = average_intercept
else:
est.average_intercept_[i] = average_intercept
return standard_coef, standard_intercept
class BaseSGDClassifier(six.with_metaclass(ABCMeta, BaseSGD,
LinearClassifierMixin)):
loss_functions = {
"hinge": (Hinge, 1.0),
"squared_hinge": (SquaredHinge, 1.0),
"perceptron": (Hinge, 0.0),
"log": (Log, ),
"modified_huber": (ModifiedHuber, ),
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(BaseSGDClassifier, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
self.class_weight = class_weight
self.classes_ = None
self.n_jobs = int(n_jobs)
def _partial_fit(self, X, y, alpha, C,
loss, learning_rate, n_iter,
classes, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
self._validate_params()
_check_partial_fit_first_call(self, classes)
n_classes = self.classes_.shape[0]
# Allocate datastructures from input arguments
self._expanded_class_weight = compute_class_weight(self.class_weight,
self.classes_, y)
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None or coef_init is not None:
self._allocate_parameter_mem(n_classes, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous "
"data %d." % (n_features, self.coef_.shape[-1]))
self.loss_function = self._get_loss_function(loss)
if self.t_ is None:
self.t_ = 1.0
# delegate to concrete training procedure
if n_classes > 2:
self._fit_multiclass(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
elif n_classes == 2:
self._fit_binary(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
else:
raise ValueError("The number of class labels must be "
"greater than one.")
return self
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if hasattr(self, "classes_"):
self.classes_ = None
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
# labels can be encoded as float, int, or string literals
# np.unique sorts in asc order; largest class id is positive class
classes = np.unique(y)
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = None
self._partial_fit(X, y, alpha, C, loss, learning_rate, self.n_iter,
classes, sample_weight, coef_init, intercept_init)
return self
def _fit_binary(self, X, y, alpha, C, sample_weight,
learning_rate, n_iter):
"""Fit a binary classifier on X and y. """
coef, intercept = fit_binary(self, 1, X, y, alpha, C,
learning_rate, n_iter,
self._expanded_class_weight[1],
self._expanded_class_weight[0],
sample_weight)
self.t_ += n_iter * X.shape[0]
# need to be 2d
if self.average > 0:
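            # expose the averaged weights only once at least ``average`` samples have
            # been seen (t_ - 1 counts samples); until then keep the plain SGD solution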
if self.average <= self.t_ - 1:
self.coef_ = self.average_coef_.reshape(1, -1)
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_.reshape(1, -1)
self.standard_intercept_ = np.atleast_1d(intercept)
self.intercept_ = self.standard_intercept_
else:
self.coef_ = coef.reshape(1, -1)
# intercept is a float, need to convert it to an array of length 1
self.intercept_ = np.atleast_1d(intercept)
def _fit_multiclass(self, X, y, alpha, C, learning_rate,
sample_weight, n_iter):
"""Fit a multi-class classifier by combining binary classifiers
Each binary classifier predicts one class versus all others. This
strategy is called OVA: One Versus All.
"""
# Use joblib to fit OvA in parallel.
result = Parallel(n_jobs=self.n_jobs, backend="threading",
verbose=self.verbose)(
delayed(fit_binary)(self, i, X, y, alpha, C, learning_rate,
n_iter, self._expanded_class_weight[i], 1.,
sample_weight)
for i in range(len(self.classes_)))
for i, (_, intercept) in enumerate(result):
self.intercept_[i] = intercept
self.t_ += n_iter * X.shape[0]
if self.average > 0:
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.standard_intercept_ = np.atleast_1d(self.intercept_)
self.intercept_ = self.standard_intercept_
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of the training data
y : numpy array, shape (n_samples,)
Subset of the target values
classes : array, shape (n_classes,)
Classes across all calls to partial_fit.
Can be obtained by via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
if self.class_weight in ['balanced', 'auto']:
raise ValueError("class_weight '{0}' is not supported for "
"partial_fit. In order to use 'balanced' weights,"
" use compute_class_weight('{0}', classes, y). "
"In place of y you can us a large enough sample "
"of the full training set target to properly "
"estimate the class frequency distributions. "
"Pass the resulting weights as the class_weight "
"parameter.".format(self.class_weight))
return self._partial_fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
classes=classes, sample_weight=sample_weight,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_classes, n_features)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (n_classes,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed. These weights will
be multiplied with class_weight (passed through the
constructor) if class_weight is specified
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init, intercept_init=intercept_init,
sample_weight=sample_weight)
class SGDClassifier(BaseSGDClassifier, _LearntSelectorMixin):
"""Linear classifiers (SVM, logistic regression, a.o.) with SGD training.
This estimator implements regularized linear models with stochastic
gradient descent (SGD) learning: the gradient of the loss is estimated
each sample at a time and the model is updated along the way with a
decreasing strength schedule (aka learning rate). SGD allows minibatch
(online/out-of-core) learning, see the partial_fit method.
For best results using the default learning rate schedule, the data should
have zero mean and unit variance.
This implementation works with data represented as dense or sparse arrays
of floating point values for the features. The model it fits can be
controlled with the loss parameter; by default, it fits a linear support
vector machine (SVM).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, 'hinge', 'log', 'modified_huber', 'squared_hinge',\
'perceptron', or a regression loss: 'squared_loss', 'huber',\
'epsilon_insensitive', or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'hinge', which gives a
linear SVM.
The 'log' loss gives logistic regression, a probabilistic classifier.
'modified_huber' is another smooth loss that brings tolerance to
outliers as well as probability estimates.
'squared_hinge' is like hinge but is quadratically penalized.
'perceptron' is the linear loss used by the perceptron algorithm.
The other losses are designed for regression but can be useful in
classification as well; see SGDRegressor for a description.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
Also used to compute learning_rate when set to 'optimal'.
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
n_jobs : integer, optional
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation. -1 means 'all CPUs'. Defaults
to 1.
learning_rate : string, optional
The learning rate schedule:
- 'constant': eta = eta0
- 'optimal': eta = 1.0 / (alpha * (t + t0)) [default]
- 'invscaling': eta = eta0 / pow(t, power_t)
where t0 is chosen by a heuristic proposed by Leon Bottou.
eta0 : double
The initial learning rate for the 'constant' or 'invscaling'
schedules. The default value is 0.0 as eta0 is not used by the
default schedule 'optimal'.
power_t : double
The exponent for inverse scaling learning rate [default 0.5].
class_weight : dict, {class_label: weight} or "balanced" or None, optional
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So ``average=10`` will begin averaging after seeing 10
samples.
Attributes
----------
coef_ : array, shape (1, n_features) if n_classes == 2 else (n_classes,\
n_features)
Weights assigned to the features.
intercept_ : array, shape (1,) if n_classes == 2 else (n_classes,)
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> Y = np.array([1, 1, 2, 2])
>>> clf = linear_model.SGDClassifier()
>>> clf.fit(X, Y)
... #doctest: +NORMALIZE_WHITESPACE
SGDClassifier(alpha=0.0001, average=False, class_weight=None, epsilon=0.1,
eta0=0.0, fit_intercept=True, l1_ratio=0.15,
learning_rate='optimal', loss='hinge', n_iter=5, n_jobs=1,
penalty='l2', power_t=0.5, random_state=None, shuffle=True,
verbose=0, warm_start=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
LinearSVC, LogisticRegression, Perceptron
"""
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(SGDClassifier, self).__init__(
loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept, n_iter=n_iter, shuffle=shuffle,
verbose=verbose, epsilon=epsilon, n_jobs=n_jobs,
random_state=random_state, learning_rate=learning_rate, eta0=eta0,
power_t=power_t, class_weight=class_weight, warm_start=warm_start,
average=average)
def _check_proba(self):
check_is_fitted(self, "t_")
if self.loss not in ("log", "modified_huber"):
raise AttributeError("probability estimates are not available for"
" loss=%r" % self.loss)
@property
def predict_proba(self):
"""Probability estimates.
This method is only available for log loss and modified Huber loss.
Multiclass probability estimates are derived from binary (one-vs.-rest)
estimates by simple normalization, as recommended by Zadrozny and
Elkan.
Binary probability estimates for loss="modified_huber" are given by
(clip(decision_function(X), -1, 1) + 1) / 2. For other loss functions
it is necessary to perform proper probability calibration by wrapping
the classifier with
:class:`sklearn.calibration.CalibratedClassifierCV` instead.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples, n_classes)
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in `self.classes_`.
References
----------
Zadrozny and Elkan, "Transforming classifier scores into multiclass
probability estimates", SIGKDD'02,
http://www.research.ibm.com/people/z/zadrozny/kdd2002-Transf.pdf
The justification for the formula in the loss="modified_huber"
case is in the appendix B in:
http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf
"""
self._check_proba()
return self._predict_proba
def _predict_proba(self, X):
if self.loss == "log":
return self._predict_proba_lr(X)
elif self.loss == "modified_huber":
binary = (len(self.classes_) == 2)
scores = self.decision_function(X)
if binary:
prob2 = np.ones((scores.shape[0], 2))
prob = prob2[:, 1]
else:
prob = scores
np.clip(scores, -1, 1, prob)
prob += 1.
prob /= 2.
if binary:
prob2[:, 0] -= prob
prob = prob2
else:
# the above might assign zero to all classes, which doesn't
# normalize neatly; work around this to produce uniform
# probabilities
prob_sum = prob.sum(axis=1)
all_zero = (prob_sum == 0)
if np.any(all_zero):
prob[all_zero, :] = 1
prob_sum[all_zero] = len(self.classes_)
# normalize
prob /= prob_sum.reshape((prob.shape[0], -1))
return prob
else:
raise NotImplementedError("predict_(log_)proba only supported when"
" loss='log' or loss='modified_huber' "
"(%r given)" % self.loss)
@property
def predict_log_proba(self):
"""Log of probability estimates.
This method is only available for log loss and modified Huber loss.
When loss="modified_huber", probability estimates may be hard zeros
and ones, so taking the logarithm is not possible.
See ``predict_proba`` for details.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
T : array-like, shape (n_samples, n_classes)
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in
`self.classes_`.
"""
self._check_proba()
return self._predict_log_proba
def _predict_log_proba(self, X):
return np.log(self.predict_proba(X))
class BaseSGDRegressor(BaseSGD, RegressorMixin):
loss_functions = {
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(BaseSGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
def _partial_fit(self, X, y, alpha, C, loss, learning_rate,
n_iter, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, "csr", copy=False, order='C', dtype=np.float64)
y = astype(y, np.float64, copy=False)
n_samples, n_features = X.shape
self._validate_params()
# Allocate datastructures from input arguments
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None:
self._allocate_parameter_mem(1, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous "
"data %d." % (n_features, self.coef_.shape[-1]))
if self.average > 0 and self.average_coef_ is None:
self.average_coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(1,
dtype=np.float64,
order="C")
self._fit_regressor(X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter)
return self
def partial_fit(self, X, y, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of training data
y : numpy array of shape (n_samples,)
Subset of target values
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
return self._partial_fit(X, y, self.alpha, C=1.0,
loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
sample_weight=sample_weight,
coef_init=None, intercept_init=None)
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_intercept_ = self.intercept_
self.standard_coef_ = self.coef_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = None
return self._partial_fit(X, y, alpha, C, loss, learning_rate,
self.n_iter, sample_weight,
coef_init, intercept_init)
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_features,)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (1,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init,
intercept_init=intercept_init,
sample_weight=sample_weight)
@deprecated(" and will be removed in 0.19.")
def decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
return self._decision_function(X)
def _decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
check_is_fitted(self, ["t_", "coef_", "intercept_"], all_or_any=all)
X = check_array(X, accept_sparse='csr')
scores = safe_sparse_dot(X, self.coef_.T,
dense_output=True) + self.intercept_
return scores.ravel()
def predict(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
return self._decision_function(X)
def _fit_regressor(self, X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter):
dataset, intercept_decay = make_dataset(X, y, sample_weight)
loss_function = self._get_loss_function(loss)
penalty_type = self._get_penalty_type(self.penalty)
learning_rate_type = self._get_learning_rate_type(learning_rate)
if self.t_ is None:
self.t_ = 1.0
random_state = check_random_state(self.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if self.average > 0:
self.standard_coef_, self.standard_intercept_, \
self.average_coef_, self.average_intercept_ =\
average_sgd(self.standard_coef_,
self.standard_intercept_[0],
self.average_coef_,
self.average_intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay, self.average)
self.average_intercept_ = np.atleast_1d(self.average_intercept_)
self.standard_intercept_ = np.atleast_1d(self.standard_intercept_)
self.t_ += n_iter * X.shape[0]
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.intercept_ = self.standard_intercept_
else:
self.coef_, self.intercept_ = \
plain_sgd(self.coef_,
self.intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay)
self.t_ += n_iter * X.shape[0]
self.intercept_ = np.atleast_1d(self.intercept_)
class SGDRegressor(BaseSGDRegressor, _LearntSelectorMixin):
"""Linear model fitted by minimizing a regularized empirical loss with SGD
SGD stands for Stochastic Gradient Descent: the gradient of the loss is
estimated each sample at a time and the model is updated along the way with
a decreasing strength schedule (aka learning rate).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
This implementation works with data represented as dense numpy arrays of
floating point values for the features.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, 'squared_loss', 'huber', 'epsilon_insensitive', \
or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'squared_loss' which refers
to the ordinary least squares fit. 'huber' modifies 'squared_loss' to
focus less on getting outliers correct by switching from squared to
linear loss past a distance of epsilon. 'epsilon_insensitive' ignores
errors less than epsilon and is linear past that; this is the loss
function used in SVR. 'squared_epsilon_insensitive' is the same but
becomes squared loss past a tolerance of epsilon.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
Also used to compute learning_rate when set to 'optimal'.
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level.
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
learning_rate : string, optional
The learning rate schedule:
- 'constant': eta = eta0
- 'optimal': eta = 1.0 / (alpha * (t + t0)) [default]
- 'invscaling': eta = eta0 / pow(t, power_t)
where t0 is chosen by a heuristic proposed by Leon Bottou.
eta0 : double, optional
The initial learning rate [default 0.01].
power_t : double, optional
The exponent for inverse scaling learning rate [default 0.25].
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So ``average=10`` will begin averaging after seeing 10
samples.
Attributes
----------
coef_ : array, shape (n_features,)
Weights assigned to the features.
intercept_ : array, shape (1,)
The intercept term.
average_coef_ : array, shape (n_features,)
Averaged weights assigned to the features.
average_intercept_ : array, shape (1,)
The averaged intercept term.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = linear_model.SGDRegressor()
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
SGDRegressor(alpha=0.0001, average=False, epsilon=0.1, eta0=0.01,
fit_intercept=True, l1_ratio=0.15, learning_rate='invscaling',
loss='squared_loss', n_iter=5, penalty='l2', power_t=0.25,
random_state=None, shuffle=True, verbose=0, warm_start=False)
See also
--------
Ridge, ElasticNet, Lasso, SVR
"""
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(SGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
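# --- Hedged usage sketch (not part of scikit-learn) ---------------------------
# Minimal out-of-core training with ``partial_fit``: each loop iteration stands in
# for one mini-batch streamed from disk.  The batch shapes and random data are
# assumptions made purely for illustration; ``np`` is the numpy import above.
def _sgd_partial_fit_sketch():
    rng = np.random.RandomState(0)
    classes = np.array([0, 1])            # must be given on the first call
    clf = SGDClassifier(loss="log")       # log loss enables predict_proba
    for _ in range(5):
        X_batch = rng.randn(32, 4)
        y_batch = rng.randint(0, 2, size=32)
        clf.partial_fit(X_batch, y_batch, classes=classes)
    return clf.predict_proba(rng.randn(3, 4))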
| bsd-3-clause |
gxx/lettuce | lettuce/django/steps/mail.py | 20 | 1903 | """
Step definitions for working with Django email.
"""
from smtplib import SMTPException
from django.core import mail
from lettuce import step
STEP_PREFIX = r'(?:Given|And|Then|When) '
CHECK_PREFIX = r'(?:And|Then) '
EMAIL_PARTS = ('subject', 'body', 'from_email', 'to', 'bcc', 'cc')
GOOD_MAIL = mail.EmailMessage.send
@step(CHECK_PREFIX + r'I have sent (\d+) emails?')
def mail_sent_count(step, count):
"""
Then I have sent 2 emails
"""
count = int(count)
    assert len(mail.outbox) == count, "Length of outbox is {0}, expected {1}".format(len(mail.outbox), count)
@step(r'I have not sent any emails')
def mail_not_sent(step):
"""
I have not sent any emails
"""
return mail_sent_count(step, 0)
@step(CHECK_PREFIX + (r'I have sent an email with "([^"]*)" in the ({0})'
'').format('|'.join(EMAIL_PARTS)))
def mail_sent_content(step, text, part):
"""
Then I have sent an email with "pandas" in the body
"""
assert any(text in getattr(email, part)
for email
in mail.outbox
), "An email contained expected text in the {0}".format(part)
@step(CHECK_PREFIX + r'I have sent an email with the following in the body:')
def mail_sent_content_multiline(step):
"""
I have sent an email with the following in the body:
\"""
Name: Mr. Panda
\"""
"""
return mail_sent_content(step, step.multiline, 'body')
@step(STEP_PREFIX + r'I clear my email outbox')
def mail_clear(step):
"""
I clear my email outbox
"""
mail.EmailMessage.send = GOOD_MAIL
mail.outbox = []
def broken_send(*args, **kwargs):
"""
Broken send function for email_broken step
"""
raise SMTPException("Failure mocked by lettuce")
@step(STEP_PREFIX + r'sending email does not work')
def email_broken(step):
"""
Break email sending
"""
mail.EmailMessage.send = broken_send
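# --- Hedged usage sketch (not part of the module) -----------------------------
# In a lettuce feature file these steps are expected to match scenarios such as
# the following (the application-specific "When" step is a made-up placeholder):
#
#     Given I clear my email outbox
#     When I submit the contact form
#     Then I have sent 1 email
#     And I have sent an email with "contact" in the subject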
| gpl-3.0 |
rohit21122012/DCASE2013 | runs/2016/dnn2016med_traps/traps15/task1_scene_classification.py | 40 | 38423 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# DCASE 2016::Acoustic Scene Classification / Baseline System
import argparse
import textwrap
import timeit
import skflow
from sklearn import mixture
from sklearn import preprocessing as pp
from sklearn.externals import joblib
from sklearn.metrics import confusion_matrix
from src.dataset import *
from src.evaluation import *
from src.features import *
__version_info__ = ('1', '0', '0')
__version__ = '.'.join(__version_info__)
final_result = {}
train_start = 0.0
train_end = 0.0
test_start = 0.0
test_end = 0.0
def main(argv):
numpy.random.seed(123456) # let's make randomization predictable
    tot_start = timeit.default_timer()
    # timers default to zero so the timing summary below also works when a stage
    # is skipped via the flow flags
    train_start = train_end = 0.0
    test_start = test_end = 0.0
parser = argparse.ArgumentParser(
prefix_chars='-+',
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent('''\
DCASE 2016
Task 1: Acoustic Scene Classification
Baseline system
---------------------------------------------
Tampere University of Technology / Audio Research Group
Author: Toni Heittola ( [email protected] )
System description
            This is a baseline implementation for the DCASE 2016 challenge acoustic scene classification task.
Features: MFCC (static+delta+acceleration)
Classifier: GMM
'''))
# Setup argument handling
parser.add_argument("-development", help="Use the system in the development mode", action='store_true',
default=False, dest='development')
parser.add_argument("-challenge", help="Use the system in the challenge mode", action='store_true',
default=False, dest='challenge')
parser.add_argument('-v', '--version', action='version', version='%(prog)s ' + __version__)
args = parser.parse_args()
# Load parameters from config file
parameter_file = os.path.join(os.path.dirname(os.path.realpath(__file__)),
os.path.splitext(os.path.basename(__file__))[0] + '.yaml')
params = load_parameters(parameter_file)
params = process_parameters(params)
make_folders(params)
title("DCASE 2016::Acoustic Scene Classification / Baseline System")
# Check if mode is defined
if not (args.development or args.challenge):
args.development = True
args.challenge = False
dataset_evaluation_mode = 'folds'
if args.development and not args.challenge:
print "Running system in development mode"
dataset_evaluation_mode = 'folds'
elif not args.development and args.challenge:
print "Running system in challenge mode"
dataset_evaluation_mode = 'full'
# Get dataset container class
dataset = eval(params['general']['development_dataset'])(data_path=params['path']['data'])
# Fetch data over internet and setup the data
# ==================================================
if params['flow']['initialize']:
dataset.fetch()
# Extract features for all audio files in the dataset
# ==================================================
if params['flow']['extract_features']:
section_header('Feature extraction')
# Collect files in train sets
files = []
for fold in dataset.folds(mode=dataset_evaluation_mode):
for item_id, item in enumerate(dataset.train(fold)):
if item['file'] not in files:
files.append(item['file'])
for item_id, item in enumerate(dataset.test(fold)):
if item['file'] not in files:
files.append(item['file'])
files = sorted(files)
# Go through files and make sure all features are extracted
do_feature_extraction(files=files,
dataset=dataset,
feature_path=params['path']['features'],
params=params['features'],
overwrite=params['general']['overwrite'])
foot()
# Prepare feature normalizers
# ==================================================
if params['flow']['feature_normalizer']:
section_header('Feature normalizer')
do_feature_normalization(dataset=dataset,
feature_normalizer_path=params['path']['feature_normalizers'],
feature_path=params['path']['features'],
dataset_evaluation_mode=dataset_evaluation_mode,
overwrite=params['general']['overwrite'])
foot()
# System training
# ==================================================
if params['flow']['train_system']:
section_header('System training')
train_start = timeit.default_timer()
do_system_training(dataset=dataset,
model_path=params['path']['models'],
feature_normalizer_path=params['path']['feature_normalizers'],
feature_path=params['path']['features'],
classifier_params=params['classifier']['parameters'],
classifier_method=params['classifier']['method'],
dataset_evaluation_mode=dataset_evaluation_mode,
overwrite=params['general']['overwrite']
)
train_end = timeit.default_timer()
foot()
# System evaluation in development mode
if args.development and not args.challenge:
# System testing
# ==================================================
if params['flow']['test_system']:
section_header('System testing')
test_start = timeit.default_timer()
do_system_testing(dataset=dataset,
feature_path=params['path']['features'],
result_path=params['path']['results'],
model_path=params['path']['models'],
feature_params=params['features'],
dataset_evaluation_mode=dataset_evaluation_mode,
classifier_method=params['classifier']['method'],
overwrite=params['general']['overwrite']
)
test_end = timeit.default_timer()
foot()
# System evaluation
# ==================================================
if params['flow']['evaluate_system']:
section_header('System evaluation')
do_system_evaluation(dataset=dataset,
dataset_evaluation_mode=dataset_evaluation_mode,
result_path=params['path']['results'])
foot()
# System evaluation with challenge data
elif not args.development and args.challenge:
# Fetch data over internet and setup the data
challenge_dataset = eval(params['general']['challenge_dataset'])()
if params['flow']['initialize']:
challenge_dataset.fetch()
# System testing
if params['flow']['test_system']:
section_header('System testing with challenge data')
do_system_testing(dataset=challenge_dataset,
feature_path=params['path']['features'],
result_path=params['path']['challenge_results'],
model_path=params['path']['models'],
feature_params=params['features'],
dataset_evaluation_mode=dataset_evaluation_mode,
classifier_method=params['classifier']['method'],
overwrite=True
)
foot()
print " "
print "Your results for the challenge data are stored at [" + params['path']['challenge_results'] + "]"
print " "
tot_end = timeit.default_timer()
print " "
print "Train Time : " + str(train_end - train_start)
print " "
print " "
print "Test Time : " + str(test_end - test_start)
print " "
print " "
print "Total Time : " + str(tot_end - tot_start)
print " "
final_result['train_time'] = train_end - train_start
final_result['test_time'] = test_end - test_start
final_result['tot_time'] = tot_end - tot_start
joblib.dump(final_result, 'result.pkl')
return 0
def process_parameters(params):
"""Parameter post-processing.
Parameters
----------
params : dict
parameters in dict
Returns
-------
params : dict
processed parameters
"""
    # Convert feature extraction window and hop sizes from seconds to samples
params['features']['mfcc']['win_length'] = int(params['features']['win_length_seconds'] * params['features']['fs'])
params['features']['mfcc']['hop_length'] = int(params['features']['hop_length_seconds'] * params['features']['fs'])
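    # e.g. with fs=44100, win_length_seconds=0.04 and hop_length_seconds=0.02
    # (illustrative values; the real ones come from the yaml parameter file)
    # this yields 1764-sample windows with an 882-sample hop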
# Copy parameters for current classifier method
params['classifier']['parameters'] = params['classifier_parameters'][params['classifier']['method']]
# Hash
params['features']['hash'] = get_parameter_hash(params['features'])
params['classifier']['hash'] = get_parameter_hash(params['classifier'])
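    # the hashes make the feature/model/result folders below unique per parameter
    # set, so runs with different settings never overwrite each other's files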
# Paths
params['path']['data'] = os.path.join(os.path.dirname(os.path.realpath(__file__)), params['path']['data'])
params['path']['base'] = os.path.join(os.path.dirname(os.path.realpath(__file__)), params['path']['base'])
# Features
params['path']['features_'] = params['path']['features']
params['path']['features'] = os.path.join(params['path']['base'],
params['path']['features'],
params['features']['hash'])
# Feature normalizers
params['path']['feature_normalizers_'] = params['path']['feature_normalizers']
params['path']['feature_normalizers'] = os.path.join(params['path']['base'],
params['path']['feature_normalizers'],
params['features']['hash'])
# Models
params['path']['models_'] = params['path']['models']
params['path']['models'] = os.path.join(params['path']['base'],
params['path']['models'],
params['features']['hash'], params['classifier']['hash'])
# Results
params['path']['results_'] = params['path']['results']
params['path']['results'] = os.path.join(params['path']['base'],
params['path']['results'],
params['features']['hash'], params['classifier']['hash'])
return params
def make_folders(params, parameter_filename='parameters.yaml'):
"""Create all needed folders, and saves parameters in yaml-file for easier manual browsing of data.
Parameters
----------
params : dict
parameters in dict
parameter_filename : str
filename to save parameters used to generate the folder name
Returns
-------
nothing
"""
# Check that target path exists, create if not
check_path(params['path']['features'])
check_path(params['path']['feature_normalizers'])
check_path(params['path']['models'])
check_path(params['path']['results'])
# Save parameters into folders to help manual browsing of files.
# Features
feature_parameter_filename = os.path.join(params['path']['features'], parameter_filename)
if not os.path.isfile(feature_parameter_filename):
save_parameters(feature_parameter_filename, params['features'])
# Feature normalizers
feature_normalizer_parameter_filename = os.path.join(params['path']['feature_normalizers'], parameter_filename)
if not os.path.isfile(feature_normalizer_parameter_filename):
save_parameters(feature_normalizer_parameter_filename, params['features'])
# Models
model_features_parameter_filename = os.path.join(params['path']['base'],
params['path']['models_'],
params['features']['hash'],
parameter_filename)
if not os.path.isfile(model_features_parameter_filename):
save_parameters(model_features_parameter_filename, params['features'])
model_models_parameter_filename = os.path.join(params['path']['base'],
params['path']['models_'],
params['features']['hash'],
params['classifier']['hash'],
parameter_filename)
if not os.path.isfile(model_models_parameter_filename):
save_parameters(model_models_parameter_filename, params['classifier'])
# Results
# Save parameters into folders to help manual browsing of files.
result_features_parameter_filename = os.path.join(params['path']['base'],
params['path']['results_'],
params['features']['hash'],
parameter_filename)
if not os.path.isfile(result_features_parameter_filename):
save_parameters(result_features_parameter_filename, params['features'])
result_models_parameter_filename = os.path.join(params['path']['base'],
params['path']['results_'],
params['features']['hash'],
params['classifier']['hash'],
parameter_filename)
if not os.path.isfile(result_models_parameter_filename):
save_parameters(result_models_parameter_filename, params['classifier'])
def get_feature_filename(audio_file, path, extension='cpickle'):
"""Get feature filename
Parameters
----------
audio_file : str
audio file name from which the features are extracted
path : str
feature path
extension : str
file extension
(Default value='cpickle')
Returns
-------
feature_filename : str
full feature filename
"""
audio_filename = os.path.split(audio_file)[1]
return os.path.join(path, os.path.splitext(audio_filename)[0] + '.' + extension)
def get_feature_normalizer_filename(fold, path, extension='cpickle'):
"""Get normalizer filename
Parameters
----------
fold : int >= 0
evaluation fold number
path : str
normalizer path
extension : str
file extension
(Default value='cpickle')
Returns
-------
normalizer_filename : str
full normalizer filename
"""
return os.path.join(path, 'scale_fold' + str(fold) + '.' + extension)
def get_model_filename(fold, path, extension='cpickle'):
"""Get model filename
Parameters
----------
fold : int >= 0
evaluation fold number
path : str
model path
extension : str
file extension
(Default value='cpickle')
Returns
-------
model_filename : str
full model filename
"""
return os.path.join(path, 'model_fold' + str(fold) + '.' + extension)
def get_result_filename(fold, path, extension='txt'):
"""Get result filename
Parameters
----------
fold : int >= 0
evaluation fold number
path : str
result path
extension : str
file extension
        (Default value='txt')
Returns
-------
result_filename : str
full result filename
"""
if fold == 0:
return os.path.join(path, 'results.' + extension)
else:
return os.path.join(path, 'results_fold' + str(fold) + '.' + extension)
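# Naming convention sketch for the helpers above (the file and folder names are
# hypothetical examples, not actual dataset files):
#     get_feature_filename('audio/a001_10_20.wav', 'feat')  -> 'feat/a001_10_20.cpickle'
#     get_feature_normalizer_filename(1, 'norm')            -> 'norm/scale_fold1.cpickle'
#     get_model_filename(1, 'mod')                          -> 'mod/model_fold1.cpickle'
#     get_result_filename(1, 'res')                         -> 'res/results_fold1.txt'
#     get_result_filename(0, 'res')                         -> 'res/results.txt'  (fold 0 = full set)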
def do_feature_extraction(files, dataset, feature_path, params, overwrite=False):
"""Feature extraction
Parameters
----------
files : list
file list
dataset : class
dataset class
feature_path : str
path where the features are saved
params : dict
parameter dict
overwrite : bool
overwrite existing feature files
(Default value=False)
Returns
-------
nothing
Raises
-------
IOError
Audio file not found.
"""
# Check that target path exists, create if not
check_path(feature_path)
for file_id, audio_filename in enumerate(files):
# Get feature filename
current_feature_file = get_feature_filename(audio_file=os.path.split(audio_filename)[1], path=feature_path)
progress(title_text='Extracting',
percentage=(float(file_id) / len(files)),
note=os.path.split(audio_filename)[1])
if not os.path.isfile(current_feature_file) or overwrite:
# Load audio data
if os.path.isfile(dataset.relative_to_absolute_path(audio_filename)):
y, fs = load_audio(filename=dataset.relative_to_absolute_path(audio_filename), mono=True,
fs=params['fs'])
else:
raise IOError("Audio file not found [%s]" % audio_filename)
# Extract features
if params['method'] == 'lfcc':
feature_file_txt = get_feature_filename(audio_file=os.path.split(audio_filename)[1],
path=feature_path,
extension='txt')
feature_data = feature_extraction_lfcc(feature_file_txt)
elif params['method'] == 'traps':
feature_data = feature_extraction_traps(y=y,
fs=fs,
traps_params=params['traps'],
mfcc_params=params['mfcc'])
else:
# feature_data['feat'].shape is (1501, 60)
feature_data = feature_extraction(y=y,
fs=fs,
include_mfcc0=params['include_mfcc0'],
include_delta=params['include_delta'],
include_acceleration=params['include_acceleration'],
mfcc_params=params['mfcc'],
delta_params=params['mfcc_delta'],
acceleration_params=params['mfcc_acceleration'])
# Save
save_data(current_feature_file, feature_data)
def do_feature_normalization(dataset, feature_normalizer_path, feature_path, dataset_evaluation_mode='folds',
overwrite=False):
"""Feature normalization
    Calculate normalization factors for each evaluation fold based on the available training material.
Parameters
----------
dataset : class
dataset class
feature_normalizer_path : str
path where the feature normalizers are saved.
feature_path : str
path where the features are saved.
dataset_evaluation_mode : str ['folds', 'full']
        evaluation mode; with 'full', all available material is treated as one fold.
(Default value='folds')
overwrite : bool
overwrite existing normalizers
(Default value=False)
Returns
-------
nothing
Raises
-------
IOError
Feature file not found.
"""
# Check that target path exists, create if not
check_path(feature_normalizer_path)
for fold in dataset.folds(mode=dataset_evaluation_mode):
current_normalizer_file = get_feature_normalizer_filename(fold=fold, path=feature_normalizer_path)
if not os.path.isfile(current_normalizer_file) or overwrite:
# Initialize statistics
file_count = len(dataset.train(fold))
normalizer = FeatureNormalizer()
for item_id, item in enumerate(dataset.train(fold)):
progress(title_text='Collecting data',
fold=fold,
percentage=(float(item_id) / file_count),
note=os.path.split(item['file'])[1])
# Load features
if os.path.isfile(get_feature_filename(audio_file=item['file'], path=feature_path)):
feature_data = load_data(get_feature_filename(audio_file=item['file'], path=feature_path))['stat']
else:
raise IOError("Feature file not found [%s]" % (item['file']))
# Accumulate statistics
normalizer.accumulate(feature_data)
# Calculate normalization factors
normalizer.finalize()
# Save
save_data(current_normalizer_file, normalizer)
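# Sketch of the normalizer protocol used above (FeatureNormalizer comes from the
# accompanying framework code): accumulate() is fed the per-file 'stat' dictionaries,
# finalize() turns the accumulated sums into mean/std normalization factors, and
# normalize() is later applied to raw feature matrices during training and testing.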
def do_system_training(dataset, model_path, feature_normalizer_path, feature_path, classifier_params,
dataset_evaluation_mode='folds', classifier_method='gmm', overwrite=False):
"""System training
model container format:
{
'normalizer': normalizer class
'models' :
{
'office' : mixture.GMM class
'home' : mixture.GMM class
...
}
}
Parameters
----------
dataset : class
dataset class
model_path : str
path where the models are saved.
feature_normalizer_path : str
path where the feature normalizers are saved.
feature_path : str
path where the features are saved.
classifier_params : dict
parameter dict
dataset_evaluation_mode : str ['folds', 'full']
        evaluation mode; with 'full', all available material is treated as one fold.
(Default value='folds')
    classifier_method : str ['gmm', 'dnn']
        classifier method, either GMM or DNN
(Default value='gmm')
overwrite : bool
overwrite existing models
(Default value=False)
Returns
-------
nothing
Raises
-------
ValueError
classifier_method is unknown.
IOError
Feature normalizer not found.
Feature file not found.
"""
if classifier_method != 'gmm' and classifier_method != 'dnn':
raise ValueError("Unknown classifier method [" + classifier_method + "]")
# Check that target path exists, create if not
check_path(model_path)
for fold in dataset.folds(mode=dataset_evaluation_mode):
current_model_file = get_model_filename(fold=fold, path=model_path)
if not os.path.isfile(current_model_file) or overwrite:
# Load normalizer
feature_normalizer_filename = get_feature_normalizer_filename(fold=fold, path=feature_normalizer_path)
if os.path.isfile(feature_normalizer_filename):
normalizer = load_data(feature_normalizer_filename)
else:
raise IOError("Feature normalizer not found [%s]" % feature_normalizer_filename)
# Initialize model container
model_container = {'normalizer': normalizer, 'models': {}}
# Collect training examples
file_count = len(dataset.train(fold))
data = {}
for item_id, item in enumerate(dataset.train(fold)):
progress(title_text='Collecting data',
fold=fold,
percentage=(float(item_id) / file_count),
note=os.path.split(item['file'])[1])
# Load features
feature_filename = get_feature_filename(audio_file=item['file'], path=feature_path)
if os.path.isfile(feature_filename):
feature_data = load_data(feature_filename)['feat']
else:
raise IOError("Features not found [%s]" % (item['file']))
# Scale features
feature_data = model_container['normalizer'].normalize(feature_data)
# Store features per class label
if item['scene_label'] not in data:
data[item['scene_label']] = feature_data
else:
data[item['scene_label']] = numpy.vstack((data[item['scene_label']], feature_data))
le = pp.LabelEncoder()
tot_data = {}
# Train models for each class
for label in data:
progress(title_text='Train models',
fold=fold,
note=label)
if classifier_method == 'gmm':
model_container['models'][label] = mixture.GMM(**classifier_params).fit(data[label])
elif classifier_method == 'dnn':
if 'x' not in tot_data:
tot_data['x'] = data[label]
tot_data['y'] = numpy.repeat(label, len(data[label]), axis=0)
else:
tot_data['x'] = numpy.vstack((tot_data['x'], data[label]))
tot_data['y'] = numpy.hstack((tot_data['y'], numpy.repeat(label, len(data[label]), axis=0)))
else:
raise ValueError("Unknown classifier method [" + classifier_method + "]")
            if classifier_method == 'dnn':
                clf = skflow.TensorFlowDNNClassifier(**classifier_params)
                tot_data['y'] = le.fit_transform(tot_data['y'])
                clf.fit(tot_data['x'], tot_data['y'])
                clf.save('dnn/dnnmodel1')
# Save models
save_data(current_model_file, model_container)
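# Illustrative sketch of the saved model container (labels and component counts are
# hypothetical, not taken from an actual run):
#     {'normalizer': <FeatureNormalizer>,
#      'models': {'office': mixture.GMM(n_components=16), 'home': mixture.GMM(n_components=16), ...}}
# For the 'dnn' branch the per-class GMMs are replaced by a single skflow network saved
# to disk under 'dnn/dnnmodel1', and the container keeps only the normalizer ('models' stays empty).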
def do_system_testing(dataset, result_path, feature_path, model_path, feature_params,
dataset_evaluation_mode='folds', classifier_method='gmm', overwrite=False):
"""System testing.
    If extracted features are not found on disk, they are extracted but not saved.
Parameters
----------
dataset : class
dataset class
result_path : str
path where the results are saved.
feature_path : str
path where the features are saved.
model_path : str
path where the models are saved.
feature_params : dict
parameter dict
dataset_evaluation_mode : str ['folds', 'full']
        evaluation mode; with 'full', all available material is treated as one fold.
(Default value='folds')
    classifier_method : str ['gmm', 'dnn']
        classifier method, either GMM or DNN
(Default value='gmm')
overwrite : bool
overwrite existing models
(Default value=False)
Returns
-------
nothing
Raises
-------
ValueError
classifier_method is unknown.
IOError
Model file not found.
Audio file not found.
"""
if classifier_method != 'gmm' and classifier_method != 'dnn':
raise ValueError("Unknown classifier method [" + classifier_method + "]")
# Check that target path exists, create if not
check_path(result_path)
for fold in dataset.folds(mode=dataset_evaluation_mode):
current_result_file = get_result_filename(fold=fold, path=result_path)
if not os.path.isfile(current_result_file) or overwrite:
results = []
# Load class model container
model_filename = get_model_filename(fold=fold, path=model_path)
if os.path.isfile(model_filename):
model_container = load_data(model_filename)
else:
raise IOError("Model file not found [%s]" % model_filename)
file_count = len(dataset.test(fold))
for file_id, item in enumerate(dataset.test(fold)):
progress(title_text='Testing',
fold=fold,
percentage=(float(file_id) / file_count),
note=os.path.split(item['file'])[1])
# Load features
feature_filename = get_feature_filename(audio_file=item['file'], path=feature_path)
if os.path.isfile(feature_filename):
feature_data = load_data(feature_filename)['feat']
else:
# Load audio
if os.path.isfile(dataset.relative_to_absolute_path(item['file'])):
y, fs = load_audio(filename=dataset.relative_to_absolute_path(item['file']), mono=True,
fs=feature_params['fs'])
else:
raise IOError("Audio file not found [%s]" % (item['file']))
if feature_params['method'] == 'lfcc':
feature_file_txt = get_feature_filename(audio_file=os.path.split(item['file'])[1],
path=feature_path,
extension='txt')
feature_data = feature_extraction_lfcc(feature_file_txt)
elif feature_params['method'] == 'traps':
feature_data = feature_extraction_traps(y=y,
fs=fs,
                                                                traps_params=feature_params['traps'],
mfcc_params=feature_params['mfcc'],
statistics=False)['feat']
else:
feature_data = feature_extraction(y=y,
fs=fs,
include_mfcc0=feature_params['include_mfcc0'],
include_delta=feature_params['include_delta'],
include_acceleration=feature_params['include_acceleration'],
mfcc_params=feature_params['mfcc'],
delta_params=feature_params['mfcc_delta'],
acceleration_params=feature_params['mfcc_acceleration'],
statistics=False)['feat']
# Normalize features
feature_data = model_container['normalizer'].normalize(feature_data)
# Do classification for the block
if classifier_method == 'gmm':
current_result = do_classification_gmm(feature_data, model_container)
current_class = current_result['class']
elif classifier_method == 'dnn':
current_result = do_classification_dnn(feature_data, model_container)
current_class = dataset.scene_labels[current_result['class_id']]
else:
raise ValueError("Unknown classifier method [" + classifier_method + "]")
# Store the result
if classifier_method == 'gmm':
results.append((dataset.absolute_to_relative(item['file']),
current_class))
elif classifier_method == 'dnn':
logs_in_tuple = tuple(lo for lo in current_result['logls'])
results.append((dataset.absolute_to_relative(item['file']),
current_class) + logs_in_tuple)
else:
raise ValueError("Unknown classifier method [" + classifier_method + "]")
# Save testing results
with open(current_result_file, 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for result_item in results:
writer.writerow(result_item)
def do_classification_dnn(feature_data, model_container):
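    """DNN classification for a feature matrix: restores the skflow model saved under
    'dnn/dnnmodel1', sums the log of the predicted class probabilities over all frames,
    and returns the winning class id together with the per-class log-likelihoods.
    """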
# Initialize log-likelihood matrix to -inf
logls = numpy.empty(15)
logls.fill(-numpy.inf)
model_clf = skflow.TensorFlowEstimator.restore('dnn/dnnmodel1')
logls = numpy.sum(numpy.log(model_clf.predict_proba(feature_data)), 0)
classification_result_id = numpy.argmax(logls)
return {'class_id': classification_result_id,
'logls': logls}
def do_classification_gmm(feature_data, model_container):
"""GMM classification for give feature matrix
model container format:
{
'normalizer': normalizer class
'models' :
{
'office' : mixture.GMM class
'home' : mixture.GMM class
...
}
}
Parameters
----------
feature_data : numpy.ndarray [shape=(t, feature vector length)]
feature matrix
model_container : dict
model container
Returns
-------
    result : dict
        classification result with keys 'class' (winning scene label) and 'logls' (per-class log-likelihoods)
"""
# Initialize log-likelihood matrix to -inf
logls = numpy.empty(len(model_container['models']))
logls.fill(-numpy.inf)
for label_id, label in enumerate(model_container['models']):
logls[label_id] = numpy.sum(model_container['models'][label].score(feature_data))
classification_result_id = numpy.argmax(logls)
return {'class': model_container['models'].keys()[classification_result_id],
'logls': logls}
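# Minimal usage sketch (the names below are hypothetical stand-ins, not module variables):
#     result = do_classification_gmm(test_feats, model_container)
#     result['class']  -> winning scene label
#     result['logls']  -> per-class log-likelihoods summed over all frames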
def do_system_evaluation(dataset, result_path, dataset_evaluation_mode='folds'):
"""System evaluation. Testing outputs are collected and evaluated. Evaluation results are printed.
Parameters
----------
dataset : class
dataset class
result_path : str
path where the results are saved.
dataset_evaluation_mode : str ['folds', 'full']
        evaluation mode; with 'full', all available material is treated as one fold.
(Default value='folds')
Returns
-------
nothing
Raises
-------
IOError
Result file not found
"""
dcase2016_scene_metric = DCASE2016_SceneClassification_Metrics(class_list=dataset.scene_labels)
results_fold = []
tot_cm = numpy.zeros((dataset.scene_label_count, dataset.scene_label_count))
for fold in dataset.folds(mode=dataset_evaluation_mode):
dcase2016_scene_metric_fold = DCASE2016_SceneClassification_Metrics(class_list=dataset.scene_labels)
results = []
result_filename = get_result_filename(fold=fold, path=result_path)
if os.path.isfile(result_filename):
with open(result_filename, 'rt') as f:
for row in csv.reader(f, delimiter='\t'):
results.append(row)
else:
raise IOError("Result file not found [%s]" % result_filename)
# Rewrite the result file
if os.path.isfile(result_filename):
with open(result_filename+'2', 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for result_item in results:
y_true = (dataset.file_meta(result_item[0])[0]['scene_label'],)
#print type(y_true)
#print type(result_item)
writer.writerow(y_true + tuple(result_item))
y_true = []
y_pred = []
for result in results:
y_true.append(dataset.file_meta(result[0])[0]['scene_label'])
y_pred.append(result[1])
dcase2016_scene_metric.evaluate(system_output=y_pred, annotated_ground_truth=y_true)
dcase2016_scene_metric_fold.evaluate(system_output=y_pred, annotated_ground_truth=y_true)
results_fold.append(dcase2016_scene_metric_fold.results())
tot_cm += confusion_matrix(y_true, y_pred)
final_result['tot_cm'] = tot_cm
final_result['tot_cm_acc'] = numpy.sum(numpy.diag(tot_cm)) / numpy.sum(tot_cm)
results = dcase2016_scene_metric.results()
final_result['result'] = results
print " File-wise evaluation, over %d folds" % dataset.fold_count
fold_labels = ''
separator = ' =====================+======+======+==========+ +'
if dataset.fold_count > 1:
for fold in dataset.folds(mode=dataset_evaluation_mode):
fold_labels += " {:8s} |".format('Fold' + str(fold))
separator += "==========+"
print " {:20s} | {:4s} : {:4s} | {:8s} | |".format('Scene label', 'Nref', 'Nsys', 'Accuracy') + fold_labels
print separator
for label_id, label in enumerate(sorted(results['class_wise_accuracy'])):
fold_values = ''
if dataset.fold_count > 1:
for fold in dataset.folds(mode=dataset_evaluation_mode):
fold_values += " {:5.1f} % |".format(results_fold[fold - 1]['class_wise_accuracy'][label] * 100)
print " {:20s} | {:4d} : {:4d} | {:5.1f} % | |".format(label,
results['class_wise_data'][label]['Nref'],
results['class_wise_data'][label]['Nsys'],
results['class_wise_accuracy'][
label] * 100) + fold_values
print separator
fold_values = ''
if dataset.fold_count > 1:
for fold in dataset.folds(mode=dataset_evaluation_mode):
fold_values += " {:5.1f} % |".format(results_fold[fold - 1]['overall_accuracy'] * 100)
print " {:20s} | {:4d} : {:4d} | {:5.1f} % | |".format('Overall accuracy',
results['Nref'],
results['Nsys'],
results['overall_accuracy'] * 100) + fold_values
if __name__ == "__main__":
try:
sys.exit(main(sys.argv))
except (ValueError, IOError) as e:
sys.exit(e)
| mit |
gladk/trunk | examples/simple-scene/simple-scene-plot.py | 8 | 2026 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import matplotlib
matplotlib.use('TkAgg')
O.engines=[
ForceResetter(),
InsertionSortCollider([Bo1_Sphere_Aabb(),Bo1_Box_Aabb()]),
InteractionLoop(
[Ig2_Sphere_Sphere_ScGeom(),Ig2_Box_Sphere_ScGeom()],
[Ip2_FrictMat_FrictMat_FrictPhys()],
[Law2_ScGeom_FrictPhys_CundallStrack()]
),
NewtonIntegrator(damping=.2,gravity=(0,0,-9.81)),
###
### NOTE this extra engine:
###
### You want plot data to be added every 20 iterations (iterPeriod=20 below); the
### time-based period attributes are unset, hence they are not taken into account.
PyRunner(iterPeriod=20,command='myAddPlotData()')
]
O.bodies.append(box(center=[0,0,0],extents=[.5,.5,.5],fixed=True,color=[1,0,0]))
O.bodies.append(sphere([0,0,2],1,color=[0,1,0]))
O.dt=.002*PWaveTimeStep()
############################################
##### now the part pertaining to plots #####
############################################
from yade import plot
## we will have 2 plots:
## 1. t as function of i (joke test function)
## 2. i as function of t on left y-axis ('|||' makes the separation) and z_sph, v_sph (as green circles connected with line) and z_sph_half again as function of t
plot.plots={'i':('t'),'t':('z_sph',None,('v_sph','go-'),'z_sph_half')}
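## For comparison, a hypothetical single-figure spec would be plot.plots={'t':('z_sph','v_sph')},
## i.e. one key per x-axis variable mapped to a tuple of y-axis labels, where a label may be
## replaced by a (label, matplotlib-style) pair such as ('v_sph','go-') above.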
## this function is called by plotDataCollector
## it should add data with the labels that we will plot
## if a datum is not specified (but exists), it will be NaN and will not be plotted
def myAddPlotData():
sph=O.bodies[1]
## store some numbers under some labels
plot.addData(t=O.time,i=O.iter,z_sph=sph.state.pos[2],z_sph_half=.5*sph.state.pos[2],v_sph=sph.state.vel.norm())
print "Now calling plot.plot() to show the figures. The timestep is artificially low so that you can watch graphs being updated live."
plot.liveInterval=.2
plot.plot(subPlots=False)
O.run(int(2./O.dt));
#plot.saveGnuplot('/tmp/a')
## you can also access the data in plot.data['i'], plot.data['t'] etc, under the labels they were saved.
| gpl-2.0 |
akhilaananthram/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/artist.py | 69 | 33042 | from __future__ import division
import re, warnings
import matplotlib
import matplotlib.cbook as cbook
from transforms import Bbox, IdentityTransform, TransformedBbox, TransformedPath
from path import Path
## Note, matplotlib artists use the doc strings for set and get
# methods to enable the introspection methods of setp and getp. Every
# set_* method should have a docstring containing the line
#
# ACCEPTS: [ legal | values ]
#
# and aliases for setters and getters should have a docstring that
# starts with 'alias for ', as in 'alias for set_somemethod'
#
# You may wonder why we use so much boiler-plate manually defining the
# set_alias and get_alias functions, rather than using some clever
# python trick. The answer is that I need to be able to manipulate
# the docstring, and there is no clever way to do that in python 2.2,
# as far as I can see - see
# http://groups.google.com/groups?hl=en&lr=&threadm=mailman.5090.1098044946.5135.python-list%40python.org&rnum=1&prev=/groups%3Fq%3D__doc__%2Bauthor%253Ajdhunter%2540ace.bsd.uchicago.edu%26hl%3Den%26btnG%3DGoogle%2BSearch
class Artist(object):
"""
Abstract base class for someone who renders into a
:class:`FigureCanvas`.
"""
aname = 'Artist'
zorder = 0
def __init__(self):
self.figure = None
self._transform = None
self._transformSet = False
self._visible = True
self._animated = False
self._alpha = 1.0
self.clipbox = None
self._clippath = None
self._clipon = True
self._lod = False
self._label = ''
self._picker = None
self._contains = None
self.eventson = False # fire events only if eventson
self._oid = 0 # an observer id
self._propobservers = {} # a dict from oids to funcs
self.axes = None
self._remove_method = None
self._url = None
self.x_isdata = True # False to avoid updating Axes.dataLim with x
self.y_isdata = True # with y
self._snap = None
def remove(self):
"""
Remove the artist from the figure if possible. The effect
will not be visible until the figure is redrawn, e.g., with
:meth:`matplotlib.axes.Axes.draw_idle`. Call
:meth:`matplotlib.axes.Axes.relim` to update the axes limits
if desired.
Note: :meth:`~matplotlib.axes.Axes.relim` will not see
collections even if the collection was added to axes with
*autolim* = True.
Note: there is no support for removing the artist's legend entry.
"""
# There is no method to set the callback. Instead the parent should set
# the _remove_method attribute directly. This would be a protected
# attribute if Python supported that sort of thing. The callback
# has one parameter, which is the child to be removed.
if self._remove_method != None:
self._remove_method(self)
else:
raise NotImplementedError('cannot remove artist')
# TODO: the fix for the collections relim problem is to move the
# limits calculation into the artist itself, including the property
# of whether or not the artist should affect the limits. Then there
# will be no distinction between axes.add_line, axes.add_patch, etc.
# TODO: add legend support
def have_units(self):
'Return *True* if units are set on the *x* or *y* axes'
ax = self.axes
if ax is None or ax.xaxis is None:
return False
return ax.xaxis.have_units() or ax.yaxis.have_units()
def convert_xunits(self, x):
"""For artists in an axes, if the xaxis has units support,
convert *x* using xaxis unit type
"""
ax = getattr(self, 'axes', None)
if ax is None or ax.xaxis is None:
#print 'artist.convert_xunits no conversion: ax=%s'%ax
return x
return ax.xaxis.convert_units(x)
def convert_yunits(self, y):
"""For artists in an axes, if the yaxis has units support,
convert *y* using yaxis unit type
"""
ax = getattr(self, 'axes', None)
if ax is None or ax.yaxis is None: return y
return ax.yaxis.convert_units(y)
def set_axes(self, axes):
"""
Set the :class:`~matplotlib.axes.Axes` instance in which the
artist resides, if any.
ACCEPTS: an :class:`~matplotlib.axes.Axes` instance
"""
self.axes = axes
def get_axes(self):
"""
Return the :class:`~matplotlib.axes.Axes` instance the artist
resides in, or *None*
"""
return self.axes
def add_callback(self, func):
"""
Adds a callback function that will be called whenever one of
the :class:`Artist`'s properties changes.
Returns an *id* that is useful for removing the callback with
:meth:`remove_callback` later.
"""
oid = self._oid
self._propobservers[oid] = func
self._oid += 1
return oid
def remove_callback(self, oid):
"""
Remove a callback based on its *id*.
.. seealso::
:meth:`add_callback`
"""
try: del self._propobservers[oid]
except KeyError: pass
def pchanged(self):
"""
Fire an event when property changed, calling all of the
registered callbacks.
"""
for oid, func in self._propobservers.items():
func(self)
def is_transform_set(self):
"""
Returns *True* if :class:`Artist` has a transform explicitly
set.
"""
return self._transformSet
def set_transform(self, t):
"""
Set the :class:`~matplotlib.transforms.Transform` instance
used by this artist.
ACCEPTS: :class:`~matplotlib.transforms.Transform` instance
"""
self._transform = t
self._transformSet = True
self.pchanged()
def get_transform(self):
"""
Return the :class:`~matplotlib.transforms.Transform`
instance used by this artist.
"""
if self._transform is None:
self._transform = IdentityTransform()
return self._transform
def hitlist(self, event):
"""
List the children of the artist which contain the mouse event *event*.
"""
import traceback
L = []
try:
hascursor,info = self.contains(event)
if hascursor:
L.append(self)
except:
traceback.print_exc()
print "while checking",self.__class__
for a in self.get_children():
L.extend(a.hitlist(event))
return L
def get_children(self):
"""
Return a list of the child :class:`Artist`s this
:class:`Artist` contains.
"""
return []
def contains(self, mouseevent):
"""Test whether the artist contains the mouse event.
Returns the truth value and a dictionary of artist specific details of
selection, such as which points are contained in the pick radius. See
individual artists for details.
"""
if callable(self._contains): return self._contains(self,mouseevent)
#raise NotImplementedError,str(self.__class__)+" needs 'contains' method"
warnings.warn("'%s' needs 'contains' method" % self.__class__.__name__)
return False,{}
def set_contains(self,picker):
"""
Replace the contains test used by this artist. The new picker
should be a callable function which determines whether the
artist is hit by the mouse event::
hit, props = picker(artist, mouseevent)
If the mouse event is over the artist, return *hit* = *True*
and *props* is a dictionary of properties you want returned
with the contains test.
ACCEPTS: a callable function
"""
self._contains = picker
def get_contains(self):
"""
Return the _contains test used by the artist, or *None* for default.
"""
return self._contains
def pickable(self):
'Return *True* if :class:`Artist` is pickable.'
return (self.figure is not None and
self.figure.canvas is not None and
self._picker is not None)
def pick(self, mouseevent):
"""
call signature::
pick(mouseevent)
each child artist will fire a pick event if *mouseevent* is over
the artist and the artist has picker set
"""
# Pick self
if self.pickable():
picker = self.get_picker()
if callable(picker):
inside,prop = picker(self,mouseevent)
else:
inside,prop = self.contains(mouseevent)
if inside:
self.figure.canvas.pick_event(mouseevent, self, **prop)
# Pick children
for a in self.get_children():
a.pick(mouseevent)
def set_picker(self, picker):
"""
Set the epsilon for picking used by this artist
*picker* can be one of the following:
* *None*: picking is disabled for this artist (default)
* A boolean: if *True* then picking will be enabled and the
artist will fire a pick event if the mouse event is over
the artist
* A float: if picker is a number it is interpreted as an
epsilon tolerance in points and the artist will fire
            off an event if its data is within epsilon of the mouse
event. For some artists like lines and patch collections,
the artist may provide additional data to the pick event
that is generated, e.g. the indices of the data within
epsilon of the pick event
* A function: if picker is callable, it is a user supplied
function which determines whether the artist is hit by the
mouse event::
hit, props = picker(artist, mouseevent)
to determine the hit test. if the mouse event is over the
artist, return *hit=True* and props is a dictionary of
properties you want added to the PickEvent attributes.
ACCEPTS: [None|float|boolean|callable]
"""
self._picker = picker
def get_picker(self):
'Return the picker object used by this artist'
return self._picker
def is_figure_set(self):
"""
Returns True if the artist is assigned to a
:class:`~matplotlib.figure.Figure`.
"""
return self.figure is not None
def get_url(self):
"""
Returns the url
"""
return self._url
def set_url(self, url):
"""
Sets the url for the artist
"""
self._url = url
def get_snap(self):
"""
Returns the snap setting which may be:
* True: snap vertices to the nearest pixel center
* False: leave vertices as-is
* None: (auto) If the path contains only rectilinear line
segments, round to the nearest pixel center
Only supported by the Agg backends.
"""
return self._snap
def set_snap(self, snap):
"""
Sets the snap setting which may be:
* True: snap vertices to the nearest pixel center
* False: leave vertices as-is
* None: (auto) If the path contains only rectilinear line
segments, round to the nearest pixel center
Only supported by the Agg backends.
"""
self._snap = snap
def get_figure(self):
"""
Return the :class:`~matplotlib.figure.Figure` instance the
artist belongs to.
"""
return self.figure
def set_figure(self, fig):
"""
Set the :class:`~matplotlib.figure.Figure` instance the artist
belongs to.
ACCEPTS: a :class:`matplotlib.figure.Figure` instance
"""
self.figure = fig
self.pchanged()
def set_clip_box(self, clipbox):
"""
Set the artist's clip :class:`~matplotlib.transforms.Bbox`.
ACCEPTS: a :class:`matplotlib.transforms.Bbox` instance
"""
self.clipbox = clipbox
self.pchanged()
def set_clip_path(self, path, transform=None):
"""
Set the artist's clip path, which may be:
* a :class:`~matplotlib.patches.Patch` (or subclass) instance
* a :class:`~matplotlib.path.Path` instance, in which case
an optional :class:`~matplotlib.transforms.Transform`
instance may be provided, which will be applied to the
path before using it for clipping.
* *None*, to remove the clipping path
For efficiency, if the path happens to be an axis-aligned
rectangle, this method will set the clipping box to the
corresponding rectangle and set the clipping path to *None*.
ACCEPTS: [ (:class:`~matplotlib.path.Path`,
:class:`~matplotlib.transforms.Transform`) |
:class:`~matplotlib.patches.Patch` | None ]
"""
from patches import Patch, Rectangle
success = False
if transform is None:
if isinstance(path, Rectangle):
self.clipbox = TransformedBbox(Bbox.unit(), path.get_transform())
self._clippath = None
success = True
elif isinstance(path, Patch):
self._clippath = TransformedPath(
path.get_path(),
path.get_transform())
success = True
if path is None:
self._clippath = None
success = True
elif isinstance(path, Path):
self._clippath = TransformedPath(path, transform)
success = True
if not success:
print type(path), type(transform)
raise TypeError("Invalid arguments to set_clip_path")
self.pchanged()
def get_alpha(self):
"""
Return the alpha value used for blending - not supported on all
backends
"""
return self._alpha
def get_visible(self):
"Return the artist's visiblity"
return self._visible
def get_animated(self):
"Return the artist's animated state"
return self._animated
def get_clip_on(self):
'Return whether artist uses clipping'
return self._clipon
def get_clip_box(self):
'Return artist clipbox'
return self.clipbox
def get_clip_path(self):
'Return artist clip path'
return self._clippath
def get_transformed_clip_path_and_affine(self):
'''
Return the clip path with the non-affine part of its
transformation applied, and the remaining affine part of its
transformation.
'''
if self._clippath is not None:
return self._clippath.get_transformed_path_and_affine()
return None, None
def set_clip_on(self, b):
"""
Set whether artist uses clipping.
ACCEPTS: [True | False]
"""
self._clipon = b
self.pchanged()
def _set_gc_clip(self, gc):
'Set the clip properly for the gc'
if self._clipon:
if self.clipbox is not None:
gc.set_clip_rectangle(self.clipbox)
gc.set_clip_path(self._clippath)
else:
gc.set_clip_rectangle(None)
gc.set_clip_path(None)
def draw(self, renderer, *args, **kwargs):
'Derived classes drawing method'
if not self.get_visible(): return
def set_alpha(self, alpha):
"""
Set the alpha value used for blending - not supported on
all backends
ACCEPTS: float (0.0 transparent through 1.0 opaque)
"""
self._alpha = alpha
self.pchanged()
def set_lod(self, on):
"""
Set Level of Detail on or off. If on, the artists may examine
things like the pixel width of the axes and draw a subset of
their contents accordingly
ACCEPTS: [True | False]
"""
self._lod = on
self.pchanged()
def set_visible(self, b):
"""
        Set the artist's visibility.
ACCEPTS: [True | False]
"""
self._visible = b
self.pchanged()
def set_animated(self, b):
"""
Set the artist's animation state.
ACCEPTS: [True | False]
"""
self._animated = b
self.pchanged()
def update(self, props):
"""
Update the properties of this :class:`Artist` from the
dictionary *prop*.
"""
store = self.eventson
self.eventson = False
changed = False
for k,v in props.items():
func = getattr(self, 'set_'+k, None)
if func is None or not callable(func):
raise AttributeError('Unknown property %s'%k)
func(v)
changed = True
self.eventson = store
if changed: self.pchanged()
def get_label(self):
"""
Get the label used for this artist in the legend.
"""
return self._label
def set_label(self, s):
"""
Set the label to *s* for auto legend.
ACCEPTS: any string
"""
self._label = s
self.pchanged()
def get_zorder(self):
"""
Return the :class:`Artist`'s zorder.
"""
return self.zorder
def set_zorder(self, level):
"""
Set the zorder for the artist. Artists with lower zorder
values are drawn first.
ACCEPTS: any number
"""
self.zorder = level
self.pchanged()
def update_from(self, other):
'Copy properties from *other* to *self*.'
self._transform = other._transform
self._transformSet = other._transformSet
self._visible = other._visible
self._alpha = other._alpha
self.clipbox = other.clipbox
self._clipon = other._clipon
self._clippath = other._clippath
self._lod = other._lod
self._label = other._label
self.pchanged()
def set(self, **kwargs):
"""
A tkstyle set command, pass *kwargs* to set properties
"""
ret = []
for k,v in kwargs.items():
k = k.lower()
funcName = "set_%s"%k
func = getattr(self,funcName)
ret.extend( [func(v)] )
return ret
def findobj(self, match=None):
"""
pyplot signature:
findobj(o=gcf(), match=None)
Recursively find all :class:matplotlib.artist.Artist instances
contained in self.
*match* can be
- None: return all objects contained in artist (including artist)
- function with signature ``boolean = match(artist)`` used to filter matches
- class instance: eg Line2D. Only return artists of class type
.. plot:: mpl_examples/pylab_examples/findobj_demo.py
"""
if match is None: # always return True
def matchfunc(x): return True
elif cbook.issubclass_safe(match, Artist):
def matchfunc(x):
return isinstance(x, match)
elif callable(match):
matchfunc = match
else:
            raise ValueError('match must be None, a matplotlib.artist.Artist subclass, or a callable')
artists = []
for c in self.get_children():
if matchfunc(c):
artists.append(c)
artists.extend([thisc for thisc in c.findobj(matchfunc) if matchfunc(thisc)])
if matchfunc(self):
artists.append(self)
return artists
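# Usage sketch (hypothetical objects, not defined in this module): for a figure `fig`,
# fig.findobj(matplotlib.lines.Line2D) returns every Line2D nested anywhere inside it, while
# fig.findobj(lambda a: a.get_label() == 'foo') filters with an arbitrary predicate; the
# searched artist itself is included whenever it matches.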
class ArtistInspector:
"""
A helper class to inspect an :class:`~matplotlib.artist.Artist`
    and return information about its settable properties and their
current values.
"""
def __init__(self, o):
"""
Initialize the artist inspector with an
:class:`~matplotlib.artist.Artist` or sequence of
:class:`Artists`. If a sequence is used, we assume it is a
homogeneous sequence (all :class:`Artists` are of the same
type) and it is your responsibility to make sure this is so.
"""
if cbook.iterable(o) and len(o): o = o[0]
self.oorig = o
if not isinstance(o, type):
o = type(o)
self.o = o
self.aliasd = self.get_aliases()
def get_aliases(self):
"""
Get a dict mapping *fullname* -> *alias* for each *alias* in
the :class:`~matplotlib.artist.ArtistInspector`.
Eg., for lines::
          {'markerfacecolor': {'mfc': None},
           'linewidth'      : {'lw': None},
          }
"""
names = [name for name in dir(self.o) if
(name.startswith('set_') or name.startswith('get_'))
and callable(getattr(self.o,name))]
aliases = {}
for name in names:
func = getattr(self.o, name)
if not self.is_alias(func): continue
docstring = func.__doc__
fullname = docstring[10:]
aliases.setdefault(fullname[4:], {})[name[4:]] = None
return aliases
_get_valid_values_regex = re.compile(r"\n\s*ACCEPTS:\s*((?:.|\n)*?)(?:$|(?:\n\n))")
def get_valid_values(self, attr):
"""
Get the legal arguments for the setter associated with *attr*.
This is done by querying the docstring of the function *set_attr*
for a line that begins with ACCEPTS:
Eg., for a line linestyle, return
[ '-' | '--' | '-.' | ':' | 'steps' | 'None' ]
"""
name = 'set_%s'%attr
if not hasattr(self.o, name):
raise AttributeError('%s has no function %s'%(self.o,name))
func = getattr(self.o, name)
docstring = func.__doc__
if docstring is None: return 'unknown'
if docstring.startswith('alias for '):
return None
match = self._get_valid_values_regex.search(docstring)
if match is not None:
return match.group(1).replace('\n', ' ')
return 'unknown'
def _get_setters_and_targets(self):
"""
Get the attribute strings and a full path to where the setter
is defined for all setters in an object.
"""
setters = []
for name in dir(self.o):
if not name.startswith('set_'): continue
o = getattr(self.o, name)
if not callable(o): continue
func = o
if self.is_alias(func): continue
source_class = self.o.__module__ + "." + self.o.__name__
for cls in self.o.mro():
if name in cls.__dict__:
source_class = cls.__module__ + "." + cls.__name__
break
setters.append((name[4:], source_class + "." + name))
return setters
def get_setters(self):
"""
Get the attribute strings with setters for object. Eg., for a line,
return ``['markerfacecolor', 'linewidth', ....]``.
"""
return [prop for prop, target in self._get_setters_and_targets()]
def is_alias(self, o):
"""
Return *True* if method object *o* is an alias for another
function.
"""
ds = o.__doc__
if ds is None: return False
return ds.startswith('alias for ')
def aliased_name(self, s):
"""
return 'PROPNAME or alias' if *s* has an alias, else return
PROPNAME.
E.g. for the line markerfacecolor property, which has an
alias, return 'markerfacecolor or mfc' and for the transform
property, which does not, return 'transform'
"""
if s in self.aliasd:
return s + ''.join([' or %s' % x for x in self.aliasd[s].keys()])
else:
return s
def aliased_name_rest(self, s, target):
"""
return 'PROPNAME or alias' if *s* has an alias, else return
PROPNAME formatted for ReST
E.g. for the line markerfacecolor property, which has an
alias, return 'markerfacecolor or mfc' and for the transform
property, which does not, return 'transform'
"""
if s in self.aliasd:
aliases = ''.join([' or %s' % x for x in self.aliasd[s].keys()])
else:
aliases = ''
return ':meth:`%s <%s>`%s' % (s, target, aliases)
def pprint_setters(self, prop=None, leadingspace=2):
"""
        If *prop* is *None*, return a list of strings of all settable properties
and their valid values.
If *prop* is not *None*, it is a valid property name and that
property will be returned as a string of property : valid
values.
"""
if leadingspace:
pad = ' '*leadingspace
else:
pad = ''
if prop is not None:
accepts = self.get_valid_values(prop)
return '%s%s: %s' %(pad, prop, accepts)
attrs = self._get_setters_and_targets()
attrs.sort()
lines = []
for prop, path in attrs:
accepts = self.get_valid_values(prop)
name = self.aliased_name(prop)
lines.append('%s%s: %s' %(pad, name, accepts))
return lines
def pprint_setters_rest(self, prop=None, leadingspace=2):
"""
        If *prop* is *None*, return a list of strings of all settable properties
and their valid values. Format the output for ReST
If *prop* is not *None*, it is a valid property name and that
property will be returned as a string of property : valid
values.
"""
if leadingspace:
pad = ' '*leadingspace
else:
pad = ''
if prop is not None:
accepts = self.get_valid_values(prop)
return '%s%s: %s' %(pad, prop, accepts)
attrs = self._get_setters_and_targets()
attrs.sort()
lines = []
########
names = [self.aliased_name_rest(prop, target) for prop, target in attrs]
accepts = [self.get_valid_values(prop) for prop, target in attrs]
col0_len = max([len(n) for n in names])
col1_len = max([len(a) for a in accepts])
table_formatstr = pad + '='*col0_len + ' ' + '='*col1_len
lines.append('')
lines.append(table_formatstr)
lines.append(pad + 'Property'.ljust(col0_len+3) + \
'Description'.ljust(col1_len))
lines.append(table_formatstr)
lines.extend([pad + n.ljust(col0_len+3) + a.ljust(col1_len)
for n, a in zip(names, accepts)])
lines.append(table_formatstr)
lines.append('')
return lines
########
for prop, path in attrs:
accepts = self.get_valid_values(prop)
name = self.aliased_name_rest(prop, path)
lines.append('%s%s: %s' %(pad, name, accepts))
return lines
def pprint_getters(self):
"""
Return the getters and actual values as list of strings.
"""
o = self.oorig
getters = [name for name in dir(o)
if name.startswith('get_')
and callable(getattr(o, name))]
#print getters
getters.sort()
lines = []
for name in getters:
func = getattr(o, name)
if self.is_alias(func): continue
try: val = func()
except: continue
if getattr(val, 'shape', ()) != () and len(val)>6:
s = str(val[:6]) + '...'
else:
s = str(val)
s = s.replace('\n', ' ')
if len(s)>50:
s = s[:50] + '...'
name = self.aliased_name(name[4:])
lines.append(' %s = %s' %(name, s))
return lines
def findobj(self, match=None):
"""
Recursively find all :class:`matplotlib.artist.Artist`
instances contained in *self*.
If *match* is not None, it can be
- function with signature ``boolean = match(artist)``
- class instance: eg :class:`~matplotlib.lines.Line2D`
used to filter matches.
"""
if match is None: # always return True
def matchfunc(x): return True
elif issubclass(match, Artist):
def matchfunc(x):
return isinstance(x, match)
elif callable(match):
            matchfunc = match
else:
            raise ValueError('match must be None, a matplotlib.artist.Artist subclass, or a callable')
artists = []
for c in self.get_children():
if matchfunc(c):
artists.append(c)
artists.extend([thisc for thisc in c.findobj(matchfunc) if matchfunc(thisc)])
if matchfunc(self):
artists.append(self)
return artists
def getp(o, property=None):
"""
Return the value of handle property. property is an optional string
for the property you want to return
Example usage::
getp(o) # get all the object properties
getp(o, 'linestyle') # get the linestyle property
*o* is a :class:`Artist` instance, eg
:class:`~matplotllib.lines.Line2D` or an instance of a
:class:`~matplotlib.axes.Axes` or :class:`matplotlib.text.Text`.
If the *property* is 'somename', this function returns
o.get_somename()
:func:`getp` can be used to query all the gettable properties with
``getp(o)``. Many properties have aliases for shorter typing, e.g.
'lw' is an alias for 'linewidth'. In the output, aliases and full
property names will be listed as:
property or alias = value
e.g.:
linewidth or lw = 2
"""
insp = ArtistInspector(o)
if property is None:
ret = insp.pprint_getters()
print '\n'.join(ret)
return
func = getattr(o, 'get_' + property)
return func()
# alias
get = getp
def setp(h, *args, **kwargs):
"""
matplotlib supports the use of :func:`setp` ("set property") and
:func:`getp` to set and get object properties, as well as to do
introspection on the object. For example, to set the linestyle of a
line to be dashed, you can do::
>>> line, = plot([1,2,3])
>>> setp(line, linestyle='--')
If you want to know the valid types of arguments, you can provide the
name of the property you want to set without a value::
>>> setp(line, 'linestyle')
linestyle: [ '-' | '--' | '-.' | ':' | 'steps' | 'None' ]
If you want to see all the properties that can be set, and their
possible values, you can do::
>>> setp(line)
... long output listing omitted
:func:`setp` operates on a single instance or a list of instances.
If you are in query mode introspecting the possible values, only
the first instance in the sequence is used. When actually setting
values, all the instances will be set. E.g., suppose you have a
list of two lines, the following will make both lines thicker and
red::
>>> x = arange(0,1.0,0.01)
>>> y1 = sin(2*pi*x)
>>> y2 = sin(4*pi*x)
>>> lines = plot(x, y1, x, y2)
>>> setp(lines, linewidth=2, color='r')
:func:`setp` works with the matlab(TM) style string/value pairs or
with python kwargs. For example, the following are equivalent::
        >>> setp(lines, 'linewidth', 2, 'color', 'r') # matlab style
>>> setp(lines, linewidth=2, color='r') # python style
"""
insp = ArtistInspector(h)
if len(kwargs)==0 and len(args)==0:
print '\n'.join(insp.pprint_setters())
return
if len(kwargs)==0 and len(args)==1:
print insp.pprint_setters(prop=args[0])
return
if not cbook.iterable(h): h = [h]
else: h = cbook.flatten(h)
if len(args)%2:
raise ValueError('The set args must be string, value pairs')
funcvals = []
for i in range(0, len(args)-1, 2):
funcvals.append((args[i], args[i+1]))
funcvals.extend(kwargs.items())
ret = []
for o in h:
for s, val in funcvals:
s = s.lower()
funcName = "set_%s"%s
func = getattr(o,funcName)
ret.extend( [func(val)] )
return [x for x in cbook.flatten(ret)]
def kwdoc(a):
hardcopy = matplotlib.rcParams['docstring.hardcopy']
if hardcopy:
return '\n'.join(ArtistInspector(a).pprint_setters_rest(leadingspace=2))
else:
return '\n'.join(ArtistInspector(a).pprint_setters(leadingspace=2))
kwdocd = dict()
kwdocd['Artist'] = kwdoc(Artist)
| agpl-3.0 |
ppp2006/runbot_number0 | qbo_stereo_anaglyph/hrl_lib/src/hrl_lib/pca.py | 3 | 2995 | #
# Copyright (c) 2009, Georgia Tech Research Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Georgia Tech Research Corporation nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY GEORGIA TECH RESEARCH CORPORATION ''AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL GEORGIA TECH BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# \author Advait Jain (Healthcare Robotics Lab, Georgia Tech.)
#
# standard stuff, but with a bit of visualization.
#
import numpy as np, math
# @return numpy matrix where each column is a new sample.
def sample_gaussian(mean, cov, n_samples):
mean = np.array(mean).flatten()
cov = np.array(cov)
s = np.random.multivariate_normal(mean, cov, (n_samples,))
return np.matrix(s).T
# x - numpy matrix. each column is a new datapoint
def pca(x):
U, sig, _ = np.linalg.svd(np.matrix(np.cov(x)))
return U, sig
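# Usage sketch: pca expects one column per sample, e.g. for x of shape (d, n) the call
#     U, sig = pca(x)
# returns the principal directions as the columns of U and the corresponding
# eigenvalues of the sample covariance in sig, in descending order.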
# untested.
def dimen_reduc(x, n_dim):
U, sig = pca(x)
proj_mat = np.matrix(U[:,0:n_dim])
mn = np.mean(x, 1)
x_projected = proj_mat.T * (x - mn)
return x_projected.A
if __name__ == '__main__':
import matplotlib.pyplot as pp
import roslib; roslib.load_manifest('hrl_lib')
import hrl_lib.matplotlib_util as mpu
pp.axis('equal')
mn = np.matrix((1., 5.)).T
cov = np.matrix([[2.,50.],[50, 100]])
s = sample_gaussian(mn, cov, 1000)
P = np.cov(s)
mpu.plot_ellipse_cov(mn, P, 'k', 'w')
pp.plot(s[0,:].A1, s[1,:].A1, '.y', ms=3)
U, sig = pca(s)
mn = np.mean(s, 1)
pp.plot(mn[0,:].A1, mn[1,:].A1, 'ok', ms=5)
p1 = mn + U[:,0] * math.sqrt(sig[0])
p2 = mn + U[:,1] * math.sqrt(sig[1])
pp.plot([p1[0,0], mn[0,0], p2[0,0]], [p1[1,0], mn[1,0], p2[1,0]],
'k-', linewidth=2)
pp.show()
| lgpl-2.1 |
caikehe/YELP_DS | Blending.py | 2 | 2128 | #!/usr/bin/env python
# encoding: utf-8
"""
This code implements review text classification using Support Vector Machine, Support Vector Regression,
Decision Tree and Random Forest; the evaluation function is implemented as well.
"""
from time import gmtime, strftime
from sklearn import ensemble, svm
import Scikit_Classification as sc
features = []
labels = []
def main():
starttime = strftime("%Y-%m-%d %H:%M:%S",gmtime())
config = {}
execfile("params.conf", config)
inputfile = config["histogram_dataset"]
trainingSamples = config["trainingSamples"]
testingSamples = config["testingSamples"]
numberOfSamples = trainingSamples + testingSamples
rf_selectedFeatures = "all"
svm_selectedFeatures = [20, 21, 22, 23, 24]
rf_features, rf_labels = sc.Data_Preparation(inputfile, rf_selectedFeatures)
svm_features, svm_labels = sc.Data_Preparation(inputfile, svm_selectedFeatures)
Scikit_RandomForest_Model = ensemble.RandomForestClassifier(n_estimators=510, criterion='gini', max_depth=7,
min_samples_split=2, min_samples_leaf=1, max_features='sqrt',
bootstrap=True, oob_score=False, n_jobs=-1, random_state=None, verbose=0,
min_density=None, compute_importances=None)
Scikit_SVM_Model = svm.SVC(C=1.0, kernel='rbf', degree=3, gamma=0.0, coef0=0.0, shrinking=True, probability=True, tol=0.001, cache_size=200, class_weight=None, verbose=False, max_iter=-1, random_state=None)
accuracy, testing_Labels, predict_Labels = sc.Classification_Blending(Scikit_RandomForest_Model, rf_features, rf_labels, Scikit_SVM_Model, svm_features, svm_labels, trainingSamples, testingSamples)
sc.Result_Evaluation('data/evaluation_result/evaluation_Blending.txt', accuracy, testing_Labels, predict_Labels)
endtime = strftime("%Y-%m-%d %H:%M:%S",gmtime())
print(starttime)
print(endtime)
if __name__ == "__main__":
main()
| apache-2.0 |
mattilyra/gensim | gensim/sklearn_api/lsimodel.py | 1 | 6165 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Chinmaya Pancholi <[email protected]>
# Copyright (C) 2017 Radim Rehurek <[email protected]>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""Scikit learn interface for :class:`gensim.models.lsimodel.LsiModel`.
Follows scikit-learn API conventions to facilitate using gensim along with scikit-learn.
Examples
--------
Integrate with sklearn Pipelines:
>>> import numpy as np
>>> from sklearn.pipeline import Pipeline
>>> from sklearn import linear_model
>>> from gensim.test.utils import common_corpus, common_dictionary
>>> from gensim.sklearn_api import LsiTransformer
>>>
>>> # Create stages for our pipeline (including gensim and sklearn models alike).
>>> model = LsiTransformer(num_topics=15, id2word=common_dictionary)
>>> clf = linear_model.LogisticRegression(penalty='l2', C=0.1)
>>> pipe = Pipeline([('features', model,), ('classifier', clf)])
>>>
>>> # Create some random binary labels for our documents.
>>> labels = np.random.choice([0, 1], len(common_corpus))
>>>
>>> # How well does our pipeline perform on the training set?
>>> score = pipe.fit(common_corpus, labels).score(common_corpus, labels)
"""
import numpy as np
from scipy import sparse
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn.exceptions import NotFittedError
from gensim import models
from gensim import matutils
class LsiTransformer(TransformerMixin, BaseEstimator):
"""Base LSI module, wraps :class:`~gensim.models.lsimodel.LsiModel`.
For more information please have a look to `Latent semantic analysis
<https://en.wikipedia.org/wiki/Latent_semantic_analysis>`_.
"""
def __init__(self, num_topics=200, id2word=None, chunksize=20000,
decay=1.0, onepass=True, power_iters=2, extra_samples=100):
"""
Parameters
----------
num_topics : int, optional
Number of requested factors (latent dimensions).
id2word : :class:`~gensim.corpora.dictionary.Dictionary`, optional
ID to word mapping, optional.
chunksize : int, optional
Number of documents to be used in each training chunk.
decay : float, optional
Weight of existing observations relatively to new ones.
onepass : bool, optional
Whether the one-pass algorithm should be used for training, pass `False` to force a
multi-pass stochastic algorithm.
power_iters: int, optional
Number of power iteration steps to be used.
Increasing the number of power iterations improves accuracy, but lowers performance.
extra_samples : int, optional
Extra samples to be used besides the rank `k`. Can improve accuracy.
"""
self.gensim_model = None
self.num_topics = num_topics
self.id2word = id2word
self.chunksize = chunksize
self.decay = decay
self.onepass = onepass
self.extra_samples = extra_samples
self.power_iters = power_iters
def fit(self, X, y=None):
"""Fit the model according to the given training data.
Parameters
----------
X : {iterable of list of (int, number), scipy.sparse matrix}
A collection of documents in BOW format to be transformed.
Returns
-------
:class:`~gensim.sklearn_api.lsimodel.LsiTransformer`
The trained model.
"""
if sparse.issparse(X):
corpus = matutils.Sparse2Corpus(sparse=X, documents_columns=False)
else:
corpus = X
self.gensim_model = models.LsiModel(
corpus=corpus, num_topics=self.num_topics, id2word=self.id2word, chunksize=self.chunksize,
decay=self.decay, onepass=self.onepass, power_iters=self.power_iters, extra_samples=self.extra_samples
)
return self
def transform(self, docs):
"""Computes the latent factors for `docs`.
Parameters
----------
docs : {iterable of list of (int, number), list of (int, number), scipy.sparse matrix}
Document or collection of documents in BOW format to be transformed.
Returns
-------
numpy.ndarray of shape [`len(docs)`, `num_topics`]
Topic distribution matrix.
"""
if self.gensim_model is None:
raise NotFittedError(
"This model has not been fitted yet. Call 'fit' with appropriate arguments before using this method."
)
        # A single document (a list of (id, value) 2-tuples) is wrapped so we always iterate over a collection of documents
if isinstance(docs[0], tuple):
docs = [docs]
# returning dense representation for compatibility with sklearn
# but we should go back to sparse representation in the future
distribution = [matutils.sparse2full(self.gensim_model[doc], self.num_topics) for doc in docs]
return np.reshape(np.array(distribution), (len(docs), self.num_topics))
def partial_fit(self, X):
"""Train model over a potentially incomplete set of documents.
This method can be used in two ways:
1. On an unfitted model in which case the model is initialized and trained on `X`.
2. On an already fitted model in which case the model is **further** trained on `X`.
Parameters
----------
X : {iterable of list of (int, number), scipy.sparse matrix}
Stream of document vectors or sparse matrix of shape: [`num_terms`, `num_documents`].
Returns
-------
:class:`~gensim.sklearn_api.lsimodel.LsiTransformer`
The trained model.
"""
if sparse.issparse(X):
X = matutils.Sparse2Corpus(sparse=X, documents_columns=False)
if self.gensim_model is None:
self.gensim_model = models.LsiModel(
num_topics=self.num_topics, id2word=self.id2word, chunksize=self.chunksize, decay=self.decay,
onepass=self.onepass, power_iters=self.power_iters, extra_samples=self.extra_samples
)
self.gensim_model.add_documents(corpus=X)
return self
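# A minimal usage sketch (not part of the original module), assuming gensim is
# installed; the toy texts and variable names below are illustrative only:
#
#     from gensim.corpora import Dictionary
#     texts = [["human", "interface", "computer"],
#              ["graph", "trees", "minors"]]
#     id2word = Dictionary(texts)
#     corpus = [id2word.doc2bow(text) for text in texts]
#     lsi = LsiTransformer(num_topics=2, id2word=id2word)
#     lsi.fit(corpus)                    # wraps models.LsiModel internally
#     topics = lsi.transform(corpus)     # numpy.ndarray of shape (2, 2)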
| lgpl-2.1 |
DStauffman/dstauffman2 | dstauffman2/games/pentago/plotting.py | 1 | 7366 | r"""
Plotting module file for the "pentago" game. It defines the plotting functions.
Notes
-----
#. Written by David C. Stauffer in January 2016.
"""
#%% Imports
import doctest
import unittest
from matplotlib.patches import Circle, Rectangle, Wedge
from dstauffman2.games.pentago.constants import COLOR, PLAYER, SIZES
from dstauffman2.games.pentago.utils import calc_cur_move
#%% plot_cur_move
def plot_cur_move(ax, move):
r"""Plots the piece corresponding the current players move."""
# local alias
box_size = SIZES['square']
# fill background
ax.add_patch(Rectangle((-box_size/2, -box_size/2), box_size, box_size, \
facecolor=COLOR['board'], edgecolor='k'))
# draw the piece
if move == PLAYER['white']:
plot_piece(ax, 0, 0, SIZES['piece'], COLOR['white'])
elif move == PLAYER['black']:
plot_piece(ax, 0, 0, SIZES['piece'], COLOR['black'])
elif move == PLAYER['none']:
pass
else:
raise ValueError('Unexpected player.')
# turn the axes back off (they get reinitialized at some point)
ax.set_axis_off()
#%% plot_piece
def plot_piece(ax, vc, hc, r, c, half=False):
r"""
Plots a piece on the board.
Parameters
----------
    ax : object
        Axis to plot on
    vc : float
        Vertical center (Y-axis or board row)
    hc : float
        Horizontal center (X-axis or board column)
    r : float
        Radius of the piece
    c : 3-tuple
        RGB triplet color
    half : bool, optional, default is False
        Flag for plotting half a piece
Returns
-------
    piece : object
        Handle to the matplotlib patch for the piece
Examples
--------
>>> from dstauffman2.games.pentago import plot_piece
>>> import matplotlib.pyplot as plt
>>> plt.ioff()
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> _ = ax.set_xlim(0.5, 1.5)
>>> _ = ax.set_ylim(0.5, 1.5)
>>> obj = plot_piece(ax, 1, 1, 0.45, (0, 0, 1))
>>> plt.show(block=False) # doctest: +SKIP
>>> plt.close(fig)
"""
    # use a half-circle wedge when plotting half a piece, otherwise a full circle
if half:
piece = Wedge((hc, vc), r, 270, 90, facecolor=c, edgecolor='k')
else:
piece = Circle((hc, vc), r, facecolor=c, edgecolor='k')
# plot piece
ax.add_patch(piece)
return piece
#%% plot_board
def plot_board(ax, board):
r"""Plots the board (and the current player move)."""
# get axes limits
(m, n) = board.shape
s = SIZES['square']/2
xmin = 0 - s
xmax = m - 1 + s
ymin = 0 - s
ymax = n - 1 + s
# fill background
ax.add_patch(Rectangle((-xmin-1, -ymin-1), xmax-xmin, ymax-ymin, facecolor=COLOR['board'], \
edgecolor=COLOR['maj_edge']))
# draw minor horizontal lines
ax.plot([1-s, 1-s], [ymin, ymax], color=COLOR['min_edge'])
ax.plot([2-s, 2-s], [ymin, ymax], color=COLOR['min_edge'])
ax.plot([4-s, 4-s], [ymin, ymax], color=COLOR['min_edge'])
ax.plot([5-s, 5-s], [ymin, ymax], color=COLOR['min_edge'])
# draw minor vertical lines
ax.plot([xmin, xmax], [1-s, 1-s], color=COLOR['min_edge'])
ax.plot([xmin, xmax], [2-s, 2-s], color=COLOR['min_edge'])
ax.plot([xmin, xmax], [4-s, 4-s], color=COLOR['min_edge'])
ax.plot([xmin, xmax], [5-s, 5-s], color=COLOR['min_edge'])
# draw major quadrant lines
ax.plot([3-s, 3-s], [ymin, ymax], color=COLOR['maj_edge'], linewidth=2)
ax.plot([xmin, xmax], [3-s, 3-s], color=COLOR['maj_edge'], linewidth=2)
# loop through and place marbles
for i in range(m):
for j in range(n):
if board[i, j] == PLAYER['none']:
pass
elif board[i, j] == PLAYER['white']:
plot_piece(ax, i, j, SIZES['piece'], COLOR['white'])
elif board[i, j] == PLAYER['black']:
plot_piece(ax, i, j, SIZES['piece'], COLOR['black'])
else:
raise ValueError('Bad board position.')
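# A minimal usage sketch for plot_board (illustrative only; the board values
# are made up, following the PLAYER mapping used above):
#
#     import matplotlib.pyplot as plt
#     import numpy as np
#     fig = plt.figure()
#     ax = fig.add_subplot(111, aspect='equal')
#     board = np.full((6, 6), PLAYER['none'])
#     board[0, 0] = PLAYER['white']
#     board[5, 5] = PLAYER['black']
#     plot_board(ax, board)
#     ax.invert_yaxis()
#     plt.show(block=False)
#     plt.close(fig)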
#%% plot_win
def plot_win(ax, mask):
r"""
Plots the winning pieces in red.
Parameters
----------
ax : object
Axis to plot on
mask : 2D bool ndarray
Mask for which squares to plot the win
Examples
--------
>>> from dstauffman2.games.pentago import plot_win
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> plt.ioff()
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111, aspect='equal')
>>> _ = ax.set_xlim(-0.5, 5.5)
>>> _ = ax.set_ylim(-0.5, 5.5)
>>> ax.invert_yaxis()
>>> mask = np.zeros((6, 6), dtype=bool)
>>> mask[0, 0:5] = True
>>> plot_win(ax, mask)
>>> plt.show(block=False) # doctest: +SKIP
>>> plt.close(fig)
"""
(m, n) = mask.shape
for i in range(m):
for j in range(n):
if mask[i, j]:
plot_piece(ax, i, j, SIZES['win'], COLOR['win'])
#%% plot_possible_win
def plot_possible_win(ax, rot_buttons, white_moves, black_moves, cur_move, cur_game):
r"""
Plots the possible wins on the board.
Examples
--------
>>> from dstauffman2.games.pentago import plot_possible_win, find_moves
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> plt.ioff()
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111, aspect='equal')
>>> _ = ax.set_xlim(-0.5, 5.5)
>>> _ = ax.set_ylim(-0.5, 5.5)
>>> ax.invert_yaxis()
>>> board = np.reshape(np.hstack((0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, np.zeros(24))), (6, 6))
>>> (white_moves, black_moves) = find_moves(board)
>>> rot_buttons = dict() # TODO: write this # doctest: +SKIP
>>> cur_move = 0
>>> cur_game = 0
>>> plot_possible_win(ax, rot_buttons, white_moves, black_moves, cur_move, cur_game) # doctest: +SKIP
>>> plt.show(block=False) # doctest: +SKIP
>>> plt.close(fig)
"""
# find set of positions to plot
pos_white = set(white_moves)
pos_black = set(black_moves)
# find intersecting positions
pos_both = pos_white & pos_black
# plot the whole pieces
for this_move in pos_white ^ pos_both:
plot_piece(ax, this_move.row, this_move.column, SIZES['win'], COLOR['win_wht'])
rot_buttons[this_move.rot_key].overlay = 'wht'
for this_move in pos_black ^ pos_both:
plot_piece(ax, this_move.row, this_move.column, SIZES['win'], COLOR['win_blk'])
rot_buttons[this_move.rot_key].overlay = 'blk'
# plot the half pieces, with the current players move as whole
next_move = calc_cur_move(cur_move, cur_game)
if next_move == PLAYER['white']:
for this_move in pos_both:
plot_piece(ax, this_move.row, this_move.column, SIZES['win'], COLOR['win_wht'])
plot_piece(ax, this_move.row, this_move.column, SIZES['win'], COLOR['win_blk'], half=True)
rot_buttons[this_move.rot_key].overlay = 'w_b'
elif next_move == PLAYER['black']:
for this_move in pos_both:
plot_piece(ax, this_move.row, this_move.column, SIZES['win'], COLOR['win_blk'])
plot_piece(ax, this_move.row, this_move.column, SIZES['win'], COLOR['win_wht'], half=True)
rot_buttons[this_move.rot_key].overlay = 'b_w'
else:
raise ValueError('Unexpected next player.')
#%% Unit Test
if __name__ == '__main__':
unittest.main(module='dstauffman2.games.pentago.tests.test_plotting', exit=False)
doctest.testmod(verbose=False)
| lgpl-3.0 |
Kruehlio/MUSEspec | analysis/analysis.py | 1 | 19830 | # -*- coding: utf-8 -*-
"""
Performs analysis on MUSE cubes
metGrad : Plots the metallicity as a function of galaxy distance
voronoi_run : Runs a voronoi tessellation code
voronoi_bin : Bins a 2d-map using voronoi tessellation
"""
import numpy as np
import matplotlib.pyplot as plt
import logging
import scipy as sp
from matplotlib.backends.backend_pdf import PdfPages
from .extract import getGalcen, RESTWL
from .maps import getOH
from .formulas import (mcebv, mcohD16, mcTSIII, mcOHOIII, mcSHSIII,
mcDens, mcohPP04, mcSFR)
from ..utils.voronoi import voronoi
from ..utils.astro import bootstrap, geterrs
from ..utils.fitter import linfit, onedgaussfit
logfmt = '%(levelname)s [%(asctime)s]: %(message)s'
datefmt= '%Y-%m-%d %H:%M:%S'
formatter = logging.Formatter(fmt=logfmt,datefmt=datefmt)
logger = logging.getLogger('__main__')
logging.root.setLevel(logging.DEBUG)
ch = logging.StreamHandler() #console handler
ch.setFormatter(formatter)
logger.handlers = []
logger.addHandler(ch)
def anaSpec(s3d, wl, spec, err, plotLines=False, printFlux=True,
name='', div = 1E3, hasC=0):
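    """ Fits Gaussian profiles to a set of bright emission lines in the 1-d
    spectrum (wl, spec, err), writes the measured line fluxes to a text file
    (optionally plotting each line into a pdf), and derives E(B-V), strong-line
    and T_e-based oxygen/sulphur abundances, the electron density and the SFR
    from those fluxes.
    """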
lines = ['ha', 'hb', 'niia', 'niib', 'siia', 'siib', 'oiiia', 'oiiib',
'siii6312', 'siii', 'ariii7135', 'ariii7751', 'oii7320',
'oii7331', 'nii5755', 'hei5876', 'hei4922']
lp = {}
if plotLines==True:
pp = PdfPages('%s_%s_%s_lines.pdf' %(s3d.inst, s3d.target, name))
f = open('%s_%s_%s_lines.txt' %(s3d.inst, s3d.target, name), 'w')
sigma = 2
for line in lines:
lp[line] = {}
dx1, dx2 = 12, 12
if line in ['oii7320', 'niia']:
dx1 = 8
if line in ['oii7331', 'siii6312']:
dx2 = 8
if line in ['oiiia', 'oiiib']:
dx1, dx2 = 15, 15
p1 = s3d.wltopix(RESTWL[line]*(1+s3d.z) - dx2)
p2 = s3d.wltopix(RESTWL[line]*(1+s3d.z) + dx1)
x = wl[p1:p2]
y = spec[p1:p2]/div
e = err[p1:p2]/div
fixz, fixs = False, False
if line in ['siii6312', 'ariii7135', 'ariii7751', 'oii7320',
'oii7331', 'nii5755', 'hei5876']:
fixz, fixs = False, False
if hasC == 0:
gp = onedgaussfit(x, y, err=e,
params = [0, np.nanmax(y), RESTWL[line]*(1+s3d.z), sigma],
fixed=[True,False,fixz,fixs])
if hasC == 1:
gp = onedgaussfit(x, y, err=e,
params = [np.nanmedian(y), np.nanmax(y),
RESTWL[line]*(1+s3d.z), 2],
fixed=[False,False,fixz,False])
if line in ['ha', 'oiiib', 'oiiia']:
sigma = gp[0][3]
lineflg = gp[0][1]*gp[0][3]*(3.1416*2)**0.5
linefleg = ((gp[2][1]/gp[0][1])**2 + (gp[2][3]/gp[0][3])**2)**0.5*lineflg
lp[line]['FluxG'] = [lineflg, linefleg]
f.write('\nLine: %s\n' %(line.capitalize()))
f.write('Gauss Chi2/d.o.f: %.3f, %i\n' %(gp[3][0], gp[3][1]))
f.write('Gauss amplitude = %.2f +/- %.2f 10^-17 erg/cm^2/s/AA\n' \
%(gp[0][1], gp[2][1]))
f.write('Gauss mean = %.2f +/- %.2f AA\n' %(gp[0][2], gp[2][2]))
f.write('Gauss sigma = %.2f +/- %.2f AA\n' %(gp[0][3], gp[2][3]))
f.write('Lineflux = %.2f +/- %.2f 10^-17 erg/cm^2/s\n' %(lineflg, linefleg))
linefl, linefle = np.nansum(y[4:-4]-gp[0][0]), np.nansum(e[4:-4]**2)**0.5
f.write('Lineflux sum = %.2f +/- %.2f 10^-17 erg/cm^2/s\n' \
%(linefl, linefle))
lp[line]['Flux'] = [linefl, linefle]
if hasC == 1:
f.write('Gauss base = %.2f +/- %.2f 10^-17 erg/cm^2/s/AA\n' \
%(gp[0][0], gp[2][0]))
ew = linefl/gp[0][0]
ewe = ((linefle/linefl)**2 + (gp[2][0]/gp[0][0])**2)**0.5 *ew
f.write('EW = %.2f +/- %.2f \AA\n' %(ew, ewe))
if plotLines==True:
fig = plt.figure(figsize = (7,4))
fig.subplots_adjust(bottom=0.12, top=0.99, left=0.12, right=0.99)
ax = fig.add_subplot(1, 1, 1)
ax.xaxis.set_major_formatter(plt.FormatStrFormatter(r'$%i$'))
ax.plot(x, y, color = 'black', alpha = 1.0, # rasterized = raster,
drawstyle = 'steps-mid', lw = 1.8, zorder = 1)
ax.plot(gp[-1], gp[1], '-', lw=2)
# Residuals
# fit = onedgaussian(x, gp[0][0], gp[0][1], gp[0][2], gp[0][3])
# ax.errorbar(x, y-fit, yerr=e, capsize = 0, fmt='o', color='black')
ax.set_xlim(x[0],x[-1])
# ax.set_ylim(-0.03, max(0.03, max(gp[1])*1.1))
ax.set_ylabel(r'$F_{\lambda}\,\rm{(10^{-17}\,erg\,s^{-1}\,cm^{-2}\, \AA^{-1})}$')
ax.set_xlabel(r'$\rm{Observed\,wavelength\, (\AA)}$')
text = 'Line: %s\nWL: %.2f AA\nFlux: %.2f +/- %.2f' \
%(line.capitalize(),gp[0][2], linefl, linefle)
plt.figtext(0.65, 0.95, text, size=12, rotation=0.,
ha="left", va="top",
bbox=dict(boxstyle="round", color='lightgrey' ))
pp.savefig(fig)
plt.close(fig)
if plotLines==True:
pp.close()
f.close()
#==============================================================================
# EBV
#==============================================================================
if lp.has_key('ha') and lp.has_key('hb'):
ebv, ebverro = mcebv(lp['ha']['Flux'], lp['hb']['Flux'])
print '\n\n\tEBV = %.3f +/- %.3f mag' %(ebv, ebverro)
for line in lp.keys():
lp[line]['Flux'][0] *= s3d.ebvCor(line, ebv=ebv)
lp[line]['Flux'][1] *= s3d.ebvCor(line, ebv=ebv)
    if lp.has_key('siia') and lp.has_key('siib') and lp.has_key('ha') and lp.has_key('niib'):
oh, oherr = mcohD16(lp['siia']['Flux'], lp['siib']['Flux'],
lp['ha']['Flux'], lp['niib']['Flux'])
print '\t12+log(O/H) (D16) = %.3f +/- %.3f' %(oh, oherr)
    if lp.has_key('oiiib') and lp.has_key('niib') and lp.has_key('hb') and lp.has_key('ha'):
oho3n2, oherro3n2, ohn2, oherrn2 = mcohPP04(lp['oiiib']['Flux'],
lp['hb']['Flux'], lp['niib']['Flux'], lp['ha']['Flux'])
print '\t12+log(O/H) (PP04, O3N2) = %.3f +/- %.3f' %(oho3n2, oherro3n2)
print '\t12+log(O/H) (PP04, N2) = %.3f +/- %.3f' %(ohn2, oherrn2)
try:
if lp.has_key('siii') and lp.has_key('siii6312'):
tsiii, tsiiierr = mcTSIII(lp['siii']['Flux'], lp['siii6312']['Flux'])
print '\tT_e(SIII) = %.0f +/- %.0f' %(tsiii, tsiiierr)
a, b, c = -0.546, 2.645, -1.276-tsiii/1E4
toiii = ((-b + (b**2 - 4*a*c)**0.5) / (2*a))*1E4
toii = (-0.744 + toiii/1E4*(2.338 - 0.610*toiii/1E4)) * 1E4
print '\tT_e(OII) = %.0f' %(toii)
print '\tT_e(OIII) = %.0f' %(toiii)
if lp.has_key('oiiib') and lp.has_key('oii7320') and lp.has_key('oii7331'):
ohtoiii, ohtoiiie = mcOHOIII(lp['oiiib']['Flux'], lp['oii7320']['Flux'],
lp['oii7331']['Flux'], lp['hb']['Flux'],
toiii, toiii*tsiiierr/tsiii)
print '\t12+log(O/H) T(SIII) = %.3f +/- %.3f' %(ohtoiii, ohtoiiie)
if lp.has_key('siii6312') and lp.has_key('siia') and lp.has_key('siib'):
shsoiii, shsoiiie = mcSHSIII(lp['siii6312']['Flux'], lp['siia']['Flux'],
lp['siib']['Flux'], lp['hb']['Flux'],
tsiii, tsiiierr, toiii)
print '\t12+log(S/H) T(SIII) = %.3f +/- %.3f' %(shsoiii, shsoiiie)
except ValueError:
pass
if lp.has_key('siia') and lp.has_key('siib'):
n, ne = mcDens(lp['siia']['Flux'], lp['siib']['Flux'])
print '\tlog Electron density = %.2f +/- %.2f cm^-3' %(n, ne)
if lp.has_key('ha') and lp.has_key('hb'):
sfr, sfre = mcSFR(lp['ha']['Flux'], lp['hb']['Flux'], s3d)
print '\tSFR = %.3e +/- %.3e Msun/yr' %(sfr, sfre)
return {'EBV':[ebv, ebverro], 'ne':[n, ne], 'SFR':[sfr, sfre],
'12+log(O/H)(D16)': [oh, oherr],
'12+log(O/H)(O3N2)':[oho3n2, oherro3n2], '12+log(O/H)(N2)': [ohn2, oherrn2],
'T_e(SIII)': [tsiii, tsiiierr],
'T_e(OII)':[toii], 'T_e(OIII)':[toiii],
'12+log(O/H)T(SIII)':[ohtoiii, ohtoiiie], '12+log(S/H)T(SIII)':[shsoiii, shsoiiie]}
def metGrad(s3d, meth='S2', snlim = 1, nbin = 2, reff=None, incl=90, posang=0,
ylim1=7.95, ylim2=8.6, xlim1=0, xlim2=3.45, r25=None,
sC=0, xcen=None, ycen=None, nsamp=1000):
""" Obtains and plots the metallicity gradient of a galaxy, using a given
strong-line diagnostic ratio.
Parameters
----------
s3d : Spectrum3d class
Initial spectrum class with the data and error
meth : str
Acronym for the strong line diagnostic that will be used. Passed to
getOH.
snlim : float
Signal-to-noise limit for using spaxels in plot
nbin : integer
Number of bins in the resulting plot
reff : float
Effective radius in arcsec (used for scaling x-axis)
incl : float
Inclination of the galaxy, used for deprojecting the angular scales
ylim1 : float
Minimum limit of 12+log(O/H) plotted on y-axis
ylim2 : float
Maximum limit of 12+log(O/H) plotted on y-axis
xlim1 : float
Minimum limit of distance (kpc) plotted on x-axis
xlim2 : float
Maximum limit of distance (kpc) plotted on x-axis
Returns
-------
Nothing, but plots the metallicity gradient in a pdf file
"""
# posang -= 90
if xcen == None or ycen == None:
x, y = getGalcen(s3d, sC=sC, mask=True)
else:
logger.info('Using galaxy center at %.2f, %.2f' %(xcen, ycen))
x, y = xcen, ycen
ohmap, ohsnmap = getOH(s3d, meth=meth, sC=sC)
yindx, xindx = np.indices(ohmap.shape)[0], np.indices(ohmap.shape)[1]
distmap = ((yindx - y)**2 + (xindx - x)**2) ** 0.5
angmap = np.arctan((x-xindx) / (yindx-y)) * 180./np.pi
angmap[angmap < 0] = angmap[angmap < 0] + 180
# Distcor is distance correction based on angle to posang
# 1 means no correction
maxcor = (1. / np.sin(incl/180. * np.pi)) - 1
distcor = 1 + np.abs((np.sin((angmap - posang) * np.pi / 180.)) * maxcor)
distmap *= distcor
if nbin > 0:
# Median filter by nbin in each of the directions
ohmap = sp.ndimage.filters.median_filter(ohmap, nbin)
# Downsample
ohmap = ohmap[::nbin, ::nbin]
distmap = distmap[::nbin, ::nbin]
# Each bin has now a higher S/N by (nbin*nbin)**0.5
ohsnmap = ohsnmap[::nbin, ::nbin]*nbin
sel = (ohsnmap > snlim) * (~np.isnan(ohmap))
if reff != None:
distmap = (distmap[sel].flatten() * s3d.pixsky) / reff
elif r25 != None:
distmap = (distmap[sel].flatten() * s3d.pixsky) / r25
else:
distmap = (distmap[sel].flatten() * s3d.pixsky) * s3d.AngD
ohmap = ohmap[sel].flatten()
indizes = distmap.argsort()
distmap = distmap[indizes]
ohmap = ohmap[indizes]
sel = distmap < 1
distmap = distmap[sel]
ohmap = ohmap[sel]
# Bootstrapping fits
indizes1 = bootstrap(distmap, n_samples=nsamp)
distmaps = distmap[indizes1]
ohmaps = ohmap[indizes1]
distbins = np.arange(-0.1, 2, 0.1)
slope, inter, metgrads = [],[],[]
for i in range(nsamp):
res = linfit(distmaps[i], ohmaps[i], params = [8.6, -0.3])
slope.append(res[0][1])
inter.append(res[0][0])
metgrads.append(res[0][0] + distbins*res[0][1])
metgrads = np.array(metgrads).transpose()
minss1, bestss1, maxss1 = 3*[np.array([])]
for i in range(len(distbins)):
bestu, minu, maxu = geterrs(metgrads[i], sigma=1.0)
minss1 = np.append(minss1, bestu-minu)
bestss1 = np.append(bestss1, bestu)
maxss1= np.append(maxss1, bestu+minu)
if r25 != None:
logger.info('Intercept %.2f +/- %.2f' %(np.nanmedian(inter), np.std(inter)))
logger.info('Slope %.2f +/- %.2f' %(np.nanmedian(slope), np.std(slope)))
logger.info('Slope %.3f +/- %.3f' \
%(np.nanmedian(slope) / r25 / s3d.AngD, np.std(slope) / r25 / s3d.AngD))
# logger.info('Fit parameters %.3f' %( res[0][1] / r25 / s3d.AngD))
std, meandist, ddist = [], [], 0.03
for i in distbins:
sel = (distmap < i+ddist) * (distmap > i-ddist)
std.append(np.std(ohmap[sel]))
meandist.append(np.nanmedian(distmap[sel]))
# dist, oh = binspec(distmap, ohmap, wl=nbin)
fig2 = plt.figure(figsize = (7,4.5))
ax = fig2.add_subplot(1, 1, 1)
ax.plot(meandist, std, '-', color='black', lw=3, zorder=1)
ax.set_xlim(xlim1, xlim2)
fig2.savefig('%s_%s_gradstd.pdf' %(s3d.inst, s3d.target))
fig = plt.figure(figsize = (7,4.5))
fig.subplots_adjust(bottom=0.12, top=0.88, left=0.13, right=0.99)
ax = fig.add_subplot(1, 1, 1)
ax.plot(distmap, ohmap, 'o', color ='firebrick', ms=4)
ax.set_ylim(ylim1, ylim2)
ax.set_xlim(xlim1, xlim2)
# ax.fill_between(distbins, minss1, maxss1, color='grey', alpha=0.3)
ax.plot(distbins, bestss1, '-', color='black', lw=2, zorder=1)
ax.plot(distbins[distbins<0.25], 8.43 - 0.86 * distbins[distbins<0.25],
'--', color='black', lw=3, zorder=1)
if reff != None:
ax.set_xlabel(r'$\rm{Deprojected\,distance\,(R/R_e)}$', fontsize=16)
elif r25 != None:
ax.set_xlabel(r'$\rm{Deprojected\,distance\,(R/R_{25})}$', fontsize=16)
else:
ax.set_xlabel(r'$\rm{Deprojected\, distance\,from\,center\,(kpc)}$', fontsize=16)
ax2=ax.figure.add_axes(ax.get_position(), frameon = False, sharey=ax)
ax2.xaxis.tick_top()
ax.xaxis.tick_bottom()
ax2.xaxis.set_label_position("top")
if reff != None:
ax2.set_xlim(xlim1, xlim2 * reff * s3d.AngD)
ax2.set_xlabel(r'$\rm{Deprojected\,distance\,(kpc)}$', fontsize=16)
elif r25 != None:
ax2.set_xlim(xlim1, xlim2 * r25 * s3d.AngD)
ax2.set_xlabel(r'$\rm{Deprojected\,distance\,(kpc)}$', fontsize=16)
else:
ax2.set_xlim(xlim1, xlim2 / s3d.AngD)
ax2.set_xlabel(r'$\rm{Deprojected\,distance\,(arcsec)}$', fontsize=16)
ax.set_ylabel(r'$\rm{12+log(O/H)\,(D16)}$', fontsize=18)
fig.savefig('%s_%s_metgrad.pdf' %(s3d.inst, s3d.target))
plt.close(fig)
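# A minimal usage sketch for metGrad (illustrative only; the numerical values
# are made up and s3d is assumed to be an already initialized Spectrum3d-like
# object as used throughout this module):
#
#     metGrad(s3d, meth='S2', snlim=2, nbin=2,
#             incl=60, posang=45, xcen=150, ycen=160)
#
# which writes <inst>_<target>_metgrad.pdf with the radial abundance profile
# (plus a companion <inst>_<target>_gradstd.pdf with the scatter per radius).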
def voronoi_bin(plane, planee=None, planesn=None,
binplane=None, binplanee=None, binplanesn=None,
targetsn=10):
"""Bins a 2d-map, based on the voronoi tessalation of a (potentially
different) map.
Parameters
----------
plane : np.array (2-dimensional)
This is the data which we want to voronoi bin
planee : np.array (2-dimensional)
For the tesselation, we require either the error or the sn. This is
the error.
planesn : np.array (2-dimensional)
For the tesselation, we require either the error or the sn. This is
the sn.
binplane : np.array (2-dimensional)
        Alternatively, a different map can be provided from which the
        binning is derived.
binplanee : np.array (2-dimensional)
For the tesselation, we require either the error or the sn. This is
the error of the map that defines the binning.
binplanesn : np.array (2-dimensional)
For the tesselation, we require either the error or the sn. This is
the sn of the map that defines the binning.
Returns
-------
binnedplane : np.array (2-dimensional)
Binned plane derived from tesselation of plane
    binnedplanesn : np.array (2-dimensional)
        Signal-to-noise ratio of the binned plane
"""
binnedplane = np.copy(plane)
    if planee is not None:
        binnedplanee = np.copy(planee)
    elif planesn is not None:
        binnedplanee = np.copy(planesn)
    if binplane is None:
binplane, binplanee, binplanesn = plane, planee, planesn
voronoi_idx = _voronoi_run(binplane, error=binplanee, snmap=binplanesn,
targetsn=targetsn)
voronoi_idx = voronoi_idx[np.argsort(voronoi_idx[:,2])]
for voro_bin in range(np.max(voronoi_idx[:,2])):
sel = (voronoi_idx[:,2] == voro_bin)
v_pixs = voronoi_idx[sel]
data_bin, data_err = np.array([]), np.array([])
for v_pix in v_pixs:
data_bin = np.append(data_bin, plane[v_pix[1], v_pix[0]])
            if planee is not None:
data_err = np.append(data_err, planee[v_pix[1], v_pix[0]])
else:
data_err = np.append(data_err, plane[v_pix[1], v_pix[0]]/\
planesn[v_pix[1], v_pix[0]])
bin_val = np.nansum(data_bin * 1./data_err**2)/np.nansum(1./data_err**2)
bin_err = 1. / np.nansum(1./data_err**2)**0.5
for v_pix in v_pixs:
binnedplane[v_pix[1], v_pix[0]] = bin_val
binnedplanee[v_pix[1], v_pix[0]] = bin_err
binnedplanesn = binnedplane/binnedplanee
return binnedplane, binnedplanesn
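# A minimal usage sketch for voronoi_bin (illustrative only), binning a noisy
# 2-d map with a constant error map to a target S/N of 10:
#
#     import numpy as np
#     plane = np.random.normal(loc=5.0, scale=1.0, size=(40, 40))
#     planee = np.ones((40, 40))
#     binnedplane, binnedplanesn = voronoi_bin(plane, planee=planee,
#                                              targetsn=10)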
def _voronoi_run(plane, error=None, snmap=None, targetsn=10):
"""Convenience function that runs the voronoi tesselation code in module
voronoi
Parameters
----------
plane : np.array (2-dimensional)
This is the data which we want to voronoi bin
error : np.array (2-dimensional)
For the tesselation, we require either the error or the sn. This is
the error.
snmap : np.array (2-dimensional)
For the tesselation, we require either the error or the sn. This is
the sn.
targetsn : float
Target Signal-to-noise ratio in the Voronoi bins
Returns
-------
np.column_stack([indx, indy, binNum]) : np.array
This is a stack where each pixel indizes (indx, indy) are listed
with the corresponding bin number
"""
    logger.info('Voronoi tessellation of input map with S/N = %i' %targetsn)
indx, indy, signal, noise = _voronoi_prep(plane, error, snmap)
binNum, xNode, yNode, xBar, yBar, sn, nPixels, scale = \
voronoi(indx, indy, signal, noise, targetsn, logger)
return np.column_stack([indx, indy, binNum])
def _voronoi_prep(plane, error = None, sn = None):
""" Convenience function that takes a 2d plane and error as input and prepares four vectors to be run
with voronoi binning
Parameters
----------
plane : np.array (2-dimensional)
This is the data which we want to voronoi bin
error : np.array (2-dimensional)
For the tesselation, we require either the error or the sn. This is
the error.
    sn : np.array (2-dimensional)
For the tesselation, we require either the error or the sn. This is
the sn.
Returns
-------
indx, indy : np.array
x- and y-indices for the individual spaxels
signal : np.array
data at spaxel x,y
noise : np.array
noise at spaxel x,y
"""
indx = np.indices(plane.shape)[1].flatten()
indy = np.indices(plane.shape)[0].flatten()
signal = plane.flatten()
    if error is not None:
        noise = error.flatten()
    elif sn is not None:
noise = (plane/sn).flatten()
else:
raise SystemExit
return indx, indy, signal, noise
| mit |
biocyberman/bcbio-nextgen | bcbio/rnaseq/cufflinks.py | 5 | 10683 | """Assess transcript abundance in RNA-seq experiments using Cufflinks.
http://cufflinks.cbcb.umd.edu/manual.html
"""
import os
import shutil
import tempfile
import pandas as pd
from bcbio.utils import get_in, file_exists, safe_makedir
from bcbio.distributed.transaction import file_transaction
from bcbio.log import logger
from bcbio.pipeline import config_utils
from bcbio.provenance import do
from bcbio.rnaseq import gtf, annotate_gtf
def run(align_file, ref_file, data):
config = data["config"]
cmd = _get_general_options(align_file, config)
cmd.extend(_get_no_assembly_options(ref_file, data))
out_dir = _get_output_dir(align_file, data)
tracking_file = os.path.join(out_dir, "genes.fpkm_tracking")
fpkm_file = os.path.join(out_dir, data['rgnames']['sample']) + ".fpkm"
tracking_file_isoform = os.path.join(out_dir, "isoforms.fpkm_tracking")
fpkm_file_isoform = os.path.join(out_dir, data['rgnames']['sample']) + ".isoform.fpkm"
if not file_exists(fpkm_file):
with file_transaction(data, out_dir) as tmp_out_dir:
safe_makedir(tmp_out_dir)
cmd.extend(["--output-dir", tmp_out_dir])
cmd.extend([align_file])
cmd = map(str, cmd)
do.run(cmd, "Cufflinks on %s." % (align_file))
fpkm_file = gene_tracking_to_fpkm(tracking_file, fpkm_file)
fpkm_file_isoform = gene_tracking_to_fpkm(tracking_file_isoform, fpkm_file_isoform)
return out_dir, fpkm_file, fpkm_file_isoform
def gene_tracking_to_fpkm(tracking_file, out_file):
"""
    Take a gene-level tracking file from Cufflinks and output a two-column
    table with the first column as IDs and the second column as FPKM for the
    sample. Combines FPKM values from the same gene into one FPKM value to fix
this bug: http://seqanswers.com/forums/showthread.php?t=5224&page=2
"""
if file_exists(out_file):
return out_file
df = pd.io.parsers.read_table(tracking_file, sep="\t", header=0)
df = df[['tracking_id', 'FPKM']]
df = df.groupby(['tracking_id']).sum()
df.to_csv(out_file, sep="\t", header=False, index_label=False)
return out_file
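# A small illustration (not part of the pipeline) of the groupby-sum collapse
# performed in gene_tracking_to_fpkm above, using a made-up tracking table
# with a duplicated gene ID:
#
#     import pandas as pd
#     df = pd.DataFrame({"tracking_id": ["geneA", "geneA", "geneB"],
#                        "FPKM": [1.0, 2.0, 5.0]})
#     summed = df.groupby(["tracking_id"]).sum()
#     # summed.loc["geneA", "FPKM"] == 3.0; summed.loc["geneB", "FPKM"] == 5.0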
def _get_general_options(align_file, config):
options = []
cufflinks = config_utils.get_program("cufflinks", config)
options.extend([cufflinks])
options.extend(["--num-threads", config["algorithm"].get("num_cores", 1)])
options.extend(["--quiet"])
options.extend(["--no-update-check"])
options.extend(["--max-bundle-frags", 2000000])
options.extend(_get_stranded_flag(config))
return options
def _get_no_assembly_options(ref_file, data):
options = []
options.extend(["--frag-bias-correct", ref_file])
options.extend(["--multi-read-correct"])
options.extend(["--upper-quartile-norm"])
gtf_file = data["genome_resources"]["rnaseq"].get("transcripts", "")
if gtf_file:
options.extend(["--GTF", gtf_file])
mask_file = data["genome_resources"]["rnaseq"].get("transcripts_mask", "")
if mask_file:
options.extend(["--mask-file", mask_file])
return options
def _get_stranded_flag(config):
strand_flag = {"unstranded": "fr-unstranded",
"firststrand": "fr-firststrand",
"secondstrand": "fr-secondstrand"}
stranded = get_in(config, ("algorithm", "strandedness"), "unstranded").lower()
assert stranded in strand_flag, ("%s is not a valid strandedness value. "
"Valid values are 'firststrand', "
"'secondstrand' and 'unstranded" % (stranded))
flag = strand_flag[stranded]
return ["--library-type", flag]
def _get_output_dir(align_file, data, sample_dir=True):
config = data["config"]
name = data["rgnames"]["sample"] if sample_dir else ""
return os.path.join(get_in(data, ("dirs", "work")), "cufflinks", name)
def assemble(bam_file, ref_file, num_cores, out_dir, data):
out_dir = os.path.join(out_dir, data["rgnames"]["sample"])
safe_makedir(out_dir)
out_file = os.path.join(out_dir, "cufflinks-assembly.gtf")
cufflinks_out_file = os.path.join(out_dir, "transcripts.gtf")
library_type = " ".join(_get_stranded_flag(data["config"]))
if file_exists(out_file):
return out_file
with file_transaction(data, out_dir) as tmp_out_dir:
cmd = ("cufflinks --output-dir {tmp_out_dir} --num-threads {num_cores} "
"--frag-bias-correct {ref_file} "
"{library_type} --multi-read-correct --upper-quartile-norm {bam_file}")
cmd = cmd.format(**locals())
do.run(cmd, "Assembling transcripts with Cufflinks using %s." % bam_file)
shutil.move(cufflinks_out_file, out_file)
return out_file
def clean_assembly(gtf_file, clean=None, dirty=None):
"""
clean the likely garbage transcripts from the GTF file including:
1. any novel single-exon transcripts
2. any features with an unknown strand
"""
base, ext = os.path.splitext(gtf_file)
db = gtf.get_gtf_db(gtf_file, in_memory=True)
clean = clean if clean else base + ".clean" + ext
dirty = dirty if dirty else base + ".dirty" + ext
if file_exists(clean):
return clean, dirty
logger.info("Cleaning features with an unknown strand from the assembly.")
with open(clean, "w") as clean_handle, open(dirty, "w") as dirty_handle:
for gene in db.features_of_type('gene'):
for transcript in db.children(gene, level=1):
if is_likely_noise(db, transcript):
write_transcript(db, dirty_handle, transcript)
else:
write_transcript(db, clean_handle, transcript)
return clean, dirty
def write_transcript(db, handle, transcript):
for feature in db.children(transcript):
handle.write(str(feature) + "\n")
def is_likely_noise(db, transcript):
if is_novel_single_exon(db, transcript):
return True
if strand_unknown(db, transcript):
return True
def strand_unknown(db, transcript):
"""
    For unstranded data with novel transcripts, single-exon genes
    will have no strand information. Single-exon novel genes are also
    a source of noise in the Cufflinks assembly, so this removes them.
"""
features = list(db.children(transcript))
strand = features[0].strand
if strand == ".":
return True
else:
return False
def is_novel_single_exon(db, transcript):
features = list(db.children(transcript))
exons = [x for x in features if x.featuretype == "exon"]
class_code = features[0].attributes.get("class_code", None)[0]
if len(exons) == 1 and class_code == "u":
return True
return False
def fix_cufflinks_attributes(ref_gtf, merged_gtf, data, out_file=None):
"""
replace the cufflinks gene_id and transcript_id with the
gene_id and transcript_id from ref_gtf, where available
"""
base, ext = os.path.splitext(merged_gtf)
fixed = out_file if out_file else base + ".clean.fixed" + ext
if file_exists(fixed):
return fixed
ref_db = gtf.get_gtf_db(ref_gtf)
merged_db = gtf.get_gtf_db(merged_gtf, in_memory=True)
ref_tid_to_gid = {}
for gene in ref_db.features_of_type('gene'):
for transcript in ref_db.children(gene, level=1):
ref_tid_to_gid[transcript.id] = gene.id
ctid_to_cgid = {}
ctid_to_oid = {}
for gene in merged_db.features_of_type('gene'):
for transcript in merged_db.children(gene, level=1):
ctid_to_cgid[transcript.id] = gene.id
feature = list(merged_db.children(transcript))[0]
oid = feature.attributes.get("oId", [None])[0]
if oid:
ctid_to_oid[transcript.id] = oid
cgid_to_gid = {}
for ctid, oid in ctid_to_oid.items():
cgid = ctid_to_cgid.get(ctid, None)
oid = ctid_to_oid.get(ctid, None)
gid = ref_tid_to_gid.get(oid, None) if oid else None
if cgid and gid:
cgid_to_gid[cgid] = gid
with file_transaction(data, fixed) as tmp_fixed_file:
with open(tmp_fixed_file, "w") as out_handle:
for gene in merged_db.features_of_type('gene'):
for transcript in merged_db.children(gene, level=1):
for feature in merged_db.children(transcript):
cgid = feature.attributes.get("gene_id", [None])[0]
gid = cgid_to_gid.get(cgid, None)
ctid = None
if gid:
feature.attributes["gene_id"][0] = gid
ctid = feature.attributes.get("transcript_id",
[None])[0]
tid = ctid_to_oid.get(ctid, None)
if tid:
feature.attributes["transcript_id"][0] = tid
if "nearest_ref" in feature.attributes:
del feature.attributes["nearest_ref"]
if "oId" in feature.attributes:
del feature.attributes["oId"]
out_handle.write(str(feature) + "\n")
return fixed
def merge(assembled_gtfs, ref_file, gtf_file, num_cores, data):
"""
run cuffmerge on a set of assembled GTF files
"""
assembled_file = tempfile.NamedTemporaryFile(delete=False).name
with open(assembled_file, "w") as temp_handle:
for assembled in assembled_gtfs:
temp_handle.write(assembled + "\n")
out_dir = os.path.join("assembly", "cuffmerge")
merged_file = os.path.join(out_dir, "merged.gtf")
out_file = os.path.join(out_dir, "assembled.gtf")
if file_exists(out_file):
return out_file
if not file_exists(merged_file):
with file_transaction(data, out_dir) as tmp_out_dir:
cmd = ("cuffmerge -o {tmp_out_dir} --ref-gtf {gtf_file} "
"--num-threads {num_cores} --ref-sequence {ref_file} "
"{assembled_file}")
cmd = cmd.format(**locals())
message = ("Merging the following transcript assemblies with "
"Cuffmerge: %s" % ", ".join(assembled_gtfs))
do.run(cmd, message)
clean, _ = clean_assembly(merged_file)
fixed = fix_cufflinks_attributes(gtf_file, clean, data)
classified = annotate_gtf.annotate_novel_coding(fixed, gtf_file, ref_file,
data)
filtered = annotate_gtf.cleanup_transcripts(classified, gtf_file, ref_file)
shutil.move(filtered, out_file)
return out_file
| mit |
jyhmiinlin/cineFSE | CsTransform/fessler_nufft.py | 1 | 39917 | '''@package docstring
@author: Jyh-Miin Lin (Jimmy), Cambridge University
@address: [email protected]
Created on 2013/1/21
================================================================================
This file is part of pynufft.
pynufft is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
pynufft is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with pynufft. If not, see <http://www.gnu.org/licenses/>.
================================================================================
Remark
pynufft is a fast program that aims to do constrained inversion
of irregularly sampled data.
Among them, nufft.py was translated from the MATLAB NUFFT of
Jeffrey A Fessler et al, University of Michigan,
which was a BSD-licensed work.
However, there are several important modifications, in
particular the scaling factor of the adjoint NUFFT;
only the Kaiser-Bessel window is implemented.
Please cite J A Fessler, Bradley P Sutton.
Nonuniform fast Fourier transforms using min-max interpolation.
IEEE Trans. Sig. Proc., 51(2):560-74, Feb. 2003.
and
"CS-PROPELLER MRI with Parallel Coils Using NUFFT and Split-Bregman Method"(in progress 2013)
Jyh-Miin Lin, Andrew Patterson, Hing-Chiu Chang, Tzu-Chao Chuang, Martin J. Graves,
which is planned to be published soon.
2. Note the "better" results by min-max interpolator of J.A. Fessler et al
3. Other relevant works:
*c-version: http://www-user.tu-chemnitz.de/~potts/nfft/
is a c-library with gaussian interpolator
*fortran version: http://www.cims.nyu.edu/cmcl/nufft/nufft.html
alpha/beta stage
* MEX-version http://www.mathworks.com/matlabcentral/fileexchange/25135-nufft-nufft-usffft
'''
import numpy
import scipy.sparse
from scipy.sparse.csgraph import _validation # for cx_freeze debug
# import sys
import scipy.fftpack
try:
import pyfftw
except:
pass
try:
from numba import jit
except:
pass
# def mydot(A,B):
# return numpy.dot(A,B)
# def mysinc(A):
# return numpy.sinc(A)
# print('no pyfftw, use slow fft')
dtype = numpy.complex64
# try:
# from numba import autojit
# except:
# pass
# print('numba not supported')
# @jit
def pipe_density(V,W):
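    # Iterative sampling-density compensation (a Pipe & Menon style fixed-point
    # iteration): repeatedly update W <- W / (V V^H W), where V is the sparse
    # interpolation matrix, so that V V^H W approaches unity at the sampled
    # k-space locations; the small constants below only guard against division
    # by zero.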
V1=V.conj().T
# E = V.dot( V1.dot( W ) )
# W = W*(E+1.0e-17)/(E*E+1.0e-17)
for pppj in xrange(0,10):
# W[W>1.0]=1.0
# print(pppj)
E = V.dot( V1.dot( W ) )
W = W*(E+1.0e-17)/(E**2+1.0e-17)
return W
def checker(input_var,desire_size):
if input_var is None:
print('input_variable does not exist!')
if desire_size is None:
print('desire_size does not exist!')
dd=numpy.size(desire_size)
dims = numpy.shape(input_var)
# print('dd=',dd,'dims=',dims)
if numpy.isnan(numpy.sum(input_var[:])):
print('input has NaN')
if numpy.ndim(input_var) < dd:
print('input signal has too few dimensions')
if dd > 1:
if dims[0:dd] != desire_size[0:dd]:
print(dims[0:dd])
print(desire_size)
print('input signal has wrong size1')
elif dd == 1:
if dims[0] != desire_size:
print(dims[0])
print(desire_size)
print('input signal has wrong size2')
if numpy.mod(numpy.prod(dims),numpy.prod(desire_size)) != 0:
print('input signal shape is not multiples of desired size!')
def outer_sum(xx,yy):
nx=numpy.size(xx)
ny=numpy.size(yy)
arg1 = numpy.tile(xx,(ny,1)).T
arg2 = numpy.tile(yy,(nx,1))
#cc = arg1 + arg2
return arg1 + arg2
def nufft_offset(om, J, K):
'''
For every om points(outside regular grids), find the nearest
central grid (from Kd dimension)
'''
gam = 2.0*numpy.pi/(K*1.0);
k0 = numpy.floor(1.0*om / gam - 1.0*J/2.0) # new way
return k0
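# Worked example (illustrative numbers): with K = 512 and J = 6,
# gam = 2*pi/512 ~ 0.01227, so om = 0.1 rad gives om/gam ~ 8.15 and
# k0 = floor(8.15 - 3) = 5, i.e. the J-point interpolation neighbourhood
# starts J/2 grid points below the nearest oversampled grid sample.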
def nufft_alpha_kb_fit(N, J, K):
'''
find out parameters alpha and beta
of scaling factor st['sn']
Note, when J = 1 , alpha is hardwired as [1,0,0...]
(uniform scaling factor)
'''
beta=1
#chat=0
Nmid=(N-1.0)/2.0
if N > 40:
#empirical L
L=13
else:
L=numpy.ceil(N/3)
nlist = numpy.arange(0,N)*1.0-Nmid
# print(nlist)
(kb_a,kb_m)=kaiser_bessel('string', J, 'best', 0, K/N)
# print(kb_a,kb_m)
if J > 1:
sn_kaiser = 1 / kaiser_bessel_ft(nlist/K, J, kb_a, kb_m, 1.0)
elif J ==1: # cases on grids
sn_kaiser = numpy.ones((1,N),dtype=dtype)
# print(sn_kaiser)
gam = 2*numpy.pi/K;
X_ant =beta*gam*nlist.reshape((N,1),order='F')
X_post= numpy.arange(0,L+1)
X_post=X_post.reshape((1,L+1),order='F')
X=numpy.dot(X_ant, X_post) # [N,L]
X=numpy.cos(X)
sn_kaiser=sn_kaiser.reshape((N,1),order='F').conj()
# print(numpy.shape(X),numpy.shape(sn_kaiser))
# print(X)
#sn_kaiser=sn_kaiser.reshape(N,1)
X=numpy.array(X,dtype=dtype)
sn_kaiser=numpy.array(sn_kaiser,dtype=dtype)
coef = numpy.linalg.lstsq(X,sn_kaiser)[0] #(X \ sn_kaiser.H);
# print('coef',coef)
#alphas=[]
alphas=coef
if J > 1:
alphas[0]=alphas[0]
alphas[1:]=alphas[1:]/2.0
elif J ==1: # cases on grids
alphas[0]=1.0
alphas[1:]=0.0
alphas=numpy.real(alphas)
return (alphas, beta)
def kaiser_bessel(x, J, alpha, kb_m, K_N):
if K_N != 2 :
kb_m = 0
alpha = 2.34 * J
else:
        kb_m = 0 # hardwired in Fessler's code, because it was claimed to be the best!
jlist_bestzn={2: 2.5,
3: 2.27,
4: 2.31,
5: 2.34,
6: 2.32,
7: 2.32,
8: 2.35,
9: 2.34,
10: 2.34,
11: 2.35,
12: 2.34,
13: 2.35,
14: 2.35,
15: 2.35,
16: 2.33 }
if J in jlist_bestzn:
# print('demo key',jlist_bestzn[J])
alpha = J*jlist_bestzn[J]
#for jj in tmp_key:
#tmp_key=abs(tmp_key-J*numpy.ones(len(tmp_key)))
# print('alpha',alpha)
else:
#sml_idx=numpy.argmin(J-numpy.arange(2,17))
tmp_key=(jlist_bestzn.keys())
min_ind=numpy.argmin(abs(tmp_key-J*numpy.ones(len(tmp_key))))
p_J=tmp_key[min_ind]
alpha = J * jlist_bestzn[p_J]
print('well, this is not the best though',alpha)
kb_a=alpha
return (kb_a, kb_m)
def kaiser_bessel_ft(u, J, alpha, kb_m, d):
'''
interpolation weight for given J/alpha/kb-m
'''
# import types
# scipy.special.jv (besselj in matlab) only accept complex
# if u is not types.ComplexType:
# u=numpy.array(u,dtype=numpy.complex64)
u = u*(1.0+0.0j)
import scipy.special
z = numpy.sqrt( (2*numpy.pi*(J/2)*u)**2 - alpha**2 );
nu = d/2 + kb_m;
y = ((2*numpy.pi)**(d/2))* ((J/2)**d) * (alpha**kb_m) / scipy.special.iv(kb_m, alpha) * scipy.special.jv(nu, z) / (z**nu)
y = numpy.real(y);
return y
def nufft_scale1(N, K, alpha, beta, Nmid):
'''
calculate image space scaling factor
'''
# import types
# if alpha is types.ComplexType:
alpha=numpy.real(alpha)
# print('complex alpha may not work, but I just let it as')
L = len(alpha) - 1
if L > 0:
sn = numpy.zeros((N,1))
n = numpy.arange(0,N).reshape((N,1),order='F')
i_gam_n_n0 = 1j * (2*numpy.pi/K)*( n- Nmid)* beta
for l1 in xrange(-L,L+1):
alf = alpha[abs(l1)];
if l1 < 0:
alf = numpy.conj(alf)
sn = sn + alf*numpy.exp(i_gam_n_n0 * l1)
else:
sn = numpy.dot(alpha , numpy.ones((N,1),dtype=numpy.float32))
return sn
def nufft_scale(Nd, Kd, alpha, beta):
dd=numpy.size(Nd)
Nmid = (Nd-1)/2.0
if dd == 1:
sn = nufft_scale1(Nd, Kd, alpha, beta, Nmid);
# else:
# sn = 1
# for dimid in numpy.arange(0,dd):
# tmp = nufft_scale1(Nd[dimid], Kd[dimid], alpha[dimid], beta[dimid], Nmid[dimid])
# sn = numpy.dot(list(sn), tmp.H)
return sn
def nufft_T(N, J, K, tol, alpha, beta):
'''
equation (29) and (26)Fessler's paper
the pseudo-inverse of T
'''
import scipy.linalg
L = numpy.size(alpha) - 1
cssc = numpy.zeros((J,J));
[j1, j2] = numpy.mgrid[1:J+1, 1:J+1]
for l1 in xrange(-L,L+1):
for l2 in xrange(-L,L+1):
alf1 = alpha[abs(l1)]
if l1 < 0: alf1 = numpy.conj(alf1)
alf2 = alpha[abs(l2)]
if l2 < 0: alf2 = numpy.conj(alf2)
tmp = j2 - j1 + beta * (l1 - l2)
tmp = numpy.sinc(1.0*tmp/(1.0*K/N)) # the interpolator
cssc = cssc + alf1 * numpy.conj(alf2) * tmp;
#print([l1, l2, tmp ])
u_svd, s_svd, v_svd= scipy.linalg.svd(cssc)
smin=numpy.min(s_svd)
if smin < tol:
tol=tol
        print('Poor conditioning %g => pinverse' % smin)
else:
tol= 0.0
for jj in xrange(0,J):
if s_svd[jj] < tol/10:
s_svd[jj]=0
else:
s_svd[jj]=1/s_svd[jj]
s_svd= scipy.linalg.diagsvd(s_svd,len(u_svd),len(v_svd))
cssc = numpy.dot( numpy.dot(v_svd.conj().T,s_svd), u_svd.conj().T)
return cssc
def nufft_r(om, N, J, K, alpha, beta):
'''
equation (30) of Fessler's paper
'''
M = numpy.size(om) # 1D size
gam = 2.0*numpy.pi / (K*1.0)
nufft_offset0 = nufft_offset(om, J, K) # om/gam - nufft_offset , [M,1]
dk = 1.0*om/gam - nufft_offset0 # om/gam - nufft_offset , [M,1]
arg = outer_sum( -numpy.arange(1,J+1)*1.0, dk)
L = numpy.size(alpha) - 1
if L > 0:
rr = numpy.zeros((J,M))
# if J > 1:
for l1 in xrange(-L,L+1):
alf = alpha[abs(l1)]*1.0
if l1 < 0: alf = numpy.conj(alf)
r1 = numpy.sinc(1.0*(arg+1.0*l1*beta)/(1.0*K/N))
rr = 1.0*rr + alf * r1; # [J,M]
# elif J ==1:
# rr=rr+1.0
else: #L==0
rr = numpy.sinc(1.0*(arg+1.0*l1*beta)/(1.0*K/N))
return (rr,arg)
def block_outer_prod(x1, x2):
'''
multiply interpolators of different dimensions
'''
(J1,M)=x1.shape
(J2,M)=x2.shape
# print(J1,J2,M)
xx1 = x1.reshape((J1,1,M),order='F') #[J1 1 M] from [J1 M]
xx1 = numpy.tile(xx1,(1,J2,1)) #[J1 J2 M], emulating ndgrid
xx2 = x2.reshape((1,J2,M),order='F') # [1 J2 M] from [J2 M]
xx2 = numpy.tile(xx2,(J1,1,1)) # [J1 J2 M], emulating ndgrid
# ang_xx1=xx1/numpy.abs(xx1)
# ang_xx2=xx2/numpy.abs(xx2)
y= xx1* xx2
# y= ang_xx1*ang_xx2*numpy.sqrt(xx1*xx1.conj() + xx2*xx2.conj())
# RMS
return y # [J1 J2 M]
def block_outer_sum(x1, x2):
(J1,M)=x1.shape
(J2,M)=x2.shape
# print(J1,J2,M)
xx1 = x1.reshape((J1,1,M),order='F') #[J1 1 M] from [J1 M]
xx1 = numpy.tile(xx1,(1,J2,1)) #[J1 J2 M], emulating ndgrid
xx2 = x2.reshape((1,J2,M),order='F') # [1 J2 M] from [J2 M]
xx2 = numpy.tile(xx2,(J1,1,1)) # [J1 J2 M], emulating ndgrid
y= xx1+ xx2
return y # [J1 J2 M]
def crop_slice_ind(Nd):
return [slice(0, Nd[_ss]) for _ss in xrange(0,len(Nd))]
class nufft:
'''
    pyNufft is ported to Python and refined
by Jyh-Miin Lin at Cambridge University
DETAILS:
__init__(self,om, Nd, Kd,Jd): Create the class with om/Nd/Kd/Jd
om: the k-space points either on grids or outside
grids
Nd: dimension of images, e.g. (256,256) for 2D
Kd: Normally you should use Kd=2*Nd,e.g. (512,512)
of above example. However, used Kd=Nd when Jd=1
Jd: number of adjacents grids on kspace to do
interpolation
self.st: the structure storing information
self.st['Nd']: image dimension
self.st['Kd']: Kspace dimension
self.st['M']: number of data points(on k-space)
self.st['p']: interpolation kernel in
self.st['sn']: scaling in image space
self.st['w']: precomputed Cartesian Density
(the weighting in k-space)
X=self.forward(x): transforming the image x to X(points not on kspace
grids)
pseudo-code: X= st['p']FFT{x*st['sn'],Kd}/sqrt(prod(KD))
x2=self.backward(self,X):adjoint (conjugated operator) of forward
also known as (regridding)
pseudo-code: x = st['sn']*IFFT{X*st['p'].conj() ,Kd}*sqrt(prod(KD))
    Note: notable major modifications:
        1. modified coefficients:
The coefficient of J. Fessler's version may be problematic.
While his forward projection is not scaled, and backward
projection is scaled up by (prod(Kd))-- as is wrong for
iterative reconstruction, because the result will be
scaled up by (prod(Kd))
The above coefficient is right in the sense of "adjoint"
operator, but it is wrong for iterative reconstruction!!
2. Efficient backwardforward():
see pyNufft_fast
3. Slice over higher dimension
The extraordinary property of pyNufft is the following:
x = x[[slice(0, Nd[_ss]) for _ss in range(0,numpy.size(Nd))]]
This sentence is exclusive for python, and it can scope
high dimensional array.
4.Support points on grids with Jd == 1:
when Jd = (1,1), the scaling factor st['sn'] = 1
REFERENCES
    I didn't reinvent the program: it was translated from
    the MATLAB NUFFT of Jeffrey A Fessler, University of Michigan.
    However, several important modifications are listed above, in
    particular the scaling factor st['sn'].
    Only the Kaiser-Bessel window was implemented.
Please refer to
"Nonuniform fast Fourier transforms using min-max interpolation."
IEEE Trans. Sig. Proc., 51(2):560-74, Feb. 2003.
'''
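    # A minimal usage sketch (illustrative only; the k-space trajectory om,
    # the image and the sizes below are made up, not taken from any test data):
    #
    #     import numpy
    #     om = numpy.random.uniform(-numpy.pi, numpy.pi, (1024, 2))  # [M, 2]
    #     Nd, Kd, Jd, n_shift = (64, 64), (128, 128), (6, 6), (32, 32)
    #     NufftObj = nufft(om, Nd, Kd, Jd, n_shift)
    #     x = numpy.random.randn(64, 64) + 0.0j
    #     X = NufftObj.forward(x)       # [M, 1] non-uniform samples
    #     x2 = NufftObj.backward(X)     # [Nd..., 1] adjoint (regridding)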
def __init__(self,om, Nd, Kd,Jd,n_shift):
'''
constructor of pyNufft
'''
'''
Constructor: Start from here
'''
self.debug = 0 # debug
Nd=tuple(Nd) # convert Nd to tuple for consistent structure
Jd=tuple(Jd) # convert Jd to tuple for consistent structure
Kd=tuple(Kd) # convert Kd to tuple for consistent structure
# n_shift: the fftshift position, it must be at center
# n_shift=tuple(numpy.array(Nd)/2)
# dimensionality of input space (usually 2 or 3)
dd=numpy.size(Nd)
#=====================================================================
# check input errors
#=====================================================================
st={}
ud={}
kd={}
st['sense']=0 # default sense control flag
st['sensemap']=[] # default sensemap is null
st['n_shift']=n_shift
#=======================================================================
# First, get alpha and beta: the weighting and freq
# of formula (28) of Fessler's paper
# in order to create slow-varying image space scaling
#=======================================================================
for dimid in xrange(0,dd):
(tmp_alpha,tmp_beta)=nufft_alpha_kb_fit(Nd[dimid], Jd[dimid], Kd[dimid])
st.setdefault('alpha', []).append(tmp_alpha)
st.setdefault('beta', []).append(tmp_beta)
st['tol'] = 0
st['Jd'] = Jd
st['Nd'] = Nd
st['Kd'] = Kd
M = om.shape[0]
st['M'] = M
st['om'] = om
st['sn'] = numpy.array(1.0+0.0j)
dimid_cnt=1
#=======================================================================
# create scaling factors st['sn'] given alpha/beta
# higher dimension implementation
#=======================================================================
for dimid in xrange(0,dd):
tmp = nufft_scale(Nd[dimid], Kd[dimid], st['alpha'][dimid], st['beta'][dimid])
dimid_cnt=Nd[dimid]*dimid_cnt
#=======================================================================
# higher dimension implementation: multiply over all dimension
#=======================================================================
# rms1= numpy.dot(st['sn'], tmp.T**0)
# rms2 = numpy.dot(st['sn']**0, tmp.T)
# ang_rms1 = rms1/numpy.abs(rms1)
# ang_rms2 = rms2/numpy.abs(rms2)
# st['sn'] = ang_rms1*ang_rms2* numpy.sqrt( rms1*rms1.conj() +rms2*rms2.conj() )
# # RMS
# st['sn'] = numpy.dot(st['sn'] , tmp.T )
# st['sn'] = numpy.reshape(st['sn'],(dimid_cnt,1),order='F')
# if True: # do not apply scaling
# st['sn']= numpy.ones((dimid_cnt,1),dtype=numpy.complex64)
# else:
st['sn'] = numpy.dot(st['sn'] , tmp.T )
st['sn'] = numpy.reshape(st['sn'],(dimid_cnt,1),order='F')**0.0 # JML do not apply scaling
#=======================================================================
# if numpy.size(Nd) > 1:
#=======================================================================
# high dimension, reshape for consistent out put
# order = 'F' is for fortran order otherwise it is C-type array
st['sn'] = st['sn'].reshape(Nd,order='F') # [(Nd)]
#=======================================================================
# else:
# st['sn'] = numpy.array(st['sn'],order='F')
# #=======================================================================
st['sn']=numpy.real(st['sn']) # only real scaling is relevant
# [J? M] interpolation coefficient vectors. will need kron of these later
for dimid in xrange(0,dd): # loop over dimensions
N = Nd[dimid]
J = Jd[dimid]
K = Kd[dimid]
alpha = st['alpha'][dimid]
beta = st['beta'][dimid]
#===================================================================
# formula 29 , 26 of Fessler's paper
#===================================================================
T = nufft_T(N, J, K, st['tol'], alpha, beta) # [J? J?]
#==================================================================
# formula 30 of Fessler's paper
#==================================================================
if self.debug==0:
pass
else:
print('dd',dd)
print('dimid',dimid)
(r,arg)= nufft_r(om[:,dimid], N, J, K, alpha, beta) # [J? M]
#==================================================================
# formula 25 of Fessler's paper
#==================================================================
c=numpy.dot(T,r)
#
# print('size of c, r',numpy.shape(c), numpy.shape(T),numpy.shape(r))
# import matplotlib.pyplot
# matplotlib.pyplot.plot(r[:,0:])
# matplotlib.pyplot.show()
#===================================================================
# grid intervals in radius
#===================================================================
gam = 2.0*numpy.pi/(K*1.0);
phase_scale = 1.0j * gam * (N-1.0)/2.0
phase = numpy.exp(phase_scale * arg) # [J? M] linear phase
ud[dimid] = phase * c
# indices into oversampled FFT components
# FORMULA 7
koff=nufft_offset(om[:,dimid], J, K)
# FORMULA 9
kd[dimid]= numpy.mod(outer_sum( numpy.arange(1,J+1)*1.0, koff),K)
if dimid > 0: # trick: pre-convert these indices into offsets!
# ('trick: pre-convert these indices into offsets!')
kd[dimid] = kd[dimid]*numpy.prod(Kd[0:dimid])-1
kk = kd[0] # [J1 M]
uu = ud[0] # [J1 M]
for dimid in xrange(1,dd):
Jprod = numpy.prod(Jd[:dimid+1])
kk = block_outer_sum(kk, kd[dimid])+1 # outer sum of indices
kk = kk.reshape((Jprod, M),order='F')
uu = block_outer_prod(uu, ud[dimid]) # outer product of coefficients
uu = uu.reshape((Jprod, M),order='F')
#now kk and uu are [*Jd M]
# % apply phase shift
# % pre-do Hermitian transpose of interpolation coefficients
phase = numpy.exp( 1.0j* numpy.dot(om, 1.0*numpy.array(n_shift,order='F'))).T # [1 M]
uu = uu.conj()*numpy.tile(phase,[numpy.prod(Jd),1]) #[*Jd M]
mm = numpy.arange(0,M)
mm = numpy.tile(mm,[numpy.prod(Jd),1]) # [Jd, M]
# print('shpae uu',uu[:])
# print('shpae kk',kk[:])
# print('shpae mm',mm[:])
# sn_mask=numpy.ones(st['Nd'],dtype=numpy.float16)
#########################################now remove the corners of sn############
# n_dims= numpy.size(st['Nd'])
#
# sp_rat =0.0
# for di in xrange(0,n_dims):
# sp_rat = sp_rat + (st['Nd'][di]/2)**2
#
# sp_rat = sp_rat**0.5
# x = numpy.ogrid[[slice(0, st['Nd'][_ss]) for _ss in xrange(0,n_dims)]]
#
# tmp = 0
# for di in xrange(0,n_dims):
# tmp = tmp + ( (x[di] - st['Nd'][di]/2.0)/(st['Nd'][di]/2.0) )**2
#
# tmp = (1.0*tmp)**0.5
#
# indx = tmp >=1.0
#
#
# st['sn'][indx] = numpy.mean(st['sn'][...])
#########################################now remove the corners of sn############
# st['sn']=st['sn']*sn_mask
st['p'] = scipy.sparse.csc_matrix(
(numpy.reshape(uu,(numpy.size(uu),)),
(numpy.reshape(mm,(numpy.size(mm),)), numpy.reshape(kk,(numpy.size(kk),)))),
(M,numpy.prod(Kd))
)
## Now doing the density compensation of Jackson ##
W=numpy.ones((st['M'],1))
# w=numpy.ones((numpy.prod(st['Kd']),1))
# for pppj in xrange(0,100):
# # W[W>1.0]=1.0
# # print(pppj)
# E = st['p'].dot( st['p'].conj().T.dot( W ) )
#
# W = W*(E+1.0e-17)/(E**2+1.0e-17)
W = pipe_density(st['p'],W)
# import matplotlib.pyplot
# matplotlib.pyplot.subplot(2,1,1)
# matplotlib.pyplot.plot(numpy.abs(E))
# matplotlib.pyplot.subplot(2,1,2)
# matplotlib.pyplot.plot(numpy.abs(W))
# # matplotlib.pyplot.show()
# matplotlib.pyplot.plot(numpy.abs(W))
# matplotlib.pyplot.show()
st['W'] = W
# st['w'] =
## Finish the density compensation of Jackson ##
# st['q'] = st['p']
# st['T'] = st['p'].conj().T.dot(st['p']) # huge memory leak>5G
# p_herm = st['p'].conj().T.dot(st['W'])
# print('W',numpy.shape(W))
# print('p',numpy.shape(st['p']))
# temp_w = numpy.tile(W,[1,numpy.prod(st['Kd'])])
# print('temp_w',numpy.shape(temp_w))
# st['q'] = st['p'].conj().multiply(st['p'])
# st['q'] = st['p'].conj().T.dot(p_herm.conj().T).diagonal()
# st['q'] = scipy.sparse.diags(W[:,0],offsets=0).dot(st['q'])
# st['q'] = st['q'].sum(0)
#
# st['q'] = numpy.array(st['q'] )
# # for pp in range(0,M):
# # st['q'][pp,:]=st['q'][pp,:]*W[pp,0]
#
# st['q']=numpy.reshape(st['q'],(numpy.prod(st['Kd']),1),order='F').real
st['w'] = numpy.abs(( st['p'].conj().T.dot(numpy.ones(st['W'].shape,dtype = numpy.float32))))#**2) ))
# st['q']=numpy.max(st['w'])*st['q']/numpy.max(st['q'])
import matplotlib.pyplot
# matplotlib.pyplot.imshow(numpy.reshape(st['w'],st['Kd']),
# cmap=matplotlib.cm.gray,
# norm=matplotlib.colors.Normalize(vmin=0.0, vmax=3.0))
# matplotlib.pyplot.plot(numpy.reshape(st['q'],st['Kd'])[:, 0])#,
# # cmap=matplotlib.cm.gray,
# # norm=matplotlib.colors.Normalize(vmin=0.0, vmax=3.0))
# # matplotlib.pyplot.imshow(st['sn'],
# # cmap=matplotlib.cm.gray,
# # norm=matplotlib.colors.Normalize(vmin=0.0, vmax=1.0))
# matplotlib.pyplot.show()
# matplotlib.pyplot.plot(numpy.reshape(st['w'],st['Kd'])[:, 0])#,
# # cmap=matplotlib.cm.gray,
# # norm=matplotlib.colors.Normalize(vmin=0.0, vmax=3.0))
# # matplotlib.pyplot.imshow(st['sn'],
# # cmap=matplotlib.cm.gray,
# # norm=matplotlib.colors.Normalize(vmin=0.0, vmax=1.0))
# matplotlib.pyplot.show()
self.st=st
if self.debug==0:
pass
else:
print('st sn shape',st['sn'].shape)
self.gpu_flag=0
self.__initialize_gpu()
# self.__initialize_gpu2()
self.pyfftw_flag =self.__initialize_pyfftw()
# import multiprocessing
self.threads=1#multiprocessing.cpu_count()
self.st=st
# print( 'optimize sn and p' )
# temp_c = self.Nd2Kd(st['sn'],0)
# self.st['w'] = w
def __initialize_pyfftw(self):
pyfftw_flag = 0
try:
import pyfftw
pyfftw.interfaces.cache.enable()
pyfftw.interfaces.cache.set_keepalive_time(60) # keep live 60 seconds
pyfftw_flag = 1
print('use pyfftw')
except:
print('no pyfftw, use slow fft')
pyfftw_flag = 0
return pyfftw_flag
def __initialize_gpu(self):
try:
import reikna.cluda as cluda
from reikna.fft import FFT
# dtype = dtype#numpy.complex64
data = numpy.zeros( self.st['Kd'],dtype=numpy.complex64)
# data2 = numpy.empty_like(data)
api = cluda.ocl_api()
self.thr = api.Thread.create(async=True)
self.data_dev = self.thr.to_device(data)
# self.data_rec = self.thr.to_device(data2)
axes=range(0,numpy.size(self.st['Kd']))
myfft= FFT( data, axes=axes)
self.myfft = myfft.compile(self.thr,fast_math=True)
self.gpu_flag=1
print('create gpu fft?',self.gpu_flag)
print('line 642')
W= self.st['w'][...,0]
print('line 645')
self.W = numpy.reshape(W, self.st['Kd'],order='C')
print('line 647')
# self.thr2 = api.Thread.create()
print('line 649')
self.W_dev = self.thr.to_device(self.W.astype(dtype))
self.gpu_flag=1
print('line 652')
except:
self.gpu_flag=0
print('get error, using cpu')
# def __initialize_gpu2(self):
# try:
# # import reikna.cluda as cluda
# # from reikna.fft import FFT
# from pycuda.sparse.packeted import PacketedSpMV
# spmv = PacketedSpMV(self.st['p'], options.is_symmetric, numpy.complex64)
# # dtype = dtype#numpy.complex64
# data = numpy.zeros( self.st['Kd'],dtype=numpy.complex64)
# # data2 = numpy.empty_like(data)
# api = cluda.ocl_api()
# self.thr = api.Thread.create(async=True)
# self.data_dev = self.thr.to_device(data)
# # self.data_rec = self.thr.to_device(data2)
# axes=range(0,numpy.size(self.st['Kd']))
# myfft= FFT( data, axes=axes)
# self.myfft = myfft.compile(self.thr,fast_math=True)
#
# self.gpu_flag=1
# print('create gpu fft?',self.gpu_flag)
# print('line 642')
# W= self.st['w'][...,0]
# print('line 645')
# self.W = numpy.reshape(W, self.st['Kd'],order='C')
# print('line 647')
# # self.thr2 = api.Thread.create()
# print('line 649')
# self.W_dev = self.thr.to_device(self.W.astype(dtype))
# self.gpu_flag=1
# print('line 652')
# except:
# self.gpu_flag=0
# print('get error, using cpu')
def forward(self,x):
'''
        forward(x): method of class pyNufft
Compute dd-dimensional Non-uniform transform of signal/image x
where d is the dimension of the data x.
INPUT:
            case 1: x: ndarray, [Nd[0], Nd[1], ... , Nd[dd-1] ]
            case 2: x: ndarray, [Nd[0], Nd[1], ... , Nd[dd-1], Lprod]
OUTPUT:
X: ndarray, [M, Lprod] (Lprod=1 in case 1)
where M =st['M']
'''
st=self.st
Nd = st['Nd']
Kd = st['Kd']
dims = numpy.shape(x)
dd = numpy.size(Nd)
# print('in nufft, dims:dd',dims,dd)
# print('ndim(x)',numpy.ndim(x[:,1]))
# exceptions
if self.debug==0:
pass
else:
checker(x,Nd)
if numpy.ndim(x) == dd:
Lprod = 1
elif numpy.ndim(x) > dd: # multi-channel data
Lprod = numpy.size(x)/numpy.prod(Nd)
'''
Now transform Nd grids to Kd grids(not be reshaped)
'''
Xk=self.Nd2Kd(x, 1)
# interpolate using precomputed sparse matrix
if Lprod > 1:
X = numpy.reshape(st['p'].dot(Xk),(st['M'],)+( Lprod,),order='F')
else:
X = numpy.reshape(st['p'].dot(Xk),(st['M'],1),order='F')
if self.debug==0:
pass
else:
checker(X,st['M']) # check output
return X
def backward(self,X):
'''
backward(x): method of class pyNufft
from [M x Lprod] shaped input, compute its adjoint(conjugate) of
Non-uniform Fourier transform
INPUT:
X: ndarray, [M, Lprod] (Lprod=1 in case 1)
where M =st['M']
OUTPUT:
            x: ndarray, [Nd[0], Nd[1], ... , Nd[dd-1], Lprod]
'''
# extract attributes from structure
st=self.st
Nd = st['Nd']
Kd = st['Kd']
if self.debug==0:
pass
else:
checker(X,st['M']) # check X of correct shape
dims = numpy.shape(X)
Lprod= numpy.prod(dims[1:])
# how many channel * slices
if numpy.size(dims) == 1:
Lprod = 1
else:
Lprod = dims[1]
# print('Xshape',X.shape)
# print('stp.shape',st['p'].shape)
Xk_all = st['p'].getH().dot(X)
# Multiply X with interpolator st['p'] [prod(Kd) Lprod]
'''
Now transform Kd grids to Nd grids(not be reshaped)
'''
x = self.Kd2Nd(Xk_all, 1)
if self.debug==0:
pass
else:
checker(x,Nd) # check output
return x
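    # Hedged usage sketch (illustrative; `nufft` is an instance of this pyNufft class whose
    # NUFFT plan `st` was already built elsewhere in this module):
    #   X = nufft.forward(x)       # Nd-grid image/signal -> non-uniform samples, shape [M, Lprod]
    #   x_adj = nufft.backward(X)  # adjoint NUFFT -> back onto the Nd grid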
def Nd2Kd(self,x, weight_flag):
'''
Now transform Nd grids to Kd grids(not be reshaped)
'''
#print('661 x.shape',x.shape)
st=self.st
Nd = st['Nd']
Kd = st['Kd']
dims = numpy.shape(x)
dd = numpy.size(Nd)
# print('in nufft, dims:dd',dims,dd)
# print('ndim(x)',numpy.ndim(x[:,1]))
# checker
if self.debug==0:
pass
else:
checker(x,Nd)
# if numpy.ndim(x) == dd:
# Lprod = 1
# elif numpy.ndim(x) > dd: # multi-channel data
# Lprod = numpy.size(x)/numpy.prod(Nd)
if numpy.ndim(x) == dd:
if weight_flag == 1:
x = x * st['sn']
else:
pass
Xk = self.emb_fftn(x, Kd,range(0,dd))
Xk = numpy.reshape(Xk, (numpy.prod(Kd),),order='F')
else:# otherwise, collapse all excess dimensions into just one
xx = numpy.reshape(x, [numpy.prod(Nd), numpy.prod(dims[(dd):])],order='F') # [*Nd *L]
L = numpy.shape(xx)[1]
# print('L=',L)
# print('Lprod',Lprod)
Xk = numpy.zeros( (numpy.prod(Kd), L),dtype=dtype) # [*Kd *L]
for ll in xrange(0,L):
xl = numpy.reshape(xx[:,ll], Nd,order='F') # l'th signal
if weight_flag == 1:
xl = xl * st['sn'] # scaling factors
else:
pass
Xk[:,ll] = numpy.reshape(self.emb_fftn(xl, Kd,range(0,dd)),
(numpy.prod(Kd),),order='F')
if self.debug==0:
pass
else:
checker(Xk,numpy.prod(Kd))
return Xk
def Kd2Nd(self,Xk_all,weight_flag):
st=self.st
Nd = st['Nd']
Kd = st['Kd']
dd = len(Nd)
if self.debug==0:
pass
else:
checker(Xk_all,numpy.prod(Kd)) # check X of correct shape
dims = numpy.shape(Xk_all)
Lprod= numpy.prod(dims[1:]) # how many channel * slices
if numpy.size(dims) == 1:
Lprod = 1
else:
Lprod = dims[1]
x=numpy.zeros(Kd+(Lprod,),dtype=dtype) # [*Kd *L]
# if Lprod > 1:
Xk = numpy.reshape(Xk_all, Kd+(Lprod,) , order='F')
for ll in xrange(0,Lprod): # ll = 0, 1,... Lprod-1
x[...,ll] = self.emb_ifftn(Xk[...,ll],Kd,range(0,dd))#.flatten(order='F'))
x = x[crop_slice_ind(Nd)]
if weight_flag == 0:
pass
else: #weight_flag =1 scaling factors
snc = st['sn'].conj()
for ll in xrange(0,Lprod): # ll = 0, 1,... Lprod-1
x[...,ll] = x[...,ll]*snc #% scaling factors
if self.debug==0:
pass # turn off checker
else:
checker(x,Nd) # checking size of x divisible by Nd
return x
def gpufftn(self, data_dev):
'''
gpufftn: an interface to external gpu fftn:
not working to date: awaiting more reliable gpu codes
'''
# self.data_dev = self.thr.to_device(output_x.astype(dtype))
self.myfft( data_dev, data_dev)
return data_dev#.get()
def gpuifftn(self, data_dev):
'''
        gpuifftn: an interface to external gpu ifftn:
not working to date: awaiting more reliable gpu codes
'''
# self.data_dev = self.thr.to_device(output_x.astype(dtype))
self.myfft( data_dev, data_dev, inverse=1)
return data_dev#.get()
def emb_fftn(self, input_x, output_dim, act_axes):
'''
embedded fftn: abstraction of fft for future gpu computing
'''
output_x=numpy.zeros(output_dim, dtype=dtype)
#print('output_dim',input_dim,output_dim,range(0,numpy.size(input_dim)))
# output_x[[slice(0, input_x.shape[_ss]) for _ss in range(0,len(input_x.shape))]] = input_x
output_x[crop_slice_ind(input_x.shape)] = input_x
# print('GPU flag',self.gpu_flag)
# print('pyfftw flag',self.pyfftw_flag)
# if self.gpu_flag == 1:
try:
# print('using GPU')
# print('using GPU interface')
# self.data_dev = self.ctx.to_device(output_x.astype(dtype))
# self.myfft(self.res_dev, self.data_dev, -1)
# output_x=self.res_dev.get()
# self.data_dev =
self.thr.to_device(output_x.astype(dtype), dest=self.data_dev)
output_x=self.gpufftn(self.data_dev).get()
except:
# elif self.gpu_flag ==0:
# elif self.pyfftw_flag == 1:
try:
# print('using pyfftw interface')
# print('threads=',self.threads)
output_x=pyfftw.interfaces.scipy_fftpack.fftn(output_x, output_dim, act_axes,
threads=self.threads,overwrite_x=True)
except:
# else:
# print('using OLD interface')
output_x=scipy.fftpack.fftn(output_x, output_dim, act_axes)
return output_x
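        # Note on the try/except chain above: the FFT is first attempted on the GPU
        # (reikna plan created in __initialize_gpu), then with multi-threaded pyfftw,
        # and finally falls back to scipy.fftpack if neither is available.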
# def emb_ifftn(self, input_x, output_dim, act_axes):
# '''
# embedded ifftn: abstraction of ifft for future gpu computing
# '''
#
# # output_x=input_x
# output_x=self.emb_fftn(input_x.conj(), output_dim, act_axes).conj()/numpy.prod(output_dim)
#
# return output_x
def emb_ifftn(self, input_x, output_dim, act_axes):
'''
        embedded ifftn: abstraction of ifft for future gpu computing
'''
output_x=numpy.zeros(output_dim, dtype=dtype)
#print('output_dim',input_dim,output_dim,range(0,numpy.size(input_dim)))
# output_x[[slice(0, input_x.shape[_ss]) for _ss in range(0,len(input_x.shape))]] = input_x
output_x[crop_slice_ind(input_x.shape)] = input_x
# print('GPU flag',self.gpu_flag)
# print('pyfftw flag',self.pyfftw_flag)
# if self.gpu_flag == 1:
try:
# print('using GPU')
# print('using GPU interface')
# self.data_dev = self.ctx.to_device(output_x.astype(dtype))
# self.myfft(self.res_dev, self.data_dev, -1)
# output_x=self.res_dev.get()
# self.data_dev =
self.thr.to_device(output_x.astype(dtype), dest=self.data_dev)
output_x=self.gpuifftn(self.data_dev).get()
except:
# elif self.pyfftw_flag == 1:
try:
# print('using pyfftw interface')
# print('threads=',self.threads)
output_x=pyfftw.interfaces.scipy_fftpack.ifftn(output_x, output_dim, act_axes,
threads=self.threads,overwrite_x=True)
except:
# else:
# print('using OLD interface')
output_x=scipy.fftpack.ifftn(output_x, output_dim, act_axes)
return output_x | gpl-3.0 |
sepehr125/pybrain | pybrain/tools/plotting/classification.py | 25 | 5041 | """
matplotlib helpers for ClassificationDataSet and classifiers in general.
"""
__author__ = 'Werner Beroux <[email protected]>'
import numpy as np
import matplotlib.pyplot as plt
class ClassificationDataSetPlot(object):
@staticmethod
def plot_module_classification_sequence_performance(module, dataset, sequence_index, bounds=(0, 1)):
"""Plot all outputs and fill the value of the output of the correct category.
        The graph of a good classifier should be mostly white, with all other
        values very low. A graph with a lot of black is a bad sign.
:param module: The module/network to plot.
:type module: pybrain.structure.modules.module.Module
:param dataset: Training dataset used as inputs and expected outputs.
:type dataset: SequenceClassificationDataSet
:param sequence_index: Sequence index to plot in the dataset.
:type sequence_index: int
:param bounds: Outputs lower and upper bound.
:type bounds: list
"""
outputs = []
valid_output = []
module.reset()
for sample in dataset.getSequenceIterator(sequence_index):
out = module.activate(sample[0])
outputs.append(out)
valid_output.append(out[sample[1].argmax()])
plt.fill_between(list(range(len(valid_output))), 1, valid_output, facecolor='k', alpha=0.8)
plt.plot(outputs, linewidth=4, alpha=0.7)
plt.yticks(bounds)
@staticmethod
def plot_module_classification_dataset_performance(module, dataset, cols=4, bounds=(0, 1)):
"""Do a plot_module_classification_sequence_performance() for all sequences in the dataset.
:param module: The module/network to plot.
:type module: pybrain.structure.modules.module.Module
:param dataset: Training dataset used as inputs and expected outputs.
:type dataset: SequenceClassificationDataSet
:param bounds: Outputs lower and upper bound.
:type bounds: list
"""
# Outputs and detected category error for each sequence.
for i in range(dataset.getNumSequences()):
            plt.subplot(int(np.ceil(dataset.getNumSequences() / float(cols))), cols, i + 1)
ClassificationDataSetPlot.plot_module_classification_sequence_performance(module, dataset, i, bounds)
@staticmethod
    def punchcard_module_classification_performance(module, dataset, s=800):
        """Punchcard-like classification performance plot.
        Actual dataset target vs. estimated target by the module.
        The graph of a good classifier module should have no red dots visible:
        - Red Dots: Target (only visible if the black dot doesn't cover it).
        - Green Dots: Estimated classes confidences (size = outputs means).
        - Black Dots: Single winner-takes-all estimated target.
:param module: An object that has at least reset() and activate() methods.
:param dataset: A classification dataset. It should, for any given sequence, have a constant target.
:type dataset: ClassificationDataSet
"""
# TODO: Could also show the variation for each dot
# (e.g., vertical errorbar of 2*stddev).
# TODO: Could keep together all sequences of a given class and somehow
        #       arrange them closer together. Could then aggregate them and
# include horizontal errorbar.
def calculate_module_output_mean(module, inputs):
"""Returns the mean of the module's outputs for a given input list."""
outputs = np.zeros(module.outdim)
module.reset()
for inpt in inputs:
outputs += module.activate(inpt)
return outputs / len(inputs)
num_sequences = dataset.getNumSequences()
actual = []
expected = []
confidence_x = []
confidence_s = []
correct = 0
for seq_i in range(num_sequences):
seq = dataset.getSequence(seq_i)
outputs_mean = calculate_module_output_mean(module, seq[0])
actual.append(np.argmax(outputs_mean))
confidence_s.append(np.array(outputs_mean))
confidence_x.append(np.ones(module.outdim) * seq_i)
# FIXME: np.argmax(seq[1]) == dataset.getSequenceClass(seq_i) is bugged for split SequenceClassificationDataSet.
expected.append(np.argmax(seq[1]))
if actual[-1] == expected[-1]:
correct += 1
plt.title('{}% Correct Classification (red dots mean bad classification)'.format(correct * 100 / num_sequences))
plt.xlabel('Sequence')
plt.ylabel('Class')
plt.scatter(list(range(num_sequences)), expected, s=s, c='r', linewidths=0)
plt.scatter(list(range(num_sequences)), actual, s=s, c='k')
plt.scatter(confidence_x, list(range(module.outdim)) * num_sequences, s=s*np.array(confidence_s), c='g', linewidths=0, alpha=0.66)
plt.yticks(list(range(dataset.nClasses)), dataset.class_labels)
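    # Hedged usage sketch (illustrative names; `net` is a trained pybrain module and `ds`
    # a SequenceClassificationDataSet):
    #   ClassificationDataSetPlot.plot_module_classification_dataset_performance(net, ds)
    #   ClassificationDataSetPlot.punchcard_module_classification_performance(net, ds)
    #   plt.show()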
| bsd-3-clause |
tmhm/scikit-learn | examples/ensemble/plot_gradient_boosting_quantile.py | 392 | 2114 | """
=====================================================
Prediction Intervals for Gradient Boosting Regression
=====================================================
This example shows how quantile regression can be used
to create prediction intervals.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import GradientBoostingRegressor
np.random.seed(1)
def f(x):
"""The function to predict."""
return x * np.sin(x)
#----------------------------------------------------------------------
# First the noiseless case
X = np.atleast_2d(np.random.uniform(0, 10.0, size=100)).T
X = X.astype(np.float32)
# Observations
y = f(X).ravel()
dy = 1.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise
y = y.astype(np.float32)
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
xx = np.atleast_2d(np.linspace(0, 10, 1000)).T
xx = xx.astype(np.float32)
alpha = 0.95
clf = GradientBoostingRegressor(loss='quantile', alpha=alpha,
n_estimators=250, max_depth=3,
learning_rate=.1, min_samples_leaf=9,
min_samples_split=9)
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_upper = clf.predict(xx)
clf.set_params(alpha=1.0 - alpha)
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_lower = clf.predict(xx)
clf.set_params(loss='ls')
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_pred = clf.predict(xx)
# Plot the function, the prediction and the 90% prediction interval obtained from
# the two quantile models
fig = plt.figure()
plt.plot(xx, f(xx), 'g:', label=u'$f(x) = x\,\sin(x)$')
plt.plot(X, y, 'b.', markersize=10, label=u'Observations')
plt.plot(xx, y_pred, 'r-', label=u'Prediction')
plt.plot(xx, y_upper, 'k-')
plt.plot(xx, y_lower, 'k-')
plt.fill(np.concatenate([xx, xx[::-1]]),
np.concatenate([y_upper, y_lower[::-1]]),
alpha=.5, fc='b', ec='None', label='90% prediction interval')
plt.xlabel('$x$')
plt.ylabel('$f(x)$')
plt.ylim(-10, 20)
plt.legend(loc='upper left')
plt.show()
| bsd-3-clause |
stevetjoa/stanford-mir | crossValidationTemplate.py | 3 | 6287 | #
# CCRMA MIR workshop 2014 code examples and useful functions
#
# Ported to SKLearn Python by Steve Tjoa & Leigh M. Smith
#
import numpy as np
from sklearn import cross_validation
from sklearn.neighbors import KNeighborsClassifier
from sklearn import preprocessing
import urllib2 # the lib that handles the url stuff
import urllib
from essentia.standard import MonoLoader
from essentia.standard import ZeroCrossingRate, CentralMoments, Spectrum, Windowing, Centroid, DistributionShape
# Here are examples of how scaling functions would be written, however nowdays SciKit
# Learn will do it for you with the MinMaxScaler!
def scale(x):
"""
Linearly scale data, to a range of -1 to +1.
Return the scaled data, the gradient and offset factors.
"""
if x.shape[0] != 1: # Make sure that data is matrix and not a vector
dataMax = x.max(0)
dataRange = dataMax - x.min(0) # First, find out the ranges of the data
multfactor = 2 / dataRange # Scaling to be {-1 to +1}. This is a range of "2"
newMaxval = multfactor * dataMax
subfactor = newMaxval - 1 # Center around 0, which means subtract 1
data = np.empty(x.shape)
for featureIndex in range(x.shape[1]):
data[:, featureIndex] = x[:, featureIndex] * multfactor[featureIndex] - subfactor[featureIndex]
else: # If data is a vector, just return vector and multiplication = 1, subtraction = 0
data = x
multfactor = 1
subfactor = 0
return data, multfactor, subfactor
def rescale(featureVector, mf, sf):
"""
featureVector = the unscaled feature vector
mf = the multiplication factor used for linear scaling
sf = the subtraction factor used for linear scaling
"""
featureVector_scaled = np.empty(featureVector.shape)
for featureIndex in range(featureVector.shape[1]):
# linear scale
featureVector_scaled[:, featureIndex] = featureVector[:, featureIndex] * mf[featureIndex] - sf[featureIndex]
return featureVector_scaled
def crossValidateKNN(features, labels):
"""
This code is provided as a template for cross-validation of KNN classification.
Pass into the variables "features", "labels" your own data.
As well, you can replace the code in the "BUILD" and "EVALUATE" sections
to be useful with other types of Classifiers.
"""
#
# CROSS VALIDATION
# The features array is arranged as rows of instances, columns of features in our training set.
numInstances, numFeatures = features.shape
numFolds = min(10, numInstances) # how many cross-validation folds do you want - (default=10)
# divide test set into 10 random subsets
indices = cross_validation.KFold(numInstances, n_folds = numFolds)
errors = np.empty(numFolds)
for foldIndex, (train_index, test_index) in enumerate(indices):
# SEGMENT DATA INTO FOLDS
print('Fold: %d' % foldIndex)
print("TRAIN: %s" % train_index)
print("TEST: %s" % test_index)
# SCALE
scaler = preprocessing.MinMaxScaler(feature_range = (-1, 1))
trainingFeatures = scaler.fit_transform(features.take(train_index, 0))
# BUILD NEW MODEL - ADD YOUR MODEL BUILDING CODE HERE...
model = KNeighborsClassifier(n_neighbors = 3)
model.fit(trainingFeatures, labels.take(train_index, 0))
# RESCALE TEST DATA TO TRAINING SCALE SPACE
testingFeatures = scaler.transform(features.take(test_index, 0))
# EVALUATE WITH TEST DATA - ADD YOUR MODEL EVALUATION CODE HERE
model_output = model.predict(testingFeatures)
print("KNN prediction %s" % model_output) # Debugging.
# CONVERT labels(test,:) LABELS TO SAME FORMAT TO COMPUTE ERROR
labels_test = labels.take(test_index, 0)
# COUNT ERRORS. matches is a boolean array, taking the mean does the right thing.
matches = model_output != labels_test
errors[foldIndex] = matches.mean()
print('cross validation error: %f' % errors.mean())
print('cross validation accuracy: %f' % (1.0 - errors.mean()))
return errors
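# Hedged usage sketch (synthetic data; the names are illustrative):
#   features = np.random.rand(60, 4)        # 60 instances, 4 features
#   labels = np.random.randint(0, 3, 60)    # 3 class labels
#   errors = crossValidateKNN(features, labels)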
def process_corpus(corpusURL):
"""Read a list of files to process from the text file at corpusURL. Return a list of URLs"""
# Open and read each line
urlListTextData = urllib2.urlopen(corpusURL) # it's a file like object and works just like a file
for fileURL in urlListTextData: # files are iterable
yield fileURL.rstrip()
# return [fileURL.rstrip() for fileURL in urlListTextData]
# audioFileURLs = process_corpus("https://ccrma.stanford.edu/workshops/mir2014/SmallCorpus.txt")
# for audioFileURL in process_corpus("https://ccrma.stanford.edu/workshops/mir2014/SmallCorpus.txt"):
# print audioFileURL
def spectral_features(filelist):
"""
    Given a list of files, retrieve them, analyse the first 100 ms of each file and return
a feature table.
"""
number_of_files = len(filelist)
number_of_features = 5
features = np.zeros([number_of_files, number_of_features])
sample_rate = 44100
for file_index, url in enumerate(filelist):
print url
urllib.urlretrieve(url, filename='/tmp/localfile.wav')
audio = MonoLoader(filename = '/tmp/localfile.wav', sampleRate = sample_rate)()
zcr = ZeroCrossingRate()
hamming_window = Windowing(type = 'hamming') # we need to window the frame to avoid FFT artifacts.
spectrum = Spectrum()
central_moments = CentralMoments()
distributionshape = DistributionShape()
spectral_centroid = Centroid()
frame_size = int(round(0.100 * sample_rate)) # 100ms
# Only do the first frame for now.
# TODO we should generate values for the entire file, probably by averaging the features.
current_frame = audio[0 : frame_size]
features[file_index, 0] = zcr(current_frame)
spectral_magnitude = spectrum(hamming_window(current_frame))
centroid = spectral_centroid(spectral_magnitude)
spectral_moments = distributionshape(central_moments(spectral_magnitude))
features[file_index, 1] = centroid
features[file_index, 2:5] = spectral_moments
return features
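# Hedged usage sketch (the corpus URL mirrors the commented example above; this downloads audio):
#   urls = list(process_corpus("https://ccrma.stanford.edu/workshops/mir2014/SmallCorpus.txt"))
#   feature_table = spectral_features(urls)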
| mit |
HUGG/NGWM2016-modelling-course | Lessons/06-Rheology-of-the-lithosphere/scripts/solutions/strength-envelope-uniform-crust.py | 1 | 7747 | '''
strength-envelope-uniform-crust.py
This script can be used for plotting strength envelopes for a lithosphere with
a uniform crust. The script includes a function sstemp() that can be used for
calculating the lithospheric temperature as a function of the input material
properties
dwhipp 01.16 (modified from code written by L. Kaislaniemi)
'''
# --- USER INPUT PARAMETERS ---
# Model geometry
z_surf = 0.0 # Elevation of upper surface [km]
z_bott = 100.0 # Elevation of bottom boundary [km]
nz = 100 # Number of grid points
# Boundary conditions
T_surf = 0.0 # Temperature of the upper surface [deg C]
q_surf = 65.0 # Surface heat flow [mW/m^2]
# Thermal conductivity (constant across model thickness)
k = 2.75 # Thermal conductivity [W/m K]
# Deformation rate
edot = 1.0e-15 # Reference strain rate [1/s]
# Constants
g = 9.81 # Gravitational acceleration [m/s^2]
R = 8.314 # Gas constant
# MATERIAL PROPERTY DEFINITIONS
# Crust (Wet quartzite - Gleason and Tullis, 1995)
mat1 = 'Wet quartzite'
L1 = 35.0 # Thickness of layer one [km]
A1 = 1.1 # Average heat production rate for crust [uW/m^3]
rho1 = 2800.0 # Rock density [kg/m^3]
Avisc1 = 1.1e-4 # Viscosity constant [MPa^-n s^-1]
Q1 = 223.0 # Activation energy [kJ/mol]
n1 = 4.0 # Power-law exponent
mu1 = 0.85 # Friction coefficient
C1 = 0.0 # Cohesion [MPa]
# Mantle (Wet olivine - Hirth and Kohlstedt, 1996)
mat2 = 'Wet olivine'
A2 = 0.02 # Heat production rate for mantle [uW/m^3]
rho2 = 3300.0 # Rock density [kg/m^3]
Avisc2 = 4.876e6 # Viscosity constant [MPa^-n s^-1]
Q2 = 515.0 # Activation energy [kJ/mol]
n2 = 3.5 # Power-law exponent
mu2 = 0.6 # Friction coefficient
C2 = 60.0 # Cohesion [MPa]
# END MATERIAL PROPERTY DEFINITIONS
# --- END USER INPUTS ---
# Import libraries
import numpy as np
import matplotlib.pyplot as plt
# Define function to calculate temperatures (DO NOT MODIFY)
def sstemp(A,k,dz,nz,T_surf,q_surf):
# Generate an empty array for temperature values
T = np.zeros(nz)
# Set boundary conditions
# the upper surface temperature and the temperature at one grid point below
T[0] = T_surf
## Grid point one needs special handling as T[-1] is not available
# Calculate "ghost point" outside the model domain, where grid point -1
# would be, assuming surface heat flow q_surf
Tghost = T[0] - q_surf * dz / k # = "T[-1]"
# Use the same finite difference formula to calculate T as for
# the inner points, but replace "T[-1]" by ghost point value
T[1] = -A[1] * dz**2 / k - Tghost + 2*T[0]
# Calculate temperatures across specified thickness
for i in range(2, nz): # NB! Grid points 0 and 1 omitted as they cannot be calculated
T[i] = -A[i] * dz**2 / k - T[i-2] + 2*T[i-1]
return T
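# Note: the marching scheme in sstemp() discretizes the steady-state heat conduction
# equation k * d2T/dz2 + A(z) = 0 as (T[i] - 2*T[i-1] + T[i-2]) / dz**2 = -A[i] / k,
# solved for T[i] using the surface temperature and surface heat flow as boundary conditions.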
# Define conversion factors
km2m = 1.0e3 # [km] to [m]
mW2W = 1.0e-3 # [mW] to [W]
uW2W = 1.0e-6 # [uW] to [W]
MPa2Pa = 1.0e6 # [MPa] to [Pa]
kJ2J = 1.0e3 # [kJ] to [J]
# Convert material property units to SI
z_surf = z_surf * km2m
z_bott = z_bott * km2m
q_surf = q_surf * mW2W
A1 = A1 * uW2W
A2 = A2 * uW2W
L1 = L1 * km2m
Avisc1 = Avisc1 / MPa2Pa**n1
Avisc2 = Avisc2 / MPa2Pa**n2
Q1 = Q1 * kJ2J
Q2 = Q2 * kJ2J
C1 = C1 * MPa2Pa
C2 = C2 * MPa2Pa
# Generate the grid
# Regular grid is used, so that in FD calculations
# only dz is needed. Array z is used only for plotting.
dz = (z_bott - z_surf) / (nz - 1)
z = np.linspace(z_surf, z_bott, nz)
# Generate the material properties arrays
A = np.zeros(nz)
rho = np.zeros(nz)
Avisc = np.zeros(nz)
Q = np.zeros(nz)
n = np.zeros(nz)
mu = np.zeros(nz)
C = np.zeros(nz)
for i in range(nz):
# Fill material property arrays for depths in the crust
if z[i] <= L1:
A[i] = A1
rho[i] = rho1
Avisc[i] = Avisc1
Q[i] = Q1
n[i] = n1
mu[i] = mu1
C[i] = C1
# Fill material property arrays for depths in the mantle
else:
A[i] = A2
rho[i] = rho2
Avisc[i] = Avisc2
Q[i] = Q2
n[i] = n2
mu[i] = mu2
C[i] = C2
# Call function to get temperatures
T = sstemp(A,k,dz,nz,T_surf,q_surf)
T = T + 273.15 # Convert to Kelvins
# Initialize arrays
P = np.zeros(nz)
frict = np.zeros(nz)
visc = np.zeros(nz)
strength = np.zeros(nz)
# Calculate lithostatic pressure
for i in range(1, nz):
P[i] = P[i-1] + rho[i] * g * dz
# Loop over all points and calculate frictional and viscous strengths
for i in range(nz):
# Calculate frictional shear strength using Coulomb criterion
frict[i] = mu[i] * P[i] + C[i]
# Calculate viscous strength using Dorn's law
visc[i] = (edot/Avisc[i])**((1./n[i]))*np.exp(Q[i]/(n[i]*R*T[i]))
# Use logical statements to make sure the stored strength value is the
# smaller of the two calculated above for each point
if frict[i] <= visc[i]:
strength[i] = frict[i]
else:
strength[i] = visc[i]
# Rescale values for plotting
T = T - 273.15
z = z / km2m
strength = strength / MPa2Pa
z_bott = z_bott / km2m
# Create figure window for plot
plt.figure()
# PLOT #1 - Left panel, temperature versus depth
plt.subplot(121)
# Plot temperature on left subplot
plt.plot(T, z, "ro-")
# Invert y axis
plt.gca().invert_yaxis()
# Label axes
plt.xlabel("Temperature [$^{\circ}$C]")
plt.ylabel("Depth [km]")
# PLOT #2 - Right panel, strength versus depth
plt.subplot(122)
# Plot strength versus depth
plt.plot(strength, z, "ko-") # minus sign is placed to make z axis point down
# Invert y axis
plt.gca().invert_yaxis()
# Label axes
plt.xlabel("Strength [MPa]")
# Add text labels for materials
plt.text(0.2*max(strength), 0.8*z_bott, "Layer 1: "+mat1)
plt.text(0.2*max(strength), 0.85*z_bott, "Layer 2: "+mat2)
plt.show()
| mit |
IndraVikas/scikit-learn | sklearn/feature_extraction/hashing.py | 183 | 6155 | # Author: Lars Buitinck <[email protected]>
# License: BSD 3 clause
import numbers
import numpy as np
import scipy.sparse as sp
from . import _hashing
from ..base import BaseEstimator, TransformerMixin
def _iteritems(d):
"""Like d.iteritems, but accepts any collections.Mapping."""
return d.iteritems() if hasattr(d, "iteritems") else d.items()
class FeatureHasher(BaseEstimator, TransformerMixin):
"""Implements feature hashing, aka the hashing trick.
This class turns sequences of symbolic feature names (strings) into
scipy.sparse matrices, using a hash function to compute the matrix column
corresponding to a name. The hash function employed is the signed 32-bit
version of Murmurhash3.
Feature names of type byte string are used as-is. Unicode strings are
converted to UTF-8 first, but no Unicode normalization is done.
Feature values must be (finite) numbers.
This class is a low-memory alternative to DictVectorizer and
CountVectorizer, intended for large-scale (online) learning and situations
where memory is tight, e.g. when running prediction code on embedded
devices.
Read more in the :ref:`User Guide <feature_hashing>`.
Parameters
----------
n_features : integer, optional
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
dtype : numpy type, optional
The type of feature values. Passed to scipy.sparse matrix constructors
as the dtype argument. Do not set this to bool, np.boolean or any
unsigned integer type.
input_type : string, optional
Either "dict" (the default) to accept dictionaries over
(feature_name, value); "pair" to accept pairs of (feature_name, value);
or "string" to accept single strings.
feature_name should be a string, while value should be a number.
In the case of "string", a value of 1 is implied.
The feature_name is hashed to find the appropriate column for the
feature. The value's sign might be flipped in the output (but see
non_negative, below).
    non_negative : boolean, optional, default False
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
Examples
--------
>>> from sklearn.feature_extraction import FeatureHasher
>>> h = FeatureHasher(n_features=10)
>>> D = [{'dog': 1, 'cat':2, 'elephant':4},{'dog': 2, 'run': 5}]
>>> f = h.transform(D)
>>> f.toarray()
array([[ 0., 0., -4., -1., 0., 0., 0., 0., 0., 2.],
[ 0., 0., 0., -2., -5., 0., 0., 0., 0., 0.]])
See also
--------
DictVectorizer : vectorizes string-valued features using a hash table.
sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
encoded as columns of integers.
"""
def __init__(self, n_features=(2 ** 20), input_type="dict",
dtype=np.float64, non_negative=False):
self._validate_params(n_features, input_type)
self.dtype = dtype
self.input_type = input_type
self.n_features = n_features
self.non_negative = non_negative
@staticmethod
def _validate_params(n_features, input_type):
# strangely, np.int16 instances are not instances of Integral,
# while np.int64 instances are...
if not isinstance(n_features, (numbers.Integral, np.integer)):
raise TypeError("n_features must be integral, got %r (%s)."
% (n_features, type(n_features)))
elif n_features < 1 or n_features >= 2 ** 31:
raise ValueError("Invalid number of features (%d)." % n_features)
if input_type not in ("dict", "pair", "string"):
raise ValueError("input_type must be 'dict', 'pair' or 'string',"
" got %r." % input_type)
def fit(self, X=None, y=None):
"""No-op.
This method doesn't do anything. It exists purely for compatibility
with the scikit-learn transformer API.
Returns
-------
self : FeatureHasher
"""
# repeat input validation for grid search (which calls set_params)
self._validate_params(self.n_features, self.input_type)
return self
def transform(self, raw_X, y=None):
"""Transform a sequence of instances to a scipy.sparse matrix.
Parameters
----------
raw_X : iterable over iterable over raw features, length = n_samples
            Samples. Each sample must be an iterable (e.g., a list or tuple)
containing/generating feature names (and optionally values, see
the input_type constructor argument) which will be hashed.
raw_X need not support the len function, so it can be the result
of a generator; n_samples is determined on the fly.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Feature matrix, for use with estimators or further transformers.
"""
raw_X = iter(raw_X)
if self.input_type == "dict":
raw_X = (_iteritems(d) for d in raw_X)
elif self.input_type == "string":
raw_X = (((f, 1) for f in x) for x in raw_X)
indices, indptr, values = \
_hashing.transform(raw_X, self.n_features, self.dtype)
n_samples = indptr.shape[0] - 1
if n_samples == 0:
raise ValueError("Cannot vectorize empty sequence.")
X = sp.csr_matrix((values, indices, indptr), dtype=self.dtype,
shape=(n_samples, self.n_features))
X.sum_duplicates() # also sorts the indices
if self.non_negative:
np.abs(X.data, X.data)
return X
| bsd-3-clause |
simon-pepin/scikit-learn | sklearn/kernel_ridge.py | 44 | 6504 | """Module :mod:`sklearn.kernel_ridge` implements kernel ridge regression."""
# Authors: Mathieu Blondel <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
import numpy as np
from .base import BaseEstimator, RegressorMixin
from .metrics.pairwise import pairwise_kernels
from .linear_model.ridge import _solve_cholesky_kernel
from .utils import check_X_y
from .utils.validation import check_is_fitted
class KernelRidge(BaseEstimator, RegressorMixin):
"""Kernel ridge regression.
Kernel ridge regression (KRR) combines ridge regression (linear least
squares with l2-norm regularization) with the kernel trick. It thus
learns a linear function in the space induced by the respective kernel and
the data. For non-linear kernels, this corresponds to a non-linear
function in the original space.
The form of the model learned by KRR is identical to support vector
regression (SVR). However, different loss functions are used: KRR uses
squared error loss while support vector regression uses epsilon-insensitive
loss, both combined with l2 regularization. In contrast to SVR, fitting a
KRR model can be done in closed-form and is typically faster for
medium-sized datasets. On the other hand, the learned model is non-sparse
and thus slower than SVR, which learns a sparse model for epsilon > 0, at
prediction-time.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_targets]).
Read more in the :ref:`User Guide <kernel_ridge>`.
Parameters
----------
alpha : {float, array-like}, shape = [n_targets]
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``(2*C)^-1`` in other linear models such as LogisticRegression or
LinearSVC. If an array is passed, penalties are assumed to be specific
to the targets. Hence they must correspond in number.
kernel : string or callable, default="linear"
Kernel mapping used internally. A callable should accept two arguments
and the keyword arguments passed to this object as kernel_params, and
should return a floating point number.
gamma : float, default=None
Gamma parameter for the RBF, polynomial, exponential chi2 and
sigmoid kernels. Interpretation of the default value is left to
the kernel; see the documentation for sklearn.metrics.pairwise.
Ignored by other kernels.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
kernel_params : mapping of string to any, optional
Additional parameters (keyword arguments) for kernel function passed
as callable object.
Attributes
----------
dual_coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s) in kernel space
X_fit_ : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data, which is also required for prediction
References
----------
* Kevin P. Murphy
"Machine Learning: A Probabilistic Perspective", The MIT Press
chapter 14.4.3, pp. 492-493
See also
--------
Ridge
Linear ridge regression.
SVR
Support Vector Regression implemented using libsvm.
Examples
--------
>>> from sklearn.kernel_ridge import KernelRidge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> rng = np.random.RandomState(0)
>>> y = rng.randn(n_samples)
>>> X = rng.randn(n_samples, n_features)
>>> clf = KernelRidge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
KernelRidge(alpha=1.0, coef0=1, degree=3, gamma=None, kernel='linear',
kernel_params=None)
"""
def __init__(self, alpha=1, kernel="linear", gamma=None, degree=3, coef0=1,
kernel_params=None):
self.alpha = alpha
self.kernel = kernel
self.gamma = gamma
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
def _get_kernel(self, X, Y=None):
if callable(self.kernel):
params = self.kernel_params or {}
else:
params = {"gamma": self.gamma,
"degree": self.degree,
"coef0": self.coef0}
return pairwise_kernels(X, Y, metric=self.kernel,
filter_params=True, **params)
@property
def _pairwise(self):
return self.kernel == "precomputed"
def fit(self, X, y=None, sample_weight=None):
"""Fit Kernel Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample, ignored if None is passed.
Returns
-------
self : returns an instance of self.
"""
# Convert data
X, y = check_X_y(X, y, accept_sparse=("csr", "csc"), multi_output=True)
K = self._get_kernel(X)
alpha = np.atleast_1d(self.alpha)
ravel = False
if len(y.shape) == 1:
y = y.reshape(-1, 1)
ravel = True
copy = self.kernel == "precomputed"
self.dual_coef_ = _solve_cholesky_kernel(K, y, alpha,
sample_weight,
copy)
if ravel:
self.dual_coef_ = self.dual_coef_.ravel()
self.X_fit_ = X
return self
    def predict(self, X):
        """Predict using the kernel ridge model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Samples.
Returns
-------
C : array, shape = [n_samples] or [n_samples, n_targets]
Returns predicted values.
"""
check_is_fitted(self, ["X_fit_", "dual_coef_"])
K = self._get_kernel(X, self.X_fit_)
return np.dot(K, self.dual_coef_)
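        # e.g., continuing the class docstring example above: after clf.fit(X, y),
        # clf.predict(X) evaluates K(X, self.X_fit_).dot(self.dual_coef_), i.e. one
        # prediction per row of X.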
| bsd-3-clause |
unrealcv/unrealcv | examples/interactive_control.py | 1 | 2271 | # A toy example to use python to control the game.
from unrealcv import client
from unrealcv.util import read_npy, read_png
import matplotlib.pyplot as plt
import numpy as np
help_message = '''
A demo showing how to control a game using python
a, d: rotate camera to left and right.
q, e: move camera up and down.
left, right, up, down: move around
'''
plt.rcParams['keymap.save'] = ''
def main():
loc = None
rot = None
fig, ax = plt.subplots()
img = np.zeros((480, 640, 4))
ax.imshow(img)
def onpress(event):
        rot_offset = 10 # Rotate 10 degrees for each key press
        loc_offset = 10 # Move 10.0 when a key is pressed
if event.key == 'a': rot[1] -= rot_offset
if event.key == 'd': rot[1] += rot_offset
if event.key == 'q': loc[2] += loc_offset # Move up
if event.key == 'e': loc[2] -= loc_offset # Move down
if event.key == 'w': loc[1] -= loc_offset
if event.key == 's': loc[1] += loc_offset
if event.key == 'up': loc[1] -= loc_offset
if event.key == 'down': loc[1] += loc_offset
if event.key == 'left': loc[0] -= loc_offset
if event.key == 'right': loc[0] += loc_offset
cmd = 'vset /camera/0/rotation %s' % ' '.join([str(v) for v in rot])
client.request(cmd)
cmd = 'vset /camera/0/location %s' % ' '.join([str(v) for v in loc])
client.request(cmd)
res = client.request('vget /camera/0/lit png')
img = read_png(res)
# print(event.key)
# print('Requested image %s' % str(img.shape))
ax.imshow(img)
fig.canvas.draw()
client.connect()
if not client.isconnected():
print 'UnrealCV server is not running. Run the game from http://unrealcv.github.io first.'
return
else:
print help_message
init_loc = [float(v) for v in client.request('vget /camera/0/location').split(' ')]
init_rot = [float(v) for v in client.request('vget /camera/0/rotation').split(' ')]
loc = init_loc; rot = init_rot
fig.canvas.mpl_connect('key_press_event', onpress)
plt.title('Keep this window in focus, it will be used to receive key press event')
plt.axis('off')
plt.show() # Add event handler
if __name__ == '__main__':
main()
| mit |
timqian/sms-tools | lectures/6-Harmonic-model/plots-code/f0Yin.py | 1 | 1719 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming
import sys, os
import essentia.standard as ess
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import utilFunctions as UF
import stft as STFT
def f0Yin(x, N, H, minf0, maxf0):
# fundamental frequency detection using the Yin algorithm
# x: input sound, N: window size,
	# minf0: minimum f0 frequency in Hz, maxf0: maximum f0 frequency in Hz,
# returns f0
spectrum = ess.Spectrum(size=N)
window = ess.Windowing(size=N, type='hann')
pitchYin= ess.PitchYinFFT(minFrequency = minf0, maxFrequency = maxf0)
pin = 0
pend = x.size-N
f0 = []
while pin<pend:
mX = spectrum(window(x[pin:pin+N]))
f0t = pitchYin(mX)
f0 = np.append(f0, f0t[0])
pin += H
return f0
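# Hedged usage sketch (mirrors the parameter values used in the __main__ block below):
#   f0 = f0Yin(x, N=2048, H=256, minf0=130, maxf0=300)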
if __name__ == '__main__':
(fs, x) = UF.wavread('../../../sounds/vignesh.wav')
plt.figure(1, figsize=(9, 7))
N = 2048
H = 256
w = hamming(2048)
mX, pX = STFT.stftAnal(x, fs, w, N, H)
maxplotfreq = 2000.0
frmTime = H*np.arange(mX[:,0].size)/float(fs)
binFreq = fs*np.arange(N*maxplotfreq/fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mX[:,:N*maxplotfreq/fs+1]))
N = 2048
minf0 = 130
maxf0 = 300
H = 256
f0 = f0Yin(x, N, H, minf0, maxf0)
yf0 = UF.sinewaveSynth(f0, .8, H, fs)
frmTime = H*np.arange(f0.size)/float(fs)
plt.plot(frmTime, f0, linewidth=2, color='k')
plt.autoscale(tight=True)
plt.title('mX + f0 (vignesh.wav), YIN: N=2048, H = 256 ')
plt.tight_layout()
plt.savefig('f0Yin.png')
UF.wavwrite(yf0, fs, 'f0Yin.wav')
plt.show()
| agpl-3.0 |
saguziel/incubator-airflow | airflow/hooks/base_hook.py | 5 | 2571 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import object
import logging
import os
import random
from airflow import settings
from airflow.models import Connection
from airflow.exceptions import AirflowException
CONN_ENV_PREFIX = 'AIRFLOW_CONN_'
class BaseHook(object):
"""
Abstract base class for hooks, hooks are meant as an interface to
interact with external systems. MySqlHook, HiveHook, PigHook return
object that can handle the connection and interaction to specific
instances of these systems, and expose consistent methods to interact
with them.
"""
def __init__(self, source):
pass
@classmethod
def get_connections(cls, conn_id):
session = settings.Session()
db = (
session.query(Connection)
.filter(Connection.conn_id == conn_id)
.all()
)
session.expunge_all()
session.close()
if not db:
raise AirflowException(
"The conn_id `{0}` isn't defined".format(conn_id))
return db
@classmethod
def get_connection(cls, conn_id):
environment_uri = os.environ.get(CONN_ENV_PREFIX + conn_id.upper())
conn = None
if environment_uri:
conn = Connection(conn_id=conn_id, uri=environment_uri)
else:
conn = random.choice(cls.get_connections(conn_id))
if conn.host:
logging.info("Using connection to: " + conn.host)
return conn
@classmethod
def get_hook(cls, conn_id):
connection = cls.get_connection(conn_id)
return connection.get_hook()
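    # Hedged usage sketch (hypothetical connection id):
    #   conn = BaseHook.get_connection('my_conn_id')  # AIRFLOW_CONN_MY_CONN_ID env var wins over the DB
    #   hook = BaseHook.get_hook('my_conn_id')        # hook class resolved from that connection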
def get_conn(self):
raise NotImplementedError()
def get_records(self, sql):
raise NotImplementedError()
def get_pandas_df(self, sql):
raise NotImplementedError()
def run(self, sql):
raise NotImplementedError()
| apache-2.0 |
Brazelton-Lab/lab_scripts | parse_bt2.py | 1 | 3180 | #!/usr/bin/env python3.6
# -*- coding: utf-8 -*-
"""
Copyright:
parse_bt2 Extracts the statistics found in bowtie2 output files to a csv.
Copyright (C) 2016 William Brazelton, Nickolas Lee
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
__author__ = 'Nickolas Lee'
__license__ = 'GPLv3'
__status__ = 'Production'
__version__ = '1.0'
import re
import pandas as pd
import argparse
def main():
ap = argparse.ArgumentParser(description="Extracts the statistics found in bowtie2 output files to a csv.")
ap.add_argument("-o", "--output_file", default="bt2.csv", help="The name of the csv file to be written.")
ap.add_argument("files", nargs="+", help="A list of bowtie2 output files.")
args = ap.parse_args()
data = pd.DataFrame()
for file in args.files:
bt = parse_bt(file)
for k in bt.keys():
data.loc[file,k] = bt[k]
print("Saving to: "+ args.output_file)
data.to_csv(args.output_file, index=False)
def parse_bt(file):
"""Extracts the statistics found in a bowtie2 output file."""
nums = list()
start_re = re.compile("\d+ reads; of these:")
amount_re = re.compile("^\s*(\d+)\s")
perc_re = re.compile("(\d+\.\d+)\%")
with open(file) as bt:
start = False
al_rate = 0
for line in bt.readlines():
if start_re.match(line):
start = True
if start:
amount = amount_re.match(line)
percent = perc_re.match(line)
if percent:
al_rate = percent.group(1)
if amount:
nums.append(amount.group(1))
col_names = ["Total reads", "Total paired reads", "Paired reads with no concordant alignment",
"Uniquely aligned paired reads", "Paired reads with multiple alignments",
"", "Uniquely discordantly aligned paired reads", "Paired reads with no alignment",
"","","","", "Total single reads", "Unaligned single reads",
"Uniquely aligned single reads", "Single reads with multiple alignments"]
data = dict()
if nums:
for n in range(len(nums)):
data[col_names[n]] = nums[n]
data.pop("") # removes unneeded key
data["Total mapped fragments"] = int(data["Total paired reads"]) - int(data["Paired reads with no alignment"])
if float(al_rate) > 0:
data["Overall alignment rate"] = al_rate
data["file_name"] = file
return data
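# Hedged usage sketch (hypothetical log file name):
#   stats = parse_bt("sample1_bowtie2.log")
#   stats.get("Overall alignment rate")    # only present when bowtie2 reported a non-zero rate
#   stats.get("Total mapped fragments")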
if __name__ == "__main__":
main()
| gpl-2.0 |
RAJSD2610/SDNopenflowSwitchAnalysis | FlowPersec.py | 1 | 2752 | import os
import pandas as pd
import matplotlib.pyplot as plt
import seaborn
path= os.path.expanduser("~/Desktop/ece671/udpt8")
num_files = len([f for f in os.listdir(path)if os.path.isfile(os.path.join(path, f))])
print(num_files)
u8=[]
i=0
def file_len(fname):
with open(fname) as f:
for i, l in enumerate(f):
pass
return i + 1
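# file_len returns the number of lines in a file; below, the line count of each per-second
# dump (fpersec.<t>.csv) is taken as the number of flows programmed during second t for each
# series (udpt8, udpnone, tcpnone, tcpt8).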
while i<(num_files/2) :
# df+=[]
j=i+1
path ="/home/vetri/Desktop/ece671/udpt8/fpersec."+str(j)+".csv"
y = file_len(path)
# except: pass
#df.append(pd.read_csv(path,header=None))
# a+=[]
#y=len(df[i].index)-1 #1 row added by default so that table has a entry
if y<0:
y=0
u8.append(y)
i+=1
print(u8)
path= os.path.expanduser("~/Desktop/ece671/udpnone")
num_files = len([f for f in os.listdir(path)if os.path.isfile(os.path.join(path, f))])
print(num_files)
i=0
j=0
u=[]
while i<(num_files/2):
j=i+1
path ="/home/vetri/Desktop/ece671/udpnone/fpersec."+str(j)+".csv"
y = file_len(path)
# except: pass
#df.append(pd.read_csv(path,header=None))
# a+=[]
#y=len(df[i].index)-1 #1 row added by default so that table has a entry
if y<0:
y=0
u.append(y)
i+=1
print(u)
path= os.path.expanduser("~/Desktop/ece671/tcpnone")
num_files = len([f for f in os.listdir(path)if os.path.isfile(os.path.join(path, f))])
print(num_files)
i=0
j=0
t=[]
while i<(num_files/2):
j=i+1
path ="/home/vetri/Desktop/ece671/tcpnone/fpersec."+str(j)+".csv"
y = file_len(path)
# except: pass
#df.append(pd.read_csv(path,header=None))
# a+=[]
#y=len(df[i].index)-1 #1 row added by default so that table has a entry
if y<0:
y=0
t.append(y)
i+=1
print(t)
path= os.path.expanduser("~/Desktop/ece671/tcpt8")
num_files = len([f for f in os.listdir(path)if os.path.isfile(os.path.join(path, f))])
print(num_files)
i=0
j=0
t8=[]
while i<(num_files/2):
j=i+1
path ="/home/vetri/Desktop/ece671/tcpt8/fpersec."+str(j)+".csv"
y = file_len(path)
# except: pass
#df.append(pd.read_csv(path,header=None))
# a+=[]
#y=len(df[i].index)-1 #1 row added by default so that table has a entry
if y<0:
y=0
t8.append(y)
i+=1
print(t8)
#plt.figure(figsize=(4, 5))
#plt.figure(figsize=(4, 5))
plt.plot(list(range(1,len(u8)+1)),u8, '.-',label="udpt8")
plt.plot(list(range(1,len(u)+1)),u, '.-',label="udpnone")
plt.plot(list(range(1,len(t)+1)),t, '.-',label="tcpnone")
plt.plot(list(range(1,len(t8)+1)),t8, '.-',label="tcpt8")
plt.title("Flows Programmed per Sec")
plt.xlabel("time(s)")
plt.ylabel("flows")
#plt.frameon=True
plt.legend()
plt.show()
| gpl-3.0 |
robbymeals/scikit-learn | sklearn/metrics/cluster/supervised.py | 207 | 27395 | """Utilities to evaluate the clustering performance of models
Functions named as *_score return a scalar value to maximize: the higher the
better.
"""
# Authors: Olivier Grisel <[email protected]>
# Wei LI <[email protected]>
# Diego Molla <[email protected]>
# License: BSD 3 clause
from math import log
from scipy.misc import comb
from scipy.sparse import coo_matrix
import numpy as np
from .expected_mutual_info_fast import expected_mutual_information
from ...utils.fixes import bincount
def comb2(n):
# the exact version is faster for k == 2: use it by default globally in
# this module instead of the float approximate variant
return comb(n, 2, exact=1)
def check_clusterings(labels_true, labels_pred):
"""Check that the two clusterings matching 1D integer arrays"""
labels_true = np.asarray(labels_true)
labels_pred = np.asarray(labels_pred)
# input checks
if labels_true.ndim != 1:
raise ValueError(
"labels_true must be 1D: shape is %r" % (labels_true.shape,))
if labels_pred.ndim != 1:
raise ValueError(
"labels_pred must be 1D: shape is %r" % (labels_pred.shape,))
if labels_true.shape != labels_pred.shape:
raise ValueError(
"labels_true and labels_pred must have same size, got %d and %d"
% (labels_true.shape[0], labels_pred.shape[0]))
return labels_true, labels_pred
def contingency_matrix(labels_true, labels_pred, eps=None):
"""Build a contengency matrix describing the relationship between labels.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
eps: None or float
If a float, that value is added to all values in the contingency
matrix. This helps to stop NaN propagation.
If ``None``, nothing is adjusted.
Returns
-------
contingency: array, shape=[n_classes_true, n_classes_pred]
Matrix :math:`C` such that :math:`C_{i, j}` is the number of samples in
true class :math:`i` and in predicted class :math:`j`. If
``eps is None``, the dtype of this array will be integer. If ``eps`` is
given, the dtype will be float.
"""
classes, class_idx = np.unique(labels_true, return_inverse=True)
clusters, cluster_idx = np.unique(labels_pred, return_inverse=True)
n_classes = classes.shape[0]
n_clusters = clusters.shape[0]
# Using coo_matrix to accelerate simple histogram calculation,
# i.e. bins are consecutive integers
# Currently, coo_matrix is faster than histogram2d for simple cases
contingency = coo_matrix((np.ones(class_idx.shape[0]),
(class_idx, cluster_idx)),
shape=(n_classes, n_clusters),
dtype=np.int).toarray()
if eps is not None:
# don't use += as contingency is integer
contingency = contingency + eps
return contingency
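# Quick illustration (not a doctest): contingency_matrix([0, 0, 1, 1], [0, 1, 1, 1]) returns
# [[1, 1],
#  [0, 2]]: one true-class-0 sample fell in cluster 0, one in cluster 1, and both
# true-class-1 samples fell in cluster 1.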
# clustering measures
def adjusted_rand_score(labels_true, labels_pred):
"""Rand index adjusted for chance
The Rand Index computes a similarity measure between two clusterings
by considering all pairs of samples and counting pairs that are
assigned in the same or different clusters in the predicted and
true clusterings.
The raw RI score is then "adjusted for chance" into the ARI score
using the following scheme::
ARI = (RI - Expected_RI) / (max(RI) - Expected_RI)
The adjusted Rand index is thus ensured to have a value close to
0.0 for random labeling independently of the number of clusters and
samples and exactly 1.0 when the clusterings are identical (up to
a permutation).
ARI is a symmetric measure::
adjusted_rand_score(a, b) == adjusted_rand_score(b, a)
Read more in the :ref:`User Guide <adjusted_rand_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
Returns
-------
ari : float
Similarity score between -1.0 and 1.0. Random labelings have an ARI
close to 0.0. 1.0 stands for perfect match.
Examples
--------
    Perfectly matching labelings have a score of 1 even
>>> from sklearn.metrics.cluster import adjusted_rand_score
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_rand_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Labelings that assign all classes members to the same clusters
    are complete but not always pure, hence penalized::
>>> adjusted_rand_score([0, 0, 1, 2], [0, 0, 1, 1]) # doctest: +ELLIPSIS
0.57...
ARI is symmetric, so labelings that have pure clusters with members
coming from the same classes but unnecessary splits are penalized::
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 2]) # doctest: +ELLIPSIS
0.57...
If classes members are completely split across different clusters, the
assignment is totally incomplete, hence the ARI is very low::
>>> adjusted_rand_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [Hubert1985] `L. Hubert and P. Arabie, Comparing Partitions,
Journal of Classification 1985`
http://www.springerlink.com/content/x64124718341j1j0/
.. [wk] http://en.wikipedia.org/wiki/Rand_index#Adjusted_Rand_index
See also
--------
adjusted_mutual_info_score: Adjusted Mutual Information
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split;
# or trivial clustering where each document is assigned a unique cluster.
# These are perfect matches hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0
or classes.shape[0] == clusters.shape[0] == len(labels_true)):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
# Compute the ARI using the contingency data
sum_comb_c = sum(comb2(n_c) for n_c in contingency.sum(axis=1))
sum_comb_k = sum(comb2(n_k) for n_k in contingency.sum(axis=0))
sum_comb = sum(comb2(n_ij) for n_ij in contingency.flatten())
prod_comb = (sum_comb_c * sum_comb_k) / float(comb(n_samples, 2))
mean_comb = (sum_comb_k + sum_comb_c) / 2.
return ((sum_comb - prod_comb) / (mean_comb - prod_comb))
def homogeneity_completeness_v_measure(labels_true, labels_pred):
"""Compute the homogeneity and completeness and V-Measure scores at once
Those metrics are based on normalized conditional entropy measures of
the clustering labeling to evaluate given the knowledge of a Ground
Truth class labels of the same samples.
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
Both scores have positive values between 0.0 and 1.0, larger values
being desirable.
Those 3 metrics are independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score values in any way.
V-Measure is furthermore symmetric: swapping ``labels_true`` and
``label_pred`` will give the same score. This does not hold for
homogeneity and completeness.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
v_measure: float
harmonic mean of the first two
See also
--------
homogeneity_score
completeness_score
v_measure_score
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
if len(labels_true) == 0:
return 1.0, 1.0, 1.0
entropy_C = entropy(labels_true)
entropy_K = entropy(labels_pred)
MI = mutual_info_score(labels_true, labels_pred)
homogeneity = MI / (entropy_C) if entropy_C else 1.0
completeness = MI / (entropy_K) if entropy_K else 1.0
if homogeneity + completeness == 0.0:
v_measure_score = 0.0
else:
v_measure_score = (2.0 * homogeneity * completeness
/ (homogeneity + completeness))
return homogeneity, completeness, v_measure_score
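# Quick illustration (not a doctest): homogeneity_completeness_v_measure([0, 0, 1, 1], [0, 0, 1, 2])
# returns approximately (1.0, 0.667, 0.8): splitting one class across two clusters keeps
# homogeneity perfect but lowers completeness, and the V-measure is their harmonic mean.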
def homogeneity_score(labels_true, labels_pred):
"""Homogeneity metric of a cluster labeling given a ground truth
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`completeness_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
completeness_score
v_measure_score
Examples
--------
Perfect labelings are homogeneous::
>>> from sklearn.metrics.cluster import homogeneity_score
>>> homogeneity_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that further split classes into more clusters can be
perfectly homogeneous::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
1.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
1.0...
Clusters that include samples from different classes do not make for an
homogeneous labeling::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 0, 1]))
... # doctest: +ELLIPSIS
0.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[0]
def completeness_score(labels_true, labels_pred):
"""Completeness metric of a cluster labeling given a ground truth
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`homogeneity_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
v_measure_score
Examples
--------
Perfect labelings are complete::
>>> from sklearn.metrics.cluster import completeness_score
>>> completeness_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that assign all classes members to the same clusters
are still complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 0, 0, 0]))
1.0
>>> print(completeness_score([0, 1, 2, 3], [0, 0, 1, 1]))
1.0
    If class members are split across different clusters, the
    assignment cannot be complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 1, 0, 1]))
0.0
>>> print(completeness_score([0, 0, 0, 0], [0, 1, 2, 3]))
0.0
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[1]
def v_measure_score(labels_true, labels_pred):
"""V-measure cluster labeling given a ground truth.
This score is identical to :func:`normalized_mutual_info_score`.
The V-measure is the harmonic mean between homogeneity and completeness::
v = 2 * (homogeneity * completeness) / (homogeneity + completeness)
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
v_measure: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
completeness_score
Examples
--------
Perfect labelings are both homogeneous and complete, hence have score 1.0::
>>> from sklearn.metrics.cluster import v_measure_score
>>> v_measure_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> v_measure_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
    Labelings that assign all class members to the same clusters
    are complete but not homogeneous, hence penalized::
>>> print("%.6f" % v_measure_score([0, 0, 1, 2], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 1, 2, 3], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.66...
    Labelings that have pure clusters with members coming from the same
    classes are homogeneous, but unnecessary splits harm completeness
    and thus penalize the V-measure as well::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.66...
    If class members are completely split across different clusters,
    the assignment is totally incomplete, hence the V-measure is null::
>>> print("%.6f" % v_measure_score([0, 0, 0, 0], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.0...
    Clusters that include samples from entirely different classes
    destroy the homogeneity of the labeling, hence::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[2]
def mutual_info_score(labels_true, labels_pred, contingency=None):
"""Mutual Information between two clusterings
    The Mutual Information is a measure of the similarity between two
    labelings of the same data. Where :math:`P(i)` is the probability of a
    random sample
occurring in cluster :math:`U_i` and :math:`P'(j)` is the probability of a
random sample occurring in cluster :math:`V_j`, the Mutual Information
between clusterings :math:`U` and :math:`V` is given as:
.. math::
MI(U,V)=\sum_{i=1}^R \sum_{j=1}^C P(i,j)\log\\frac{P(i,j)}{P(i)P'(j)}
This is equal to the Kullback-Leibler divergence of the joint distribution
with the product distribution of the marginals.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
contingency: None or array, shape = [n_classes_true, n_classes_pred]
A contingency matrix given by the :func:`contingency_matrix` function.
If value is ``None``, it will be computed, otherwise the given value is
used, with ``labels_true`` and ``labels_pred`` ignored.
Returns
-------
mi: float
Mutual information, a non-negative value
See also
--------
adjusted_mutual_info_score: Adjusted against chance Mutual Information
normalized_mutual_info_score: Normalized Mutual Information
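    Examples
    --------
    A small illustrative sketch (the values follow from the definition
    above): identical labelings share all of their information, so the MI
    equals the entropy of the labeling, here ``log(2)`` for two balanced
    classes::
    >>> from sklearn.metrics.cluster import mutual_info_score
    >>> print("%.6f" % mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1]))
    ... # doctest: +ELLIPSIS
    0.69...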
"""
if contingency is None:
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
contingency_sum = np.sum(contingency)
pi = np.sum(contingency, axis=1)
pj = np.sum(contingency, axis=0)
outer = np.outer(pi, pj)
nnz = contingency != 0.0
# normalized contingency
contingency_nm = contingency[nnz]
log_contingency_nm = np.log(contingency_nm)
contingency_nm /= contingency_sum
    # log(a / b) should be calculated as log(a) - log(b) to avoid
    # possible loss of precision
log_outer = -np.log(outer[nnz]) + log(pi.sum()) + log(pj.sum())
mi = (contingency_nm * (log_contingency_nm - log(contingency_sum))
+ contingency_nm * log_outer)
return mi.sum()
def adjusted_mutual_info_score(labels_true, labels_pred):
"""Adjusted Mutual Information between two clusterings
Adjusted Mutual Information (AMI) is an adjustment of the Mutual
Information (MI) score to account for chance. It accounts for the fact that
the MI is generally higher for two clusterings with a larger number of
clusters, regardless of whether there is actually more information shared.
For two clusterings :math:`U` and :math:`V`, the AMI is given as::
AMI(U, V) = [MI(U, V) - E(MI(U, V))] / [max(H(U), H(V)) - E(MI(U, V))]
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Be mindful that this function is an order of magnitude slower than other
metrics, such as the Adjusted Rand Index.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
    ami : float (upper-bounded by 1.0)
        The AMI returns a value of 1 when the two partitions are identical
        (i.e. perfectly matched). Random partitions (independent labellings)
        have an expected AMI around 0 on average and can therefore be
        negative.
See also
--------
adjusted_rand_score: Adjusted Rand Index
    mutual_info_score: Mutual Information (not adjusted for chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import adjusted_mutual_info_score
>>> adjusted_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
    If class members are completely split across different clusters,
    the assignment is totally incomplete, hence the AMI is null::
>>> adjusted_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [1] `Vinh, Epps, and Bailey, (2010). Information Theoretic Measures for
Clusterings Comparison: Variants, Properties, Normalization and
Correction for Chance, JMLR
<http://jmlr.csail.mit.edu/papers/volume11/vinh10a/vinh10a.pdf>`_
.. [2] `Wikipedia entry for the Adjusted Mutual Information
<http://en.wikipedia.org/wiki/Adjusted_Mutual_Information>`_
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
emi = expected_mutual_information(contingency, n_samples)
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
ami = (mi - emi) / (max(h_true, h_pred) - emi)
return ami
def normalized_mutual_info_score(labels_true, labels_pred):
"""Normalized Mutual Information between two clusterings
    Normalized Mutual Information (NMI) is a normalization of the Mutual
Information (MI) score to scale the results between 0 (no mutual
information) and 1 (perfect correlation). In this function, mutual
information is normalized by ``sqrt(H(labels_true) * H(labels_pred))``
    This measure is not adjusted for chance. Therefore
    :func:`adjusted_mutual_info_score` might be preferred.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
nmi: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
See also
--------
adjusted_rand_score: Adjusted Rand Index
adjusted_mutual_info_score: Adjusted Mutual Information (adjusted
against chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import normalized_mutual_info_score
>>> normalized_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> normalized_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
    If class members are completely split across different clusters,
    the assignment is totally incomplete, hence the NMI is null::
>>> normalized_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
nmi = mi / max(np.sqrt(h_true * h_pred), 1e-10)
return nmi
def entropy(labels):
"""Calculates the entropy for a labeling."""
if len(labels) == 0:
return 1.0
label_idx = np.unique(labels, return_inverse=True)[1]
pi = bincount(label_idx).astype(np.float)
pi = pi[pi > 0]
pi_sum = np.sum(pi)
    # log(a / b) should be calculated as log(a) - log(b) to avoid
    # possible loss of precision
return -np.sum((pi / pi_sum) * (np.log(pi) - log(pi_sum)))
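# Illustrative sanity check for the helper above (natural-log entropy):
# two balanced labels give log(2), e.g.
#   >>> print("%.6f" % entropy([0, 0, 1, 1]))  # doctest: +ELLIPSIS
#   0.69...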
| bsd-3-clause |
turambar/pyDatasets | pydatasets/mp/ProcessMP.py | 2 | 13905 | # -*- coding: utf-8 -*-
"""
Created on Wed Apr 9 14:29:38 2014
@author: dbell
"""
import argparse
import itertools
import pandas as pd
import os
import re
import scipy.io as sio
import numpy as np
import sys
import time
import copy
import csv
from pandas import Panel, DataFrame, Series
import MP
_NOTEBOOK = False  # change to True if you're going to run this locally
parser = argparse.ArgumentParser()
parser.add_argument('data', type=unicode)
parser.add_argument('out', type=unicode)
parser.add_argument('outcomes', type=unicode)
#parser.add_argument('-vi', '--var_info', type=unicode, default='../../data/vpicu/physiologic-variable-normals.csv') # I need to look at this
parser.add_argument('-rr', '--resample_rate', type=int, default=60)
parser.add_argument('-ma', '--max_age', type=int, default=18)
if _NOTEBOOK:
args = parser.parse_args( 'settings go here' ) #you're going to have to update with proper params
else:
args = parser.parse_args()
data_dir = args.data
out_dir = args.out
outcomes_dir = args.outcomes
try:
os.makedirs(out_dir)
except:
pass
#ranges = DataFrame.from_csv(args.var.info, parse_dates=False)
#ranges = ranges[['Low', 'Normal', 'High']]
#varids = ranges.index.tolist()
resample_rate = '{0}min'.format(args.resample_rate)
max_age = args.max_age
patients = os.listdir(data_dir)
N = len(patients)
Xraw = []
Xmiss = []
X = []
allPatientObjs = dict()
#Traw = np.zeros((N,), dtype=int) # Number of (irregular) samples for episode
#T = np.zeros((N,), dtype=int) # Resampled episode lengths (for convenience)
#age = np.zeros((N,), dtype=int) # Per-episode patient ages in months
#gender = np.zeros((N,), dtype=int) # Per-episode patient gender
#weight = np.zeros((N,)) # Per-episode patient weight
#lmf = np.zeros((N,)) # Last-first duration
allCondensedVals = None
deathAllCondensedVals = None
aliveAllCondensedVals = None
channels = 'channels.txt'
channelsDict = dict()
num = 0
chans = file(channels, 'r')
for chan in chans:
chan = chan.rstrip()
channelsDict[chan] = {}
num = num + 1
chans.close()
data_dict = {'sampling_rate': {'mean': [], 'standard_dev': None }, 'value': {'mean': [], 'standard_dev': None }, 'missing': 0 }
patient_stats = copy.deepcopy(channelsDict)
overall_stats = copy.deepcopy(channelsDict)
death_patient_stats = copy.deepcopy(channelsDict)
alive_patient_stats = copy.deepcopy(channelsDict)
death_overall_stats = copy.deepcopy(channelsDict)
alive_overall_stats = copy.deepcopy(channelsDict)
for key in channelsDict:
patient_stats[key] = copy.deepcopy(data_dict)
overall_stats[key] = copy.deepcopy(data_dict)
alive_patient_stats[key] = copy.deepcopy(data_dict)
alive_overall_stats[key] = copy.deepcopy(data_dict)
death_patient_stats[key] = copy.deepcopy(data_dict)
death_overall_stats[key] = copy.deepcopy(data_dict)
for key in channelsDict:
channelsDict[key] = []
idx = 0
count_nodata = 0
total_time_start = time.time()
for pat in patients:
start_create = time.time()
try:
subj = MP.MPSubject.from_file(os.path.join(data_dir, pat), channelsDict)
except MP.InvalidMPDataException as e:
count_nodata += 1
if e.field == 'msmts' and e.err == 'size':
pass
else:
sys.stdout.write('\nskipping: ' + str(e))
continue
end_create = time.time()
allPatientObjs[subj._recordID] = subj
#print "Time to create: "
#print (end_create - start_create)
start_post = time.time()
if allCondensedVals is None:
allCondensedVals = subj.condensed_values()
else:
condVals = subj.condensed_values()
for feature in allCondensedVals:
if condVals[feature]:
patient_stats[feature]['value']['mean'].append( np.mean(condVals[feature]) )
patient_stats[feature]['sampling_rate']['mean'].append( len(condVals[feature]) )
overall_stats[feature]['value']['mean'] += condVals[feature]
overall_stats[feature]['sampling_rate']['mean'].append( len(condVals[feature]) )
allCondensedVals[feature] += (condVals[feature])
else:
patient_stats[feature]['missing'] = 1
overall_stats[feature]['missing'] += 1
s = subj.as_nparray()
if s.size == 0:
print 'getting skipped'
continue
mylmf = (s[:,0].max() - s[:,0].min())/60
#store raw time series
Xraw.append(s)
#resample
sr = subj.as_nparray_resampled(rate=resample_rate, impute=True)
    # the first couple of resampled values can contain NaNs
    if not np.all(~np.isnan(sr)):
        print pat
X.append(sr)
idx += 1
end_post = time.time()
#print "Time for post logic: "
#print (end_post - start_post)
for feature in allCondensedVals:
overall_stats[feature]['value']['standard_dev'] = np.std(overall_stats[feature]['value']['mean'])
overall_stats[feature]['value']['mean'] = np.mean(overall_stats[feature]['value']['mean'])
overall_stats[feature]['sampling_rate']['standard_dev'] = np.std(overall_stats[feature]['sampling_rate']['mean'])
overall_stats[feature]['sampling_rate']['mean'] = np.mean(overall_stats[feature]['sampling_rate']['mean'])
patient_stats[feature]['value']['standard_dev'] = np.std(patient_stats[feature]['value']['mean'])
patient_stats[feature]['value']['mean'] = np.mean(patient_stats[feature]['value']['mean'])
patient_stats[feature]['sampling_rate']['standard_dev'] = np.std(patient_stats[feature]['sampling_rate']['mean'])
patient_stats[feature]['sampling_rate']['mean'] = np.mean(patient_stats[feature]['sampling_rate']['mean'])
overall_stats[feature]['missing'] = overall_stats[feature]['missing'] / float(idx - count_nodata)
patient_stats[feature]['missing'] = patient_stats[feature]['missing'] / float(idx - count_nodata)
#compute meta data
stds_vals = []
means_vals = []
for feature in patient_stats:
stds_vals.append(patient_stats[feature]['value']['standard_dev'])
means_vals.append(patient_stats[feature]['value']['mean'])
meta_statistics = {'mean_of_means': np.mean(means_vals), 'std_of_means': np.std(means_vals), 'mean_of_stds': np.mean(stds_vals), 'std_of_stds': np.std(stds_vals) }
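# meta_statistics summarises, across features, the per-feature
# (patient-averaged) means and standard deviations gathered above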
csv_dict = copy.deepcopy(channelsDict)
params_dict = {'R_i': np.nan , 'E[R_i]': np.nan, 'std[R_i]': np.nan, 'V[R_i]': np.nan, 'R': np.nan, 'E[X_vi]': np.nan, 'std[X_vi]': np.nan, 'E[E[X_vi]]': np.nan, 'std[E[X_vi]]': np.nan, 'E[std[X_vi]]': np.nan, 'std[std[X_vi]]': np.nan, 'E[X_v]': np.nan, 'std[X_v]': np.nan, 'F_v': np.nan }
for feature in csv_dict:
print feature
csv_dict[feature] = copy.deepcopy(params_dict)
print csv_dict[feature]
csv_dict[feature]['E[R_i]'] = patient_stats[feature]['sampling_rate']['mean']
csv_dict[feature]['std[R_i]'] = patient_stats[feature]['sampling_rate']['standard_dev']
csv_dict[feature]['V[R_i]'] = patient_stats[feature]['sampling_rate']['standard_dev']**2
csv_dict[feature]['R'] = overall_stats[feature]['sampling_rate']['mean']
csv_dict[feature]['E[X_vi]'] = patient_stats[feature]['value']['mean']
csv_dict[feature]['std[X_vi]'] = patient_stats[feature]['value']['standard_dev']
csv_dict[feature]['E[X_v]'] = overall_stats[feature]['value']['mean']
csv_dict[feature]['std[X_v]'] = overall_stats[feature]['value']['standard_dev']
csv_dict[feature]['F_v'] = overall_stats[feature]['missing']
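    # the global meta-statistics are attached to a single, arbitrarily
    # chosen row ('Albumin'), presumably so they appear only once in the
    # exported CSV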
if feature == 'Albumin':
csv_dict[feature]['E[E[X_vi]]'] = meta_statistics['mean_of_means']
csv_dict[feature]['std[E[X_vi]]'] = meta_statistics['std_of_means']
csv_dict[feature]['E[std[X_vi]]'] = meta_statistics['mean_of_stds']
csv_dict[feature]['std[std[X_vi]]'] = meta_statistics['std_of_stds']
csv_df = DataFrame.from_dict(csv_dict).sort_index().transpose().sort_index()
print csv_df
csv_df.to_csv('/Users/dbell/Desktop/csv.csv')
print 'overall'
print overall_stats
print 'patient'
print patient_stats
total_time_end = time.time()
#print allCondensedVals
print "Total elapsed time: "
print (total_time_end - total_time_start)
#outcome code
outcomeFile = file(outcomes_dir, 'r')
outcomeFile.readline()
numPatients = 0
deathCount = 0
no_outcome = 0
length_stays = []
for line in csv.reader(outcomeFile, delimiter=','):
numPatients += 1
rID = int(line[0])
if rID in allPatientObjs:
patObj = allPatientObjs[rID]
death = int(line[5])
patObj.death = death
deathCount += death
los = int(line[3])
patObj.length_of_stay = los
length_stays.append(los)
if hasattr(patObj, '_condValues'):
del patObj._condValues
#patObj.to_pickle('/Users/dbell/Desktop/pickled_data')
else:
no_outcome += 1
deathPercentage = float(deathCount) / float(numPatients)
mean_length_stay = np.mean(length_stays)
standard_dev_length_stay = np.std(length_stays)
print 'death percentage: '
print deathPercentage
print 'No data: '
print count_nodata
print 'No outcome: '
print no_outcome
#by outcome
# iterate over the parsed patient objects (the raw file names in `patients`
# carry no outcome attributes)
for pat2 in allPatientObjs.values():
if not hasattr(pat2, 'death'):
pass
elif pat2.death:
if deathAllCondensedVals is None:
            deathAllCondensedVals = pat2.condensed_values()
else:
condVals = pat2.condensed_values()
for feature in deathAllCondensedVals:
if condVals[feature]:
death_patient_stats[feature]['value']['mean'].append( np.mean(condVals[feature]) )
death_patient_stats[feature]['sampling_rate']['mean'].append( len(condVals[feature]) )
death_overall_stats[feature]['value']['mean'] += condVals[feature]
death_overall_stats[feature]['sampling_rate']['mean'].append( len(condVals[feature]) )
deathAllCondensedVals[feature] += (condVals[feature])
else:
death_patient_stats[feature]['missing'] = 1
death_overall_stats[feature]['missing'] += 1
else:
if aliveAllCondensedVals is None:
            aliveAllCondensedVals = pat2.condensed_values()
else:
condVals = pat2.condensed_values()
for feature in allCondensedVals:
if condVals[feature]:
alive_patient_stats[feature]['value']['mean'].append( np.mean(condVals[feature]) )
alive_patient_stats[feature]['sampling_rate']['mean'].append( len(condVals[feature]) )
alive_overall_stats[feature]['value']['mean'] += condVals[feature]
alive_overall_stats[feature]['sampling_rate']['mean'].append( len(condVals[feature]) )
aliveAllCondensedVals[feature] += (condVals[feature])
else:
alive_patient_stats[feature]['missing'] = 1
alive_overall_stats[feature]['missing'] += 1
for feature in allCondensedVals:
death_overall_stats[feature]['value']['standard_dev'] = np.std(death_overall_stats[feature]['value']['mean'])
death_overall_stats[feature]['value']['mean'] = np.mean(death_overall_stats[feature]['value']['mean'])
death_overall_stats[feature]['sampling_rate']['standard_dev'] = np.std(death_overall_stats[feature]['sampling_rate']['mean'])
death_overall_stats[feature]['sampling_rate']['mean'] = np.mean(death_overall_stats[feature]['sampling_rate']['mean'])
death_patient_stats[feature]['value']['standard_dev'] = np.std(death_patient_stats[feature]['value']['mean'])
death_patient_stats[feature]['value']['mean'] = np.mean(death_patient_stats[feature]['value']['mean'])
death_patient_stats[feature]['sampling_rate']['standard_dev'] = np.std(death_patient_stats[feature]['sampling_rate']['mean'])
death_patient_stats[feature]['sampling_rate']['mean'] = np.mean(death_patient_stats[feature]['sampling_rate']['mean'])
death_overall_stats[feature]['missing'] = death_overall_stats[feature]['missing'] / float(deathCount)
death_patient_stats[feature]['missing'] = death_patient_stats[feature]['missing'] / float(deathCount)
alive_overall_stats[feature]['value']['standard_dev'] = np.std(alive_overall_stats[feature]['value']['mean'])
alive_overall_stats[feature]['value']['mean'] = np.mean(alive_overall_stats[feature]['value']['mean'])
alive_overall_stats[feature]['sampling_rate']['standard_dev'] = np.std(alive_overall_stats[feature]['sampling_rate']['mean'])
alive_overall_stats[feature]['sampling_rate']['mean'] = np.mean(alive_overall_stats[feature]['sampling_rate']['mean'])
alive_patient_stats[feature]['value']['standard_dev'] = np.std(alive_patient_stats[feature]['value']['mean'])
alive_patient_stats[feature]['value']['mean'] = np.mean(alive_patient_stats[feature]['value']['mean'])
alive_patient_stats[feature]['sampling_rate']['standard_dev'] = np.std(alive_patient_stats[feature]['sampling_rate']['mean'])
alive_patient_stats[feature]['sampling_rate']['mean'] = np.mean(alive_patient_stats[feature]['sampling_rate']['mean'])
alive_overall_stats[feature]['missing'] = alive_overall_stats[feature]['missing'] / float(deathCount)
alive_patient_stats[feature]['missing'] = alive_patient_stats[feature]['missing'] / float(deathCount)
print 'death overall'
print death_overall_stats
print 'alive overall'
print alive_overall_stats
print 'death patient'
print death_patient_stats
print 'alive patient'
print alive_patient_stats
#features = features[0:idx,]
#epids = epids[0:idx]
#Traw = Traw[0:idx]
#T = T[0:idx]
#age = age[0:idx]
#gender = gender[0:idx]
#weight = weight[0:idx]
#y = y[0:idx]
#pdiag = pdiag[0:idx]
#los = los[0:idx]
#lmf = lmf[0:idx]
| apache-2.0 |
Sentient07/scikit-learn | sklearn/linear_model/sag.py | 18 | 11273 | """Solvers for Ridge and LogisticRegression using SAG algorithm"""
# Authors: Tom Dupre la Tour <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import warnings
from ..exceptions import ConvergenceWarning
from ..utils import check_array
from ..utils.extmath import row_norms
from .base import make_dataset
from .sag_fast import sag
def get_auto_step_size(max_squared_sum, alpha_scaled, loss, fit_intercept):
"""Compute automatic step size for SAG solver
    The step size is set to 1 / (alpha_scaled + L + fit_intercept) where L is
    the max sum of squares over all samples (for the log loss, the inverse
    Lipschitz constant 4 / (L + fit_intercept + 4 * alpha_scaled) is used
    instead, as implemented below).
Parameters
----------
max_squared_sum : float
Maximum squared sum of X over samples.
alpha_scaled : float
Constant that multiplies the regularization term, scaled by
1. / n_samples, the number of samples.
loss : string, in {"log", "squared"}
The loss function used in SAG solver.
fit_intercept : bool
Specifies if a constant (a.k.a. bias or intercept) will be
added to the decision function.
Returns
-------
step_size : float
Step size used in SAG solver.
References
----------
Schmidt, M., Roux, N. L., & Bach, F. (2013).
Minimizing finite sums with the stochastic average gradient
https://hal.inria.fr/hal-00860051/document
"""
if loss in ('log', 'multinomial'):
# inverse Lipschitz constant for log loss
return 4.0 / (max_squared_sum + int(fit_intercept)
+ 4.0 * alpha_scaled)
elif loss == 'squared':
# inverse Lipschitz constant for squared loss
return 1.0 / (max_squared_sum + int(fit_intercept) + alpha_scaled)
else:
raise ValueError("Unknown loss function for SAG solver, got %s "
"instead of 'log' or 'squared'" % loss)
def sag_solver(X, y, sample_weight=None, loss='log', alpha=1.,
max_iter=1000, tol=0.001, verbose=0, random_state=None,
check_input=True, max_squared_sum=None,
warm_start_mem=None):
"""SAG solver for Ridge and LogisticRegression
    SAG stands for Stochastic Average Gradient: the gradient of the loss is
    estimated one sample at a time and the model is updated along the way with
    a constant learning rate.
IMPORTANT NOTE: 'sag' solver converges faster on columns that are on the
same scale. You can normalize the data by using
sklearn.preprocessing.StandardScaler on your data before passing it to the
fit method.
This implementation works with data represented as dense numpy arrays or
sparse scipy arrays of floating point values for the features. It will
fit the data according to squared loss or log loss.
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using the squared euclidean norm L2.
.. versionadded:: 0.17
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values. With loss='multinomial', y must be label encoded
(see preprocessing.LabelEncoder).
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
loss : 'log' | 'squared' | 'multinomial'
Loss function that will be optimized:
-'log' is the binary logistic loss, as used in LogisticRegression.
-'squared' is the squared loss, as used in Ridge.
-'multinomial' is the multinomial logistic loss, as used in
LogisticRegression.
.. versionadded:: 0.18
*loss='multinomial'*
alpha : float, optional
Constant that multiplies the regularization term. Defaults to 1.
max_iter : int, optional
The max number of passes over the training data if the stopping
criteria is not reached. Defaults to 1000.
tol : double, optional
The stopping criteria for the weights. The iterations will stop when
max(change in weights) / max(weights) < tol. Defaults to .001
verbose : integer, optional
The verbosity level.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
check_input : bool, default True
If False, the input arrays X and y will not be checked.
max_squared_sum : float, default None
Maximum squared sum of X over samples. If None, it will be computed,
going through all the samples. The value should be precomputed
to speed up cross validation.
warm_start_mem : dict, optional
The initialization parameters used for warm starting. Warm starting is
currently used in LogisticRegression but not in Ridge.
It contains:
- 'coef': the weight vector, with the intercept in last line
if the intercept is fitted.
- 'gradient_memory': the scalar gradient for all seen samples.
- 'sum_gradient': the sum of gradient over all seen samples,
for each feature.
- 'intercept_sum_gradient': the sum of gradient over all seen
samples, for the intercept.
- 'seen': array of boolean describing the seen samples.
- 'num_seen': the number of seen samples.
Returns
-------
coef_ : array, shape (n_features)
Weight vector.
n_iter_ : int
The number of full pass on all samples.
warm_start_mem : dict
Contains a 'coef' key with the fitted result, and possibly the
fitted intercept at the end of the array. Contains also other keys
used for warm starting.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> X = np.random.randn(n_samples, n_features)
>>> y = np.random.randn(n_samples)
>>> clf = linear_model.Ridge(solver='sag')
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None,
normalize=False, random_state=None, solver='sag', tol=0.001)
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> clf = linear_model.LogisticRegression(solver='sag')
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
LogisticRegression(C=1.0, class_weight=None, dual=False,
fit_intercept=True, intercept_scaling=1, max_iter=100,
multi_class='ovr', n_jobs=1, penalty='l2', random_state=None,
solver='sag', tol=0.0001, verbose=0, warm_start=False)
References
----------
Schmidt, M., Roux, N. L., & Bach, F. (2013).
Minimizing finite sums with the stochastic average gradient
https://hal.inria.fr/hal-00860051/document
See also
--------
Ridge, SGDRegressor, ElasticNet, Lasso, SVR, and
LogisticRegression, SGDClassifier, LinearSVC, Perceptron
"""
if warm_start_mem is None:
warm_start_mem = {}
# Ridge default max_iter is None
if max_iter is None:
max_iter = 1000
if check_input:
X = check_array(X, dtype=np.float64, accept_sparse='csr', order='C')
y = check_array(y, dtype=np.float64, ensure_2d=False, order='C')
n_samples, n_features = X.shape[0], X.shape[1]
# As in SGD, the alpha is scaled by n_samples.
alpha_scaled = float(alpha) / n_samples
# if loss == 'multinomial', y should be label encoded.
n_classes = int(y.max()) + 1 if loss == 'multinomial' else 1
# initialization
if sample_weight is None:
sample_weight = np.ones(n_samples, dtype=np.float64, order='C')
if 'coef' in warm_start_mem.keys():
coef_init = warm_start_mem['coef']
else:
# assume fit_intercept is False
coef_init = np.zeros((n_features, n_classes), dtype=np.float64,
order='C')
# coef_init contains possibly the intercept_init at the end.
# Note that Ridge centers the data before fitting, so fit_intercept=False.
fit_intercept = coef_init.shape[0] == (n_features + 1)
if fit_intercept:
intercept_init = coef_init[-1, :]
coef_init = coef_init[:-1, :]
else:
intercept_init = np.zeros(n_classes, dtype=np.float64)
if 'intercept_sum_gradient' in warm_start_mem.keys():
intercept_sum_gradient = warm_start_mem['intercept_sum_gradient']
else:
intercept_sum_gradient = np.zeros(n_classes, dtype=np.float64)
if 'gradient_memory' in warm_start_mem.keys():
gradient_memory_init = warm_start_mem['gradient_memory']
else:
gradient_memory_init = np.zeros((n_samples, n_classes),
dtype=np.float64, order='C')
if 'sum_gradient' in warm_start_mem.keys():
sum_gradient_init = warm_start_mem['sum_gradient']
else:
sum_gradient_init = np.zeros((n_features, n_classes),
dtype=np.float64, order='C')
if 'seen' in warm_start_mem.keys():
seen_init = warm_start_mem['seen']
else:
seen_init = np.zeros(n_samples, dtype=np.int32, order='C')
if 'num_seen' in warm_start_mem.keys():
num_seen_init = warm_start_mem['num_seen']
else:
num_seen_init = 0
dataset, intercept_decay = make_dataset(X, y, sample_weight, random_state)
if max_squared_sum is None:
max_squared_sum = row_norms(X, squared=True).max()
step_size = get_auto_step_size(max_squared_sum, alpha_scaled, loss,
fit_intercept)
if step_size * alpha_scaled == 1:
raise ZeroDivisionError("Current sag implementation does not handle "
"the case step_size * alpha_scaled == 1")
num_seen, n_iter_ = sag(dataset, coef_init,
intercept_init, n_samples,
n_features, n_classes, tol,
max_iter,
loss,
step_size, alpha_scaled,
sum_gradient_init,
gradient_memory_init,
seen_init,
num_seen_init,
fit_intercept,
intercept_sum_gradient,
intercept_decay,
verbose)
if n_iter_ == max_iter:
warnings.warn("The max_iter was reached which means "
"the coef_ did not converge", ConvergenceWarning)
if fit_intercept:
coef_init = np.vstack((coef_init, intercept_init))
warm_start_mem = {'coef': coef_init, 'sum_gradient': sum_gradient_init,
'intercept_sum_gradient': intercept_sum_gradient,
'gradient_memory': gradient_memory_init,
'seen': seen_init, 'num_seen': num_seen}
if loss == 'multinomial':
coef_ = coef_init.T
else:
coef_ = coef_init[:, 0]
return coef_, n_iter_, warm_start_mem
| bsd-3-clause |
janmedlock/HIV-95-vaccine | plots/infections_averted_map.py | 1 | 4039 | #!/usr/bin/python3
'''
Make maps of the infections averted at different times.
'''
import os.path
import sys
from matplotlib import colors as mcolors
from matplotlib import pyplot
from matplotlib import ticker
import numpy
import pandas
sys.path.append(os.path.dirname(__file__))
import common
import mapplot
import stats
sys.path.append('..')
import model
baseline = model.target.StatusQuo()
interventions = (
model.target.UNAIDS95(),
model.target.Vaccine(treatment_target = model.target.StatusQuo()),
model.target.Vaccine(treatment_target = model.target.UNAIDS95()))
baseline = str(baseline)
interventions = list(map(str, interventions))
time = 2035
scale = 0.01
title = 'Infections Averted (Compared to {})'.format(baseline)
vmin = 0.1 / scale
vmax = 0.9 / scale
norm = mcolors.Normalize(vmin = vmin, vmax = vmax)
cmap = 'plasma_r'
label_coords = (-130, -30)
height = 0.28
pad = 0.02
cpad = 0.05
aspect = 1.45
def plot(infections_averted):
countries = infections_averted.index
interventions = infections_averted.columns
fig = pyplot.figure(figsize = (common.width_1_5column, 6))
nrows = len(interventions)
for (i, intv) in enumerate(interventions):
if i < nrows - 1:
h = height
b = 1 - height - (height + pad) * i
else:
h = 1 - (height + pad) * (nrows - 1)
b = 0
m = mapplot.Basemap(rect = (0, b, 1, h),
anchor = (0.5, 1))
mappable = m.choropleth(
countries,
infections_averted.loc[:, intv] / scale,
cmap = cmap,
norm = norm,
vmin = vmin,
vmax = vmax)
label = common.get_target_label(intv)
label = label.replace('_', ' ').replace('+', '\n+')
X, Y = label_coords
m.text_coords(
X, Y, label,
fontdict = dict(size = pyplot.rcParams['font.size'] + 3),
horizontalalignment = 'center',
verticalalignment = 'center')
cbar = fig.colorbar(mappable,
label = title,
orientation = 'horizontal',
fraction = 0.23,
pad = cpad,
shrink = 0.8,
panchor = False,
format = common.PercentFormatter())
# Try to work around ugliness from viewer bugs.
cbar.solids.set_edgecolor('face')
cbar.solids.drawedges = False
ticklabels = cbar.ax.get_xticklabels()
if infections_averted.min().min() < vmin * scale:
ticklabels[0].set_text(r'$\leq\!$' + ticklabels[0].get_text())
if infections_averted.max().max() > vmax * scale:
ticklabels[-1].set_text(r'$\geq\!$' + ticklabels[-1].get_text())
cbar.ax.set_xticklabels(ticklabels)
cbar.ax.tick_params(labelsize = pyplot.rcParams['font.size'])
w, h = fig.get_size_inches()
fig.set_size_inches(w, w * aspect, forward = True)
common.savefig(fig, '{}.pdf'.format(common.get_filebase()))
common.savefig(fig, '{}.png'.format(common.get_filebase()))
def _get_infections_averted():
infections_averted = pandas.DataFrame(columns = interventions,
index = common.all_countries)
for country in common.all_countries:
print(country)
try:
rb = model.results.load(country, baseline)
except FileNotFoundError:
pass
else:
x = rb.new_infections[:, -1]
for intv in interventions:
try:
r = model.results.load(country, intv)
except FileNotFoundError:
pass
else:
y = r.new_infections[:, -1]
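                    # summarise the fraction of baseline infections averted,
                    # (x - y) / x, by its median across model samples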
infections_averted.loc[country, intv] = stats.median(
(x - y) / x)
return infections_averted
if __name__ == '__main__':
infections_averted = _get_infections_averted()
plot(infections_averted)
pyplot.show()
| agpl-3.0 |
chrisburr/scikit-learn | sklearn/mixture/gmm.py | 19 | 30655 | """
Gaussian Mixture Models.
This implementation corresponds to frequentist (non-Bayesian) formulation
of Gaussian Mixture Models.
"""
# Author: Ron Weiss <[email protected]>
# Fabian Pedregosa <[email protected]>
# Bertrand Thirion <[email protected]>
import warnings
import numpy as np
from scipy import linalg
from time import time
from ..base import BaseEstimator
from ..utils import check_random_state, check_array
from ..utils.extmath import logsumexp
from ..utils.validation import check_is_fitted
from .. import cluster
from sklearn.externals.six.moves import zip
EPS = np.finfo(float).eps
def log_multivariate_normal_density(X, means, covars, covariance_type='diag'):
"""Compute the log probability under a multivariate Gaussian distribution.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row corresponds to a
single data point.
means : array_like, shape (n_components, n_features)
List of n_features-dimensional mean vectors for n_components Gaussians.
Each row corresponds to a single mean vector.
covars : array_like
List of n_components covariance parameters for each Gaussian. The shape
depends on `covariance_type`:
(n_components, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
covariance_type : string
Type of the covariance parameters. Must be one of
'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
Returns
-------
lpr : array_like, shape (n_samples, n_components)
Array containing the log probabilities of each data point in
X under each of the n_components multivariate Gaussian distributions.
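    Examples
    --------
    A minimal shape sketch for the default 'diag' parametrization::
    >>> import numpy as np
    >>> from sklearn.mixture.gmm import log_multivariate_normal_density
    >>> X = np.array([[0.], [1.]])
    >>> log_multivariate_normal_density(X, np.array([[0.]]),
    ...                                 np.array([[1.]])).shape
    (2, 1)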
"""
log_multivariate_normal_density_dict = {
'spherical': _log_multivariate_normal_density_spherical,
'tied': _log_multivariate_normal_density_tied,
'diag': _log_multivariate_normal_density_diag,
'full': _log_multivariate_normal_density_full}
return log_multivariate_normal_density_dict[covariance_type](
X, means, covars)
def sample_gaussian(mean, covar, covariance_type='diag', n_samples=1,
random_state=None):
"""Generate random samples from a Gaussian distribution.
Parameters
----------
mean : array_like, shape (n_features,)
Mean of the distribution.
covar : array_like, optional
Covariance of the distribution. The shape depends on `covariance_type`:
scalar if 'spherical',
(n_features) if 'diag',
(n_features, n_features) if 'tied', or 'full'
covariance_type : string, optional
Type of the covariance parameters. Must be one of
'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array, shape (n_features, n_samples)
Randomly generated sample
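    Examples
    --------
    A minimal sketch; only the output shape is checked here, since the draw
    itself is random::
    >>> from sklearn.mixture.gmm import sample_gaussian
    >>> sample_gaussian([0., 0.], [1., 1.], 'diag', n_samples=5,
    ...                 random_state=0).shape
    (2, 5)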
"""
rng = check_random_state(random_state)
n_dim = len(mean)
rand = rng.randn(n_dim, n_samples)
if n_samples == 1:
rand.shape = (n_dim,)
if covariance_type == 'spherical':
rand *= np.sqrt(covar)
elif covariance_type == 'diag':
rand = np.dot(np.diag(np.sqrt(covar)), rand)
else:
s, U = linalg.eigh(covar)
s.clip(0, out=s) # get rid of tiny negatives
np.sqrt(s, out=s)
U *= s
rand = np.dot(U, rand)
return (rand.T + mean).T
class GMM(BaseEstimator):
"""Gaussian Mixture Model
Representation of a Gaussian mixture model probability distribution.
This class allows for easy evaluation of, sampling from, and
maximum-likelihood estimation of the parameters of a GMM distribution.
Initializes parameters such that every mixture component has zero
mean and identity covariance.
Read more in the :ref:`User Guide <gmm>`.
Parameters
----------
n_components : int, optional
Number of mixture components. Defaults to 1.
covariance_type : string, optional
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
Defaults to 'diag'.
random_state: RandomState or an int seed (None by default)
A random number generator instance
min_covar : float, optional
Floor on the diagonal of the covariance matrix to prevent
overfitting. Defaults to 1e-3.
tol : float, optional
Convergence threshold. EM iterations will stop when average
gain in log-likelihood is below this threshold. Defaults to 1e-3.
n_iter : int, optional
Number of EM iterations to perform.
n_init : int, optional
        Number of initializations to perform. The best result is kept.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
init_params : string, optional
Controls which parameters are updated in the initialization
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
verbose : int, default: 0
Enable verbose output. If 1 then it always prints the current
initialization and iteration step. If greater than 1 then
it prints additionally the change and time needed for each step.
Attributes
----------
weights_ : array, shape (`n_components`,)
This attribute stores the mixing weights for each mixture component.
means_ : array, shape (`n_components`, `n_features`)
Mean parameters for each mixture component.
covars_ : array
Covariance parameters for each mixture component. The shape
depends on `covariance_type`::
(n_components, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
converged_ : bool
True when convergence was reached in fit(), False otherwise.
See Also
--------
DPGMM : Infinite gaussian mixture model, using the dirichlet
process, fit with a variational algorithm
VBGMM : Finite gaussian mixture model fit with a variational
algorithm, better for situations where there might be too little
data to get a good estimate of the covariance matrix.
Examples
--------
>>> import numpy as np
>>> from sklearn import mixture
>>> np.random.seed(1)
>>> g = mixture.GMM(n_components=2)
>>> # Generate random observations with two modes centered on 0
>>> # and 10 to use for training.
>>> obs = np.concatenate((np.random.randn(100, 1),
... 10 + np.random.randn(300, 1)))
>>> g.fit(obs) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, tol=0.001, verbose=0)
>>> np.round(g.weights_, 2)
array([ 0.75, 0.25])
>>> np.round(g.means_, 2)
array([[ 10.05],
[ 0.06]])
>>> np.round(g.covars_, 2) #doctest: +SKIP
array([[[ 1.02]],
[[ 0.96]]])
>>> g.predict([[0], [2], [9], [10]]) #doctest: +ELLIPSIS
array([1, 1, 0, 0]...)
>>> np.round(g.score([[0], [2], [9], [10]]), 2)
array([-2.19, -4.58, -1.75, -1.21])
>>> # Refit the model on new data (initial parameters remain the
>>> # same), this time with an even split between the two modes.
>>> g.fit(20 * [[0]] + 20 * [[10]]) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, tol=0.001, verbose=0)
>>> np.round(g.weights_, 2)
array([ 0.5, 0.5])
"""
def __init__(self, n_components=1, covariance_type='diag',
random_state=None, tol=1e-3, min_covar=1e-3,
n_iter=100, n_init=1, params='wmc', init_params='wmc',
verbose=0):
self.n_components = n_components
self.covariance_type = covariance_type
self.tol = tol
self.min_covar = min_covar
self.random_state = random_state
self.n_iter = n_iter
self.n_init = n_init
self.params = params
self.init_params = init_params
self.verbose = verbose
if covariance_type not in ['spherical', 'tied', 'diag', 'full']:
raise ValueError('Invalid value for covariance_type: %s' %
covariance_type)
if n_init < 1:
raise ValueError('GMM estimation requires at least one run')
self.weights_ = np.ones(self.n_components) / self.n_components
# flag to indicate exit status of fit() method: converged (True) or
# n_iter reached (False)
self.converged_ = False
def _get_covars(self):
"""Covariance parameters for each mixture component.
        The shape depends on ``covariance_type``::
(n_states, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_states, n_features) if 'diag',
(n_states, n_features, n_features) if 'full'
"""
if self.covariance_type == 'full':
return self.covars_
elif self.covariance_type == 'diag':
return [np.diag(cov) for cov in self.covars_]
elif self.covariance_type == 'tied':
return [self.covars_] * self.n_components
elif self.covariance_type == 'spherical':
return [np.diag(cov) for cov in self.covars_]
def _set_covars(self, covars):
"""Provide values for covariance"""
covars = np.asarray(covars)
_validate_covars(covars, self.covariance_type, self.n_components)
self.covars_ = covars
def score_samples(self, X):
"""Return the per-sample likelihood of the data under the model.
Compute the log probability of X under the model and
return the posterior distribution (responsibilities) of each
mixture component for each element of X.
Parameters
----------
X: array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X.
responsibilities : array_like, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation
"""
check_is_fitted(self, 'means_')
X = check_array(X)
if X.ndim == 1:
X = X[:, np.newaxis]
if X.size == 0:
return np.array([]), np.empty((0, self.n_components))
if X.shape[1] != self.means_.shape[1]:
raise ValueError('The shape of X is not compatible with self')
lpr = (log_multivariate_normal_density(X, self.means_, self.covars_,
self.covariance_type) +
np.log(self.weights_))
logprob = logsumexp(lpr, axis=1)
responsibilities = np.exp(lpr - logprob[:, np.newaxis])
return logprob, responsibilities
def score(self, X, y=None):
"""Compute the log probability under the model.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X
"""
logprob, _ = self.score_samples(X)
return logprob
def predict(self, X):
"""Predict label for data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,) component memberships
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities.argmax(axis=1)
def predict_proba(self, X):
"""Predict posterior probability of data under each Gaussian
in the model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
responsibilities : array-like, shape = (n_samples, n_components)
Returns the probability of the sample for each Gaussian
(state) in the model.
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities
def sample(self, n_samples=1, random_state=None):
"""Generate random samples from the model.
Parameters
----------
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array_like, shape (n_samples, n_features)
List of samples
"""
check_is_fitted(self, 'means_')
if random_state is None:
random_state = self.random_state
random_state = check_random_state(random_state)
weight_cdf = np.cumsum(self.weights_)
X = np.empty((n_samples, self.means_.shape[1]))
rand = random_state.rand(n_samples)
# decide which component to use for each sample
comps = weight_cdf.searchsorted(rand)
# for each component, generate all needed samples
for comp in range(self.n_components):
# occurrences of current component in X
comp_in_X = (comp == comps)
# number of those occurrences
num_comp_in_X = comp_in_X.sum()
if num_comp_in_X > 0:
if self.covariance_type == 'tied':
cv = self.covars_
elif self.covariance_type == 'spherical':
cv = self.covars_[comp][0]
else:
cv = self.covars_[comp]
X[comp_in_X] = sample_gaussian(
self.means_[comp], cv, self.covariance_type,
num_comp_in_X, random_state=random_state).T
return X
def fit_predict(self, X, y=None):
"""Fit and then predict labels for data.
        Warning: due to the final maximization step in the EM algorithm,
        the prediction may not be 100% accurate when only a few iterations
        are performed.
.. versionadded:: 0.17
*fit_predict* method in Gaussian Mixture Model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,) component memberships
"""
return self._fit(X, y).argmax(axis=1)
def _fit(self, X, y=None, do_prediction=False):
"""Estimate model parameters with the EM algorithm.
        An initialization step is performed before entering the
expectation-maximization (EM) algorithm. If you want to avoid
this step, set the keyword argument init_params to the empty
string '' when creating the GMM object. Likewise, if you would
like just to do an initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
responsibilities : array, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation.
"""
# initialization step
X = check_array(X, dtype=np.float64, ensure_min_samples=2,
estimator=self)
if X.shape[0] < self.n_components:
raise ValueError(
'GMM estimation with %s components, but got only %s samples' %
(self.n_components, X.shape[0]))
max_log_prob = -np.infty
if self.verbose > 0:
print('Expectation-maximization algorithm started.')
for init in range(self.n_init):
if self.verbose > 0:
print('Initialization ' + str(init + 1))
start_init_time = time()
if 'm' in self.init_params or not hasattr(self, 'means_'):
self.means_ = cluster.KMeans(
n_clusters=self.n_components,
random_state=self.random_state).fit(X).cluster_centers_
if self.verbose > 1:
print('\tMeans have been initialized.')
if 'w' in self.init_params or not hasattr(self, 'weights_'):
self.weights_ = np.tile(1.0 / self.n_components,
self.n_components)
if self.verbose > 1:
print('\tWeights have been initialized.')
if 'c' in self.init_params or not hasattr(self, 'covars_'):
cv = np.cov(X.T) + self.min_covar * np.eye(X.shape[1])
if not cv.shape:
cv.shape = (1, 1)
self.covars_ = \
distribute_covar_matrix_to_match_covariance_type(
cv, self.covariance_type, self.n_components)
if self.verbose > 1:
print('\tCovariance matrices have been initialized.')
# EM algorithms
current_log_likelihood = None
# reset self.converged_ to False
self.converged_ = False
for i in range(self.n_iter):
if self.verbose > 0:
print('\tEM iteration ' + str(i + 1))
start_iter_time = time()
prev_log_likelihood = current_log_likelihood
# Expectation step
log_likelihoods, responsibilities = self.score_samples(X)
current_log_likelihood = log_likelihoods.mean()
# Check for convergence.
if prev_log_likelihood is not None:
change = abs(current_log_likelihood - prev_log_likelihood)
if self.verbose > 1:
print('\t\tChange: ' + str(change))
if change < self.tol:
self.converged_ = True
if self.verbose > 0:
print('\t\tEM algorithm converged.')
break
# Maximization step
self._do_mstep(X, responsibilities, self.params,
self.min_covar)
if self.verbose > 1:
print('\t\tEM iteration ' + str(i + 1) + ' took {0:.5f}s'.format(
time() - start_iter_time))
# if the results are better, keep it
if self.n_iter:
if current_log_likelihood > max_log_prob:
max_log_prob = current_log_likelihood
best_params = {'weights': self.weights_,
'means': self.means_,
'covars': self.covars_}
if self.verbose > 1:
print('\tBetter parameters were found.')
if self.verbose > 1:
print('\tInitialization ' + str(init + 1) + ' took {0:.5f}s'.format(
time() - start_init_time))
# check the existence of an init param that was not subject to
# likelihood computation issue.
if np.isneginf(max_log_prob) and self.n_iter:
raise RuntimeError(
"EM algorithm was never able to compute a valid likelihood " +
"given initial parameters. Try different init parameters " +
"(or increasing n_init) or check for degenerate data.")
if self.n_iter:
self.covars_ = best_params['covars']
self.means_ = best_params['means']
self.weights_ = best_params['weights']
else: # self.n_iter == 0 occurs when using GMM within HMM
# Need to make sure that there are responsibilities to output
# Output zeros because it was just a quick initialization
responsibilities = np.zeros((X.shape[0], self.n_components))
return responsibilities
def fit(self, X, y=None):
"""Estimate model parameters with the EM algorithm.
        An initialization step is performed before entering the
expectation-maximization (EM) algorithm. If you want to avoid
this step, set the keyword argument init_params to the empty
string '' when creating the GMM object. Likewise, if you would
like just to do an initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
self
"""
self._fit(X, y)
return self
def _do_mstep(self, X, responsibilities, params, min_covar=0):
""" Perform the Mstep of the EM algorithm and return the class weights
"""
weights = responsibilities.sum(axis=0)
weighted_X_sum = np.dot(responsibilities.T, X)
inverse_weights = 1.0 / (weights[:, np.newaxis] + 10 * EPS)
if 'w' in params:
self.weights_ = (weights / (weights.sum() + 10 * EPS) + EPS)
if 'm' in params:
self.means_ = weighted_X_sum * inverse_weights
if 'c' in params:
covar_mstep_func = _covar_mstep_funcs[self.covariance_type]
self.covars_ = covar_mstep_func(
self, X, responsibilities, weighted_X_sum, inverse_weights,
min_covar)
return weights
def _n_parameters(self):
"""Return the number of free parameters in the model."""
ndim = self.means_.shape[1]
if self.covariance_type == 'full':
cov_params = self.n_components * ndim * (ndim + 1) / 2.
elif self.covariance_type == 'diag':
cov_params = self.n_components * ndim
elif self.covariance_type == 'tied':
cov_params = ndim * (ndim + 1) / 2.
elif self.covariance_type == 'spherical':
cov_params = self.n_components
mean_params = ndim * self.n_components
return int(cov_params + mean_params + self.n_components - 1)
def bic(self, X):
"""Bayesian information criterion for the current model fit
and the proposed data
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
bic: float (the lower the better)
"""
return (-2 * self.score(X).sum() +
self._n_parameters() * np.log(X.shape[0]))
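    # Illustrative model-selection sketch (not part of the class API):
    # compare the criterion across candidate component counts and keep the
    # smallest value, e.g.
    #   best_k = min(range(1, 6),
    #                key=lambda k: GMM(n_components=k).fit(X).bic(X))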
def aic(self, X):
"""Akaike information criterion for the current model fit
and the proposed data
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
aic: float (the lower the better)
"""
return - 2 * self.score(X).sum() + 2 * self._n_parameters()
#########################################################################
# some helper routines
#########################################################################
def _log_multivariate_normal_density_diag(X, means, covars):
"""Compute Gaussian log-density at X for a diagonal model"""
n_samples, n_dim = X.shape
lpr = -0.5 * (n_dim * np.log(2 * np.pi) + np.sum(np.log(covars), 1)
+ np.sum((means ** 2) / covars, 1)
- 2 * np.dot(X, (means / covars).T)
+ np.dot(X ** 2, (1.0 / covars).T))
return lpr
def _log_multivariate_normal_density_spherical(X, means, covars):
"""Compute Gaussian log-density at X for a spherical model"""
cv = covars.copy()
if covars.ndim == 1:
cv = cv[:, np.newaxis]
if covars.shape[1] == 1:
cv = np.tile(cv, (1, X.shape[-1]))
return _log_multivariate_normal_density_diag(X, means, cv)
def _log_multivariate_normal_density_tied(X, means, covars):
"""Compute Gaussian log-density at X for a tied model"""
cv = np.tile(covars, (means.shape[0], 1, 1))
return _log_multivariate_normal_density_full(X, means, cv)
def _log_multivariate_normal_density_full(X, means, covars, min_covar=1.e-7):
"""Log probability for full covariance matrices."""
n_samples, n_dim = X.shape
nmix = len(means)
log_prob = np.empty((n_samples, nmix))
for c, (mu, cv) in enumerate(zip(means, covars)):
try:
cv_chol = linalg.cholesky(cv, lower=True)
except linalg.LinAlgError:
# The model is most probably stuck in a component with too
# few observations, we need to reinitialize this components
try:
cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dim),
lower=True)
except linalg.LinAlgError:
raise ValueError("'covars' must be symmetric, "
"positive-definite")
cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))
cv_sol = linalg.solve_triangular(cv_chol, (X - mu).T, lower=True).T
log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) +
n_dim * np.log(2 * np.pi) + cv_log_det)
return log_prob
def _validate_covars(covars, covariance_type, n_components):
"""Do basic checks on matrix covariance sizes and values
"""
from scipy import linalg
if covariance_type == 'spherical':
if len(covars) != n_components:
            raise ValueError("'spherical' covars must have length n_components")
elif np.any(covars <= 0):
            raise ValueError("'spherical' covars must be positive")
elif covariance_type == 'tied':
if covars.shape[0] != covars.shape[1]:
raise ValueError("'tied' covars must have shape (n_dim, n_dim)")
elif (not np.allclose(covars, covars.T)
or np.any(linalg.eigvalsh(covars) <= 0)):
raise ValueError("'tied' covars must be symmetric, "
"positive-definite")
elif covariance_type == 'diag':
if len(covars.shape) != 2:
raise ValueError("'diag' covars must have shape "
"(n_components, n_dim)")
elif np.any(covars <= 0):
            raise ValueError("'diag' covars must be positive")
elif covariance_type == 'full':
if len(covars.shape) != 3:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
elif covars.shape[1] != covars.shape[2]:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
for n, cv in enumerate(covars):
if (not np.allclose(cv, cv.T)
or np.any(linalg.eigvalsh(cv) <= 0)):
raise ValueError("component %d of 'full' covars must be "
"symmetric, positive-definite" % n)
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
def distribute_covar_matrix_to_match_covariance_type(
tied_cv, covariance_type, n_components):
"""Create all the covariance matrices from a given template"""
if covariance_type == 'spherical':
cv = np.tile(tied_cv.mean() * np.ones(tied_cv.shape[1]),
(n_components, 1))
elif covariance_type == 'tied':
cv = tied_cv
elif covariance_type == 'diag':
cv = np.tile(np.diag(tied_cv), (n_components, 1))
elif covariance_type == 'full':
cv = np.tile(tied_cv, (n_components, 1, 1))
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
return cv
def _covar_mstep_diag(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Performing the covariance M step for diagonal cases"""
avg_X2 = np.dot(responsibilities.T, X * X) * norm
avg_means2 = gmm.means_ ** 2
avg_X_means = gmm.means_ * weighted_X_sum * norm
return avg_X2 - 2 * avg_X_means + avg_means2 + min_covar
def _covar_mstep_spherical(*args):
"""Performing the covariance M step for spherical cases"""
cv = _covar_mstep_diag(*args)
return np.tile(cv.mean(axis=1)[:, np.newaxis], (1, cv.shape[1]))
def _covar_mstep_full(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Performing the covariance M step for full cases"""
# Eq. 12 from K. Murphy, "Fitting a Conditional Linear Gaussian
# Distribution"
n_features = X.shape[1]
cv = np.empty((gmm.n_components, n_features, n_features))
for c in range(gmm.n_components):
post = responsibilities[:, c]
mu = gmm.means_[c]
diff = X - mu
with np.errstate(under='ignore'):
# Underflow Errors in doing post * X.T are not important
avg_cv = np.dot(post * diff.T, diff) / (post.sum() + 10 * EPS)
cv[c] = avg_cv + min_covar * np.eye(n_features)
return cv
def _covar_mstep_tied(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
# Eq. 15 from K. Murphy, "Fitting a Conditional Linear Gaussian
# Distribution"
avg_X2 = np.dot(X.T, X)
avg_means2 = np.dot(gmm.means_.T, weighted_X_sum)
out = avg_X2 - avg_means2
out *= 1. / X.shape[0]
out.flat[::len(out) + 1] += min_covar
return out
_covar_mstep_funcs = {'spherical': _covar_mstep_spherical,
'diag': _covar_mstep_diag,
'tied': _covar_mstep_tied,
'full': _covar_mstep_full,
}
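# --- Usage sketch (illustrative only, never called by the library code) ---
# The bic()/aic() methods above exist for model selection.  The helper below
# spells out the usual pattern; it assumes the mixture class defined earlier
# in this module is exposed as ``GMM`` (not visible in this excerpt), so the
# class name should be treated as an assumption.
def _example_bic_model_selection():
    """Illustrative only: pick n_components by the lowest BIC."""
    rng = np.random.RandomState(0)
    X = np.vstack([rng.randn(100, 2), rng.randn(100, 2) + 5.0])
    scored = []
    for n_components in range(1, 6):
        model = GMM(n_components=n_components, covariance_type='full')
        model.fit(X)
        scored.append((model.bic(X), n_components))
    # the candidate with the lowest BIC is preferred
    best_bic, best_n = min(scored)
    return best_n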
| bsd-3-clause |
dcherian/pyroms | pyroms_toolbox/pyroms_toolbox/TS_diagram.py | 1 | 1601 | import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
import pyroms
import pyroms_toolbox
def TS_diagram(temp, salt, depth=None, dens_lev=None, marker_size=2, fmt='%2.2f', pal=cm.spectral, \
tlim='None', slim='None', outfile=None):
if dens_lev is None:
dens_lev = np.arange(10,36,1)
if depth is None:
diag = plt.scatter(salt.flatten(), temp.flatten(), s=marker_size, edgecolors='none')
else:
diag = plt.scatter(salt.flatten(), temp.flatten(), c=depth.flatten(), \
s=marker_size, edgecolors='none', cmap=pal)
plt.colorbar(diag, shrink=0.9, aspect=40)
ax = plt.gca()
if tlim == 'None':
tlim = ax.get_ylim()
else:
tlim = tlim
if slim == 'None':
slim = ax.get_xlim()
else:
slim = slim
s = np.arange(slim[0],slim[1]+0.01,0.01)
t = np.arange(tlim[0],tlim[1]+0.1,0.1)
T, S = np.meshgrid(t,s)
density = pyroms_toolbox.seawater.dens(S,T) - 1000.
dc = plt.contour(s,t,density.T, levels=dens_lev, colors='k')
plt.clabel(dc, inline=1, fontsize=10, fmt=fmt)
ax.set_xlim(slim)
ax.set_ylim(tlim)
if outfile is not None:
if outfile.find('.png') != -1 or outfile.find('.svg') != -1 or \
outfile.find('.eps') != -1:
print 'Write figure to file', outfile
plt.savefig(outfile, dpi=200, facecolor='w', edgecolor='w', \
orientation='portrait')
else:
print 'Unrecognized file extension. Please use .png, .svg or .eps file extension.'
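# --- Usage sketch (illustrative only, never called) ---
# Typical call pattern for TS_diagram(), assuming temp/salt/depth are numpy
# arrays of identical shape (e.g. slices read from a ROMS history file).  The
# density levels and output file name below are arbitrary examples.
def _example_TS_diagram(temp, salt, depth):
    TS_diagram(temp, salt, depth=depth, dens_lev=np.arange(20, 30, 0.5),
               marker_size=4, outfile='ts_diagram.png')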
| bsd-3-clause |
bradkav/AntiparticleDM | analysis/PlotExposure.py | 1 | 4923 | #!/usr/bin/python
"""
PlotExposure.py
Plot discrimination significance as a function of exposure.
BJK 23/06/2017
"""
import numpy as np
from numpy import pi
from scipy.integrate import quad
from scipy.interpolate import interp1d, interp2d
from scipy import ndimage
from matplotlib.ticker import MultipleLocator
import os.path
import sys
import CalcParamPoint as CPP
#------ Matplotlib parameters ------
import matplotlib.pyplot as pl
import matplotlib as mpl
import matplotlib.colors as colors
font = {'family' : 'sans-serif',
'size' : 16}
#Edit to 16 here!
mpl.rcParams['xtick.major.size'] = 8
mpl.rcParams['xtick.major.width'] = 1
mpl.rcParams['xtick.minor.size'] = 3
mpl.rcParams['xtick.minor.width'] = 1
mpl.rcParams['ytick.major.size'] = 8
mpl.rcParams['ytick.major.width'] = 1
mpl.rcParams['ytick.minor.size'] = 3
mpl.rcParams['ytick.minor.width'] = 1
mpl.rcParams['axes.linewidth'] = 1.5
mpl.rc('font', **font)
#---------------------------------
#----Run Parameters---
mx = 50
R0 = sys.argv[1]
print " Plotting discrimination significance for lambda_n/lambda_p = " + str(R0)+ " ..."
#---Functions----
#Read in a list of significances for a given exposure (expID)
#for a given reconstruction (reconID)
def getSigvals_exp(reconID, expID):
#Filename for results file
fname = "../results/" + reconID + "_exposure/Results_r" + R0 + "_exp" + str(expID) +'.txt'
#Check if the file exists (and clean up the data if necessary)
if (os.path.exists(fname)):
data=np.loadtxt(fname)
#Set infinite values to a large number...(say 10 sigma)
data[data == float('+inf')] = 10.0
data = data[~np.isnan(data)]
if (len(data) == 0):
data = [0]
else:
print " Error: File not found - " + fname
data = np.zeros(1)
return np.sort(data)
#Calculate significance (median, mean, upper, lower) for a given point and reconstruction
def getSignificance_exp(reconID, expID, kind="Median"):
sigvals = getSigvals_exp(reconID, expID)
Nsamps = len(sigvals)
if (kind == "Mean"):
return np.mean(sigvals)
if (kind == "Median"):
return np.median(sigvals)
if (kind == "Upper"):
return np.percentile(sigvals, 84.0)
if (kind == "Lower"):
return np.percentile(sigvals, 16.0)
ind = int(np.round(Nsamps*0.1))
return sigvals[ind]
#----Calculations---
Nvals = 32
exprange = np.round(np.logspace(2, 5, Nvals))
#Significances for ensemble A
sig_med_A = exprange*0.0
sig_upper_A = exprange*0.0
sig_lower_A = exprange*0.0
#Significances for ensemble D
sig_med_D = exprange*0.0 + 1e-45
sig_upper_D = exprange*0.0 + 1e-45
sig_lower_D = exprange*0.0 + 1e-45
reconID_A = "AP_ExptA_" + str(mx)
for i in range(Nvals):
sig_med_A[i] = getSignificance_exp(reconID_A, i+1, kind="Median") + 1e-45
sig_upper_A[i] = getSignificance_exp(reconID_A, i+1, kind="Upper") + 1e-45
sig_lower_A[i] = getSignificance_exp(reconID_A, i+1, kind="Lower") + 1e-45
reconID_D = "AP_ExptD_" + str(mx)
for i in range(Nvals):
sig_med_D[i] = getSignificance_exp(reconID_D, i+1, kind="Median") + 1e-45
sig_upper_D[i] = getSignificance_exp(reconID_D, i+1, kind="Upper") + 1e-45
sig_lower_D[i] = getSignificance_exp(reconID_D, i+1, kind="Lower") + 1e-45
#------Plotting---------
fig = pl.figure(figsize=(8,6))
ax1 = fig.add_subplot(111)
#Plot significance as a function of exposure (in ton years)
lmed_Si, = ax1.loglog(exprange/1e3, sig_med_A, linewidth=1.5, color='DarkGreen')
lband_Si = ax1.fill_between(exprange/1e3, sig_lower_A, sig_upper_A, color='ForestGreen', alpha = 0.4)
lmed_D, = ax1.loglog(exprange/1e3, sig_med_D, linewidth=1.5, color='DarkBlue')
lband_D = ax1.fill_between(exprange/1e3, sig_lower_D, sig_upper_D, color='Navy', alpha = 0.4)
leg = ax1.legend([(lmed_Si, lband_Si),(lmed_D, lband_D)], [r"Si", r"Ge + CaWO$_4$"], loc="lower right",frameon=False,fontsize=16.0)
#Sort out labels and ticks
ax1.text(0.12, 8.4, r"Fixed Xe + Ar exposure", ha="left")
ax1.text(0.12, 6.9, r"$m_\chi = " + str(mx) + "\,\,\mathrm{GeV}$; $f = -0.995$", ha="left")
ax1.text(0.12, 5.7, r"$\lambda_n/\lambda_p = "+ R0 + "$", ha="left")
#ax1.set_xlim(-1.00, -0.94)
ax1.set_ylim(0.5, 10)
ax1.set_yticks([0.5, 1, 2, 3, 4, 5, 10])
ax1.set_yticklabels([r'$0.5\sigma$', r'$1\sigma$',\
r'$2\sigma$',r'$3\sigma$',r'$4\sigma$', r'$5\sigma$', r'$10\sigma$'])
ax1.set_xticks([0.1, 1, 10, 100])
ax1.set_xticklabels([0.1, 1, 10, 100])
ax1.set_ylabel('Discrimination significance',fontsize=18)
ax1.set_xlabel('Exposure [ton yr]',fontsize=18)
#Add some lines for 3, 5 sigma and 3 ton years
ax1.axvline(3, ymin=0, ymax = 1.00,linestyle='--', color='k')
ax1.axhline(3, linestyle=':', color='k',linewidth=1.5)
ax1.axhline(5, linestyle=':', color='k', linewidth=1.5)
pl.tight_layout()
pl.savefig("../plots/Exposure_R="+R0+".pdf", bbox_inches="tight")
#pl.show() | mit |
fzalkow/scikit-learn | examples/feature_selection/plot_permutation_test_for_classification.py | 250 | 2233 | """
=================================================================
Test with permutations the significance of a classification score
=================================================================
In order to test if a classification score is significant, a technique
consists in repeating the classification procedure after randomly permuting
the labels. The p-value is then given by the percentage of runs for
which the score obtained is greater than the classification score
obtained in the first place.
"""
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.cross_validation import StratifiedKFold, permutation_test_score
from sklearn import datasets
##############################################################################
# Loading a dataset
iris = datasets.load_iris()
X = iris.data
y = iris.target
n_classes = np.unique(y).size
# Some noisy data not correlated
random = np.random.RandomState(seed=0)
E = random.normal(size=(len(X), 2200))
# Add noisy data to the informative features for make the task harder
X = np.c_[X, E]
svm = SVC(kernel='linear')
cv = StratifiedKFold(y, 2)
score, permutation_scores, pvalue = permutation_test_score(
svm, X, y, scoring="accuracy", cv=cv, n_permutations=100, n_jobs=1)
print("Classification score %s (pvalue : %s)" % (score, pvalue))
###############################################################################
# View histogram of permutation scores
plt.hist(permutation_scores, 20, label='Permutation scores')
ylim = plt.ylim()
# BUG: vlines(..., linestyle='--') fails on older versions of matplotlib
#plt.vlines(score, ylim[0], ylim[1], linestyle='--',
# color='g', linewidth=3, label='Classification Score'
# ' (pvalue %s)' % pvalue)
#plt.vlines(1.0 / n_classes, ylim[0], ylim[1], linestyle='--',
# color='k', linewidth=3, label='Luck')
plt.plot(2 * [score], ylim, '--g', linewidth=3,
label='Classification Score'
' (pvalue %s)' % pvalue)
plt.plot(2 * [1. / n_classes], ylim, '--k', linewidth=3, label='Luck')
plt.ylim(ylim)
plt.legend()
plt.xlabel('Score')
plt.show()
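###############################################################################
# Illustration: the p-value reported above should correspond to
# (C + 1) / (n_permutations + 1), where C counts the permutations whose score
# is at least as good as the score on the true labels.  Recomputing it from
# the permutation scores makes that definition explicit.
manual_pvalue = (np.sum(permutation_scores >= score) + 1.0) / \
                (len(permutation_scores) + 1.0)
print("Manually recomputed p-value: %s" % manual_pvalue)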
| bsd-3-clause |
meteorcloudy/tensorflow | tensorflow/examples/learn/multiple_gpu.py | 39 | 3957 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of using Estimator with multiple GPUs to distribute one model.
This example only runs if you have multiple GPUs to assign to.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import datasets
from sklearn import metrics
from sklearn import model_selection
import tensorflow as tf
X_FEATURE = 'x' # Name of the input feature.
def my_model(features, labels, mode):
"""DNN with three hidden layers, and dropout of 0.1 probability.
Note: If you want to run this example with multiple GPUs, Cuda Toolkit 7.0 and
CUDNN 6.5 V2 from NVIDIA need to be installed beforehand.
Args:
features: Dict of input `Tensor`.
labels: Label `Tensor`.
mode: One of `ModeKeys`.
Returns:
`EstimatorSpec`.
"""
# Create three fully connected layers respectively of size 10, 20, and 10 with
# each layer having a dropout probability of 0.1.
net = features[X_FEATURE]
with tf.device('/device:GPU:1'):
for units in [10, 20, 10]:
net = tf.layers.dense(net, units=units, activation=tf.nn.relu)
net = tf.layers.dropout(net, rate=0.1)
with tf.device('/device:GPU:2'):
# Compute logits (1 per class).
logits = tf.layers.dense(net, 3, activation=None)
# Compute predictions.
predicted_classes = tf.argmax(logits, 1)
if mode == tf.estimator.ModeKeys.PREDICT:
predictions = {
'class': predicted_classes,
'prob': tf.nn.softmax(logits)
}
return tf.estimator.EstimatorSpec(mode, predictions=predictions)
# Compute loss.
loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
# Create training op.
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.AdagradOptimizer(learning_rate=0.1)
train_op = optimizer.minimize(
loss, global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
# Compute evaluation metrics.
eval_metric_ops = {
'accuracy': tf.metrics.accuracy(
labels=labels, predictions=predicted_classes)
}
return tf.estimator.EstimatorSpec(
mode, loss=loss, eval_metric_ops=eval_metric_ops)
def main(unused_argv):
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = model_selection.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
classifier = tf.estimator.Estimator(model_fn=my_model)
# Train.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={X_FEATURE: x_train}, y=y_train, num_epochs=None, shuffle=True)
classifier.train(input_fn=train_input_fn, steps=100)
# Predict.
test_input_fn = tf.estimator.inputs.numpy_input_fn(
x={X_FEATURE: x_test}, y=y_test, num_epochs=1, shuffle=False)
predictions = classifier.predict(input_fn=test_input_fn)
y_predicted = np.array(list(p['class'] for p in predictions))
y_predicted = y_predicted.reshape(np.array(y_test).shape)
# Score with sklearn.
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy (sklearn): {0:f}'.format(score))
# Score with tensorflow.
scores = classifier.evaluate(input_fn=test_input_fn)
print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy']))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
lpryszcz/bin | modifications2signatures.py | 1 | 10514 | #!/usr/bin/env python
desc="""Generate RT signatures for modifications
"""
epilog="""Author:
[email protected]
Warsaw, 1/12/2017
"""
import os, sys
reload(sys)
sys.setdefaultencoding('utf8')
import re, subprocess
from datetime import datetime
from collections import Counter
from modifications2rna import fasta_parser, table2modifications
import numpy as np
import matplotlib
matplotlib.use('Agg') # Force matplotlib to not use any Xwindows backend
import matplotlib.pyplot as plt
import urllib, urllib2
#find stats of the reads in mpileup
##http://samtools.sourceforge.net/pileup.shtml
read_start_pat = re.compile('\^.')
indel_pat = re.compile('[+-]\d+')
def load_modifications(rna, wt=set('ACGU'), log=sys.stderr):
"""Return dictionary with modifications for each ref.
Coordinates are 1-based / GTF style"""
log.write("Loading modifications...\n")
# parse fasta
ii = 0
mods, unmods = {}, {}
for name, seq in fasta_parser(rna):
# store bases
for i, b in enumerate(seq, 1):
ii += 1
if b=="_":
pass
elif b in wt:
if b not in unmods:
unmods[b] = []
else:
if b not in mods:
mods[b] = []
mods[b].append("%s:%s"%(name,i))
log.write(" %s bases with %s modifications (%s unique)\n"%(ii, sum(map(len, mods.itervalues())), len(mods)))
return mods, unmods
def _remove_indels(alts):
"""Return mpileup without indels and read start/end marks and number of insertions and deletions at given position
.$....,,,,....,.,,..,,.,.,,,,,,,....,.,...,.,.,....,,,........,.A.,...,,......^0.^+.^$.^0.^8.^F.^].^],
........,.-25ATCTGGTGGTTGGGATGTTGCCGCT..
"""
ends = alts.count('$')
# But first strip start/end read info.
starts = read_start_pat.split(alts)
alts = "".join(starts).replace('$', '')
ends += len(starts)-1
# count indels
indels = {"+": 0, "-": alts.count('*')}
# remove indels info
m = indel_pat.search(alts)
while m:
# remove indel
pos = m.end() + int(m.group()[1:])
# count insertions and deletions
indels[m.group()[0]] += 1
alts = alts[:m.start()] + alts[pos:]
# get next match
m = indel_pat.search(alts, m.start())
return alts, indels["+"], indels["-"], ends
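# --- Illustrative trace of _remove_indels(), never called anywhere ---
# Uses the second mpileup string quoted in the docstring above: the "-25..."
# deletion annotation is stripped from the alignment string and counted
# separately.  The expected values in the comment are a reading of the code
# above, not verified program output.
def _example_remove_indels():
    alts = '........,.-25ATCTGGTGGTTGGGATGTTGCCGCT..'
    cleaned, n_ins, n_del, n_ends = _remove_indels(alts)
    # expected: cleaned == '........,...', n_ins == 0, n_del == 1, n_ends == 0
    return cleaned, n_ins, n_del, n_ends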
def genotype_region(bams, dna, region, minDepth, mpileup_opts, alphabet='ACGT'):
"""Start mpileup"""
# open subprocess #'-f', dna,
args = ['samtools', 'mpileup', '-r', region] + mpileup_opts.split() + bams
proc = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) #, bufsize=65536)
# process lines
data, ends = [], []
for line in proc.stdout:
data.append([])
ends.append([])
lineTuple = line.strip().split('\t')
# get coordinate
contig, pos, baseRef = lineTuple[:3]
baseRef, refFreq = [baseRef], [1.0]
samplesData = lineTuple[3:]
# process bam files
for alg in samplesData[1::3]:
# remove indels & get base counts
alts, ins, dels, _ends = _remove_indels(alg)
counts = [alts.upper().count(b) for b in alphabet] + [ins, dels]
data[-1].append(counts)
ends[-1].append(_ends)
'''if sum(counts)>=minDepth:
data[-1].append(counts)
else:
data[-1].append([0]*len(counts))'''
return data, ends
def array2plot(outfn, mod, title, cm, pos, window, width=0.75, alphabet='ACGT+-',
colors=['green', 'blue', 'orange', 'red', 'grey', 'black']):
"""Genotype positions"""
fig = plt.figure(figsize=(7, 4+3*len(pos)))
fig.suptitle(title, fontsize=20)
ind = np.arange(-window-width/2, window)
for j in range(cm.shape[0]):
ax = fig.add_subplot(len(pos), 1, j+1)
ax.set_title(pos[j])
ax.set_ylim(0,1)
# plot stacked barplot
bottom = np.zeros(len(ind))
for i in range(len(ind)):
p = ax.bar(ind, cm[j,:,i], width, label=alphabet[i], color=colors[i], bottom=bottom)
bottom += cm[j,:,i]
#fig.show()#; sys.exit() #format=fformat,
fig.savefig(outfn, dpi=100, orientation='landscape', transparent=False)
if len(pos)>1:
fig = plt.figure(figsize=(7, 7))
ax = fig.add_subplot(1, 1, 1)
ax.set_title("collapsed signal for %s"%mod)
ax.set_ylim(0,1)
# plot combined plot for all positions
cmm = cm.mean(axis=0)
# plot stacked barplot
bottom = np.zeros(len(ind))
for i in range(len(ind)):
p = ax.bar(ind, cmm[:,i], width, label=alphabet[i], color=colors[i], bottom=bottom)
bottom += cmm[:,i]
fig.savefig(".".join(outfn.split('.')[:-1])+".collapsed."+outfn.split('.')[-1],
dpi=100, orientation='landscape', transparent=False)
# clear
fig.clear(); del fig
def pos2logo(outdir, mod, c, pos, window=2, alphabet='ACGT', ext="svg"):
"""Store logo for each position"""
#
url = 'http://weblogo.threeplusone.com/create.cgi' # "alphabet": auto
values = {'sequences': '', 'format': ext, 'stack_width': 'medium', 'stack_per_line': '40',
'alphabet': "alphabet_dna", 'ignore_lower_case': True, 'unit_name': "bits", 'first_index': '1',
'logo_start': '1', 'logo_end': str(2*window+1), 'composition': "comp_auto", 'percentCG': '',
'scale_width': True, 'show_errorbars': True, 'logo_title': '', 'logo_label': '',
'show_xaxis': True, 'xaxis_label': '', 'show_yaxis': True, 'yaxis_label': '',
'yaxis_scale': 'auto', 'yaxis_tic_interval': '1.0', 'show_ends': True, 'show_fineprint': True,
'color_scheme': 'color_auto', 'symbols0': '', 'symbols1': '', 'symbols2': '', 'symbols3': '',
'symbols4': '', 'color0': '', 'color1': '', 'color2': '', 'color3': '', 'color4': ''}
# combine replicas
csum = c.sum(axis=2)
for i, p in enumerate(pos):
freqs = ["P0\tA\tC\tG\tT\n"]
for j in range(csum.shape[1]):
freqs.append("%s\t%s\n"%(str(j).zfill(2), "\t".join(map(str, csum[i][j][:len(alphabet)]))))
# communicate with server and store png
values["sequences"] = "".join(freqs)
data = urllib.urlencode(values).encode("utf-8")
req = urllib2.Request(url, data)
        response = urllib2.urlopen(req)
outfn = os.path.join(outdir, "logo.%s.%s.%s"%(mod, p, ext))
with open(outfn, "wb") as f:
im = response.read()
f.write(im)
def modifications2signatures(outdir, bams, dna, rna, table, minDepth, mpileup_opts, verbose, log=sys.stdout, window=2):
"""Generate RT signatures for modifications"""
mod2base, mod2name = table2modifications(table)
if not os.path.isdir(outdir):
os.makedirs(outdir)
# load modifications
mods, unmods = load_modifications(rna)
# write header
log.write("#code\tmodification\toccurencies\tavg cov\tcov std/avg\tA\tC\tG\tT\tins\tdel\n")
for mod, pos in mods.iteritems(): #list(mods.iteritems())[-10:]:
data, ends = [], []
for p in pos:
ref = p.split(':')[0]
i = int(p.split(':')[1])
s, e = i-window, i+window
if s<1:
continue
region = "%s:%s-%s"%(ref, s, e)
_data, _ends = genotype_region(bams, dna, region, minDepth, mpileup_opts)
if len(_data)==2*window+1:
data.append(_data)
ends.append(_ends)
if not data:
continue
# normalise 0-1 freq
c, e = np.array(data), np.array(ends)#; print c.shape, e.shape
if len(c.shape)<4:
sys.stderr.write("[WARNING] Wrong shape for %s: %s\n"%(mod, c.shape))
continue
csum = c.sum(axis=3, dtype='float')
csum[csum<1] = 1
cn = 1.*c/csum[:,:,:,np.newaxis]
# average over all replicas
cm = cn.mean(axis=2)
# mean cov & cov var (stdev / mean)
cov = csum.mean(axis=2).mean(axis=0)
covvar = cov.std() / cov.mean()
# average over all positions
cmm = cm.mean(axis=0)
log.write("%s\t%s\t%s\t%.2f\t%.3f\t%s\n"%(mod, mod2name[mod], len(pos), cov[window], covvar, "\t".join("%.3f"%x for x in cmm[window])))
# plot base freq
outfn = os.path.join(outdir, "mods.%s.png"%mod2name[mod])
title = "%s [%s] in %s position(s) (%sx)"%(mod2name[mod], mod, len(pos), cov[window])
array2plot(outfn, mod2name[mod], title, cm, pos, window)
# store logo
try:
pos2logo(outdir, mod2name[mod], c, pos, window)
except Exception, e:
sys.stderr.write("[ERROR][pos2logo] %s\n"%str(e))
def main():
import argparse
usage = "%(prog)s -v"
parser = argparse.ArgumentParser(usage=usage, description=desc, epilog=epilog)
parser.add_argument("-v", dest="verbose", default=False, action="store_true", help="verbose")
parser.add_argument('--version', action='version', version='1.1')
parser.add_argument("-o", "--outdir", default="mod2sig", help="output directory [%(default)s]")
parser.add_argument("-b", "--bam", nargs="+", help="BAM files to process")
parser.add_argument("-d", "--dna", required=1, help="DNA FastA")
parser.add_argument("-r", "--rna", required=1, help="RNA FastA")
parser.add_argument("-t", "--table", default="modifications.txt", help="modification table [%(default)s]" )
parser.add_argument("-m", "--mpileup_opts", default="-A -q 15 -Q 20", help="options passed to mpileup [%(default)s]")
parser.add_argument("--minDepth", default=100, type=int, help="minimal depth [%(default)s]")
# parser.add_argument("-f", "--minFreq", default=0.8, type=float, help="min frequency of alternative base [%(default)s]")
o = parser.parse_args()
if o.verbose:
sys.stderr.write( "Options: %s\n" % str(o) )
modifications2signatures(o.outdir, o.bam, o.dna, o.rna, o.table, o.minDepth, o.mpileup_opts, o.verbose)
if __name__=='__main__':
t0 = datetime.now()
try:
main()
except KeyboardInterrupt:
sys.stderr.write("\nCtrl-C pressed! \n")
dt = datetime.now()-t0
sys.stderr.write( "#Time elapsed: %s\n" % dt )
| gpl-3.0 |
ycaihua/scikit-learn | examples/applications/topics_extraction_with_nmf.py | 106 | 2313 | """
========================================================
Topics extraction with Non-Negative Matrix Factorization
========================================================
This is a proof of concept application of Non Negative Matrix
Factorization of the term frequency matrix of a corpus of documents so
as to extract an additive model of the topic structure of the corpus.
The output is a list of topics, each represented as a list of terms
(weights are not shown).
The default parameters (n_samples / n_features / n_topics) should make
the example runnable in a couple of tens of seconds. You can try to
increase the dimensions of the problem, but be aware that the time complexity
is polynomial.
"""
# Author: Olivier Grisel <[email protected]>
# Lars Buitinck <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from time import time
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import NMF
from sklearn.datasets import fetch_20newsgroups
n_samples = 2000
n_features = 1000
n_topics = 10
n_top_words = 20
# Load the 20 newsgroups dataset and vectorize it. We use a few heuristics
# to filter out useless terms early on: the posts are stripped of headers,
# footers and quoted replies, and common English words, words occurring in
# only one document or in at least 95% of the documents are removed.
t0 = time()
print("Loading dataset and extracting TF-IDF features...")
dataset = fetch_20newsgroups(shuffle=True, random_state=1,
remove=('headers', 'footers', 'quotes'))
vectorizer = TfidfVectorizer(max_df=0.95, min_df=2, max_features=n_features,
stop_words='english')
tfidf = vectorizer.fit_transform(dataset.data[:n_samples])
print("done in %0.3fs." % (time() - t0))
# Fit the NMF model
print("Fitting the NMF model with n_samples=%d and n_features=%d..."
% (n_samples, n_features))
nmf = NMF(n_components=n_topics, random_state=1).fit(tfidf)
print("done in %0.3fs." % (time() - t0))
feature_names = vectorizer.get_feature_names()
for topic_idx, topic in enumerate(nmf.components_):
print("Topic #%d:" % topic_idx)
print(" ".join([feature_names[i]
for i in topic.argsort()[:-n_top_words - 1:-1]]))
print()
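# Follow-up illustration: the loop above only inspects the topic-term
# structure.  Assuming this NMF object exposes the standard scikit-learn
# transform() API, the document-topic weights can be obtained as well; the
# two lines below report the dominant topic of the first document.
doc_topic = nmf.transform(tfidf)
print("Dominant topic of document 0: %d" % doc_topic[0].argmax())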
| bsd-3-clause |
DJArmstrong/autovet | Features/old/Centroiding/scripts/old/load_stacked_image.py | 2 | 7068 | # -*- coding: utf-8 -*-
"""
Created on Tue Oct 18 11:27:46 2016
@author:
Maximilian N. Guenther
Battcock Centre for Experimental Astrophysics,
Cavendish Laboratory,
JJ Thomson Avenue
Cambridge CB3 0HE
Email: [email protected]
"""
import warnings
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling import models, fitting
from matplotlib.colors import LogNorm
import os, sys, socket
import glob
import fitsio
from ngtsio import ngtsio
from photutils.background import Background
from photutils import daofind
from astropy.stats import sigma_clipped_stats
from photutils import CircularAperture
def standard_fnames(fieldname, root=None):
if (root is None):
#::: on laptop (OS X)
if sys.platform == "darwin":
root = '/Users/mx/Big_Data/BIG_DATA_NGTS/2016/STACKED_IMAGES/'
#::: on Cambridge servers
elif 'ra.phy.cam.ac.uk' in socket.gethostname():
root = '/appch/data/mg719/ngts_pipeline_output/STACKED_IMAGES/'
filename = glob.glob( os.path.join( root, 'DITHERED_STACK_'+fieldname+'*.fits' ) )[0]
return filename
def load(fieldname):
filename = standard_fnames(fieldname)
data = fitsio.read( filename )
return data
def plot(fieldname, x, y, r=15, ax=None, show_apt=True):
'''
Note: it is important to transform the coordinates!
resolution of stacked images: (8192, 8192)
resolution of normal CCD images: (2048, 2048)
-> scaling factor of 4
x and y must additionally be shifted by - 0.5 + 0.5/scale
due to miss-match of different image origins in fits vs python vs C
    y axis is originally flipped if image is loaded via fitsio.read
'''
scale = 4
x = x - 0.5 + 0.5/scale
y = y - 0.5 + 0.5/scale
stacked_image = load(fieldname)
# bkg = np.nanmedian(stacked_image)
# stacked_image -= bkg
x0 = np.int( x - r )*scale #*4 as stacked images have 4 x higher resolution than normal images
x1 = np.int( x + r )*scale
y0 = np.int( y - r )*scale
y1 = np.int( y + r )*scale
stamp = stacked_image[ y0:y1, x0:x1 ] #x and y are switched in indexing
bkg = Background(stamp, (50,50), filter_shape=(3, 3), method='median')
stamp -= bkg.background
if ax is None:
fig, ax = plt.subplots(figsize=(6,6))
im = ax.imshow( stamp, norm=LogNorm(vmin=1, vmax=1000), interpolation='nearest', origin='lower', extent=1.*np.array([x0,x1,y0,y1])/scale )
plt.colorbar(im)
if show_apt == True:
ax.scatter( x, y, c='r' )
circle1 = plt.Circle((x, y), 3, color='r', fill=False, linewidth=3)
ax.add_artist(circle1)
fit_Gaussian2D(stamp)#, x, y, x0, x1, y0, y1)
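# --- Illustrative helper (never called) ---
# The docstring of plot() describes the CCD -> stacked-image transform (scale
# factor 4 plus a half-pixel origin shift).  The helper below just spells out
# that arithmetic for a single coordinate; the example value is arbitrary.
def _example_ccd_to_stacked(x_ccd=1024.0, scale=4):
    x_shifted = x_ccd - 0.5 + 0.5 / scale
    x_stacked_pixel = x_shifted * scale # 1024.0 -> 4094.5 stacked-image pixels
    return x_stacked_pixel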
def remove_bkg(z):
bkg = Background(z, (50,50), filter_shape=(3, 3), method='median')
plt.figure()
im = plt.imshow(z, norm=LogNorm(vmin=1, vmax=1000), origin='lower', cmap='Greys')
plt.colorbar(im)
plt.figure()
im = plt.imshow(bkg.background, origin='lower', cmap='Greys')
plt.colorbar(im)
plt.figure()
im = plt.imshow(z - bkg.background, norm=LogNorm(vmin=1, vmax=1000), interpolation='nearest', origin='lower')
plt.colorbar(im)
def fit_Gaussian2D(z):
# xx, yy = np.meshgrid( np.arange(x0, x1), np.arange(y0, y1) )
x = np.arange(0,z.shape[0])
y = np.arange(0,z.shape[1])
xx, yy = np.meshgrid( x,y )
plt.figure()
im = plt.imshow(z, norm=LogNorm(vmin=1, vmax=1000), origin='lower', cmap='Greys')
plt.colorbar(im)
#::: PHOTUTILS SOURCE DETECTION
data = z
mean, median, std = sigma_clipped_stats(data, sigma=3.0, iters=5)
sources = daofind(data - median, fwhm=4.0, threshold=4.*std)
positions = (sources['xcentroid'], sources['ycentroid'])
apertures = CircularAperture(positions, r=3.*4)
# plt.figure()
# plt.imshow(data, cmap='Greys', norm=LogNorm(vmin=1, vmax=1000))
apertures.plot(color='blue', lw=1.5, alpha=0.5)
#:::
amplitude_A = np.max( z )
x_A_mean = x[-1]/2
y_A_mean = y[-1]/2
x_A_stddev = 4. #assume FWHM of 1 pixel
y_A_stddev = 4.
amplitude_B = 0. #assume no second peak
x_B_mean = x[-1]/2
y_B_mean = y[-1]/2
x_B_stddev = 4. #assume FWHM of 1 pixel
y_B_stddev = 4.
# Now to fit the data create a new superposition with initial
# guesses for the parameters:
gg_init = models.Gaussian2D(amplitude_A, x_A_mean, y_A_mean, x_A_stddev, y_A_stddev) \
+ models.Gaussian2D(amplitude_B, x_B_mean, y_B_mean, x_B_stddev, y_B_stddev)
fitter = fitting.SLSQPLSQFitter()
gg_fit = fitter(gg_init, xx, yy, z)
print gg_fit
print gg_fit.parameters
print gg_fit(xx, yy)
print gg_fit(0,0)
print gg_fit(60,60)
plt.figure()
plt.imshow(gg_fit(xx,yy), label='Gaussian')
def test_plot(fieldname, x, y, r=15, ax=None):
'''
Note: it is important to transform the coordinates!
resolution of stacked images: (8192, 8192)
resolution of normal CCD images: (2048, 2048)
    y axis is originally flipped if image is loaded via fitsio.read
'''
scale = 4
stacked_image = load(fieldname)
stacked_image -= 0.95*np.nanmedian(stacked_image) #200
# fig, ax1 = plt.subplots()
# ax1.hist( stacked_image.flatten(), bins=np.linspace(200, 240, 50))
x0 = np.int( x - r )*scale #*4 as stacked images have 4 x higher resolution than normal images
x1 = np.int( x + r )*scale
y0 = np.int( y - r )*scale
y1 = np.int( y + r )*scale
im = stacked_image[ y0:y1, x0:x1 ]
if ax is None:
fig, axes = plt.subplots(1,3,figsize=(18,6))
for i in range(3):
if i == 0:
x = x
y = y
title = 'no shift'
elif i == 1:
x = x - 0.5 + 0.5/scale
y = y - 0.5 + 0.5/scale
title = 'shift - 3/4 * 0.5'
elif i == 2:
x = x -0.5
y = y -0.5
title = 'shift - 0.5'
ax = axes[i]
ax.imshow( im, norm=LogNorm(vmin=1, vmax=1000), interpolation='nearest', origin='lower', extent=1.*np.array([x0,x1,y0,y1])/scale )
ax.scatter( x, y, c='r' )
circle1 = plt.Circle((x, y), 3, color='r', fill=False, linewidth=3)
ax.add_artist(circle1)
ax.set_title(title)
if __name__ == '__main__':
fieldname = 'NG0304-1115'
# obj_id = 9861
# obj_id = 992
obj_id = 1703
# obj_id = 6190
# obj_id = 24503
# obj_id = 7505
# obj_id = 16335
data = ngtsio.get(fieldname, ['CCDX','CCDY'], obj_id=obj_id)
plot(fieldname, np.mean(data['CCDX']), np.mean(data['CCDY']), r=15) #obj 9861
# plot('NG0304-1115', 1112, 75, r=15)
| gpl-3.0 |
georgid/sms-tools | lectures/6-Harmonic-model/plots-code/sines-partials-harmonics.py | 2 | 2031 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, triang, blackmanharris
import sys, os, functools, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import dftModel as DFT
import utilFunctions as UF
(fs, x) = UF.wavread('../../../sounds/sine-440+490.wav')
w = np.hamming(3529)
N = 16084*2
hN = N/2
t = -20
pin = 4850
x1 = x[pin:pin+w.size]
mX1, pX1 = DFT.dftAnal(x1, w, N)
ploc = UF.peakDetection(mX1, hN, t)
pmag = mX1[ploc]
iploc, ipmag, ipphase = UF.peakInterp(mX1, pX1, ploc)
plt.figure(1, figsize=(9, 6))
plt.subplot(311)
plt.plot(fs*np.arange(0,N/2)/float(N), mX1-max(mX1), 'r', lw=1.5)
plt.plot(fs * iploc / N, ipmag-max(mX1), marker='x', color='b', alpha=1, linestyle='', markeredgewidth=1.5)
plt.axis([200, 1000, -80, 4])
plt.title('mX + peaks (sine-440+490.wav)')
(fs, x) = UF.wavread('../../../sounds/vibraphone-C6.wav')
w = np.blackman(401)
N = 1024
hN = N/2
t = -80
pin = 200
x2 = x[pin:pin+w.size]
mX2, pX2 = DFT.dftAnal(x2, w, N)
ploc = UF.peakDetection(mX2, hN, t)
pmag = mX2[ploc]
iploc, ipmag, ipphase = UF.peakInterp(mX2, pX2, ploc)
plt.subplot(3,1,2)
plt.plot(fs*np.arange(0,N/2)/float(N), mX2-max(mX2), 'r', lw=1.5)
plt.plot(fs * iploc / N, ipmag-max(mX2), marker='x', color='b', alpha=1, linestyle='', markeredgewidth=1.5)
plt.axis([500,10000,-100,4])
plt.title('mX + peaks (vibraphone-C6.wav)')
(fs, x) = UF.wavread('../../../sounds/oboe-A4.wav')
w = np.blackman(651)
N = 2048
hN = N/2
t = -80
pin = 10000
x3 = x[pin:pin+w.size]
mX3, pX3 = DFT.dftAnal(x3, w, N)
ploc = UF.peakDetection(mX3, hN, t)
pmag = mX3[ploc]
iploc, ipmag, ipphase = UF.peakInterp(mX3, pX3, ploc)
plt.subplot(3,1,3)
plt.plot(fs*np.arange(0,N/2)/float(N), mX3-max(mX3), 'r', lw=1.5)
plt.plot(fs * iploc / N, ipmag-max(mX3), marker='x', color='b', alpha=1, linestyle='', markeredgewidth=1.5)
plt.axis([0,6000,-70,2])
plt.title('mX + peaks (oboe-A4.wav)')
plt.tight_layout()
plt.savefig('sines-partials-harmonics.png')
plt.show()
| agpl-3.0 |
xapharius/mrEnsemble | Engine/src/protocol/n_image_input_protocol.py | 2 | 2336 | '''
Created on Apr 9, 2014
@author: Simon
'''
import matplotlib
from utils import logging
# !important: tell matplotlib not to try rendering to a window
matplotlib.use('Agg')
import io
import struct
from skimage import io as skio
import base64
class NImageInputProtocol(object):
'''
MrJob input protocol that reads a number of PNG images that are encoded as
follows: Base64 of | no. of bytes of image (4 bytes) | image bytes | ...
The result is a list of numpy arrays.
The write method encodes a list of images (numpy arrays) as base64 byte
string in the same way as the input for the read method.
'''
def read(self, data):
key, enc_value = data.split('\t', 1)
value = base64.b64decode(enc_value)
pos = 0
image_arrs = []
logging.info('decoded number of bytes: ' + str(len(value)))
while pos < len(value):
image_len = struct.unpack('>i', value[pos:pos+4])[0]
pos += 4
logging.info('reading image of length: ' + str(image_len) + '\n')
image_arr = skio.imread(io.BytesIO(value[pos:pos + image_len]))
logging.info('done reading')
image_arrs.append(image_arr)
pos += image_len
logging.info('Got ' + str(len(image_arrs)) + ' images')
return key, image_arrs
def write(self, key, img_list):
logging.info('Writing ' + str(len(img_list)) + ' images')
byte_stream = io.BytesIO()
for img in img_list:
# get image bytes
temp_stream = io.BytesIO()
skio.imsave(temp_stream, img)
img_bytes = temp_stream.getvalue()
temp_stream.close()
# get length of bytes in four bytes
img_len = len(img_bytes)
logging.info('Writing image of length ' + str(img_len))
len_bytes = bytearray(struct.pack('>i', img_len))
# save length and image bytes to the result
byte_stream.write(str(len_bytes))
byte_stream.write(img_bytes)
final_bytes = byte_stream.getvalue()
byte_stream.close()
encoded = base64.b64encode(final_bytes)
logging.info('Done writing. Final number of bytes: ' + str(len(final_bytes)))
return '%s\t%s' % (key, encoded)
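# --- Round-trip sketch (illustrative only, never called) ---
# Shows how the length-prefixed, base64-encoded format described in the class
# docstring is meant to round-trip through write() and read().  numpy is
# imported locally because the original module does not import it, and the
# tiny test images are arbitrary placeholders.
def _example_roundtrip():
    import numpy as np
    protocol = NImageInputProtocol()
    images = [np.zeros((8, 8), dtype=np.uint8),
              np.ones((8, 8), dtype=np.uint8) * 255]
    line = protocol.write('some_key', images)
    key, decoded = protocol.read(line)
    return key, len(decoded) # expected: ('some_key', 2)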
| mit |
markovmodel/PyEMMA | pyemma/plots/plots1d.py | 2 | 4667 |
# This file is part of PyEMMA.
#
# Copyright (c) 2018 Computational Molecular Biology Group, Freie Universitaet Berlin (GER)
#
# PyEMMA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy as _np
__author__ = 'thempel'
def plot_feature_histograms(xyzall,
feature_labels=None,
ax=None,
ylog=False,
outfile=None,
n_bins=50,
ignore_dim_warning=False,
**kwargs):
r"""Feature histogram plot
Parameters
----------
xyzall : np.ndarray(T, d)
(Concatenated list of) input features; containing time series data to be plotted.
Array of T data points in d dimensions (features).
feature_labels : iterable of str or pyemma.Featurizer, optional, default=None
        Labels of histogrammed features, defaults to feature index.
ax : matplotlib.Axes object, optional, default=None.
The ax to plot to; if ax=None, a new ax (and fig) is created.
ylog : boolean, default=False
If True, plot logarithm of histogram values.
n_bins : int, default=50
Number of bins the histogram uses.
outfile : str, default=None
If not None, saves plot to this file.
ignore_dim_warning : boolean, default=False
        Enable plotting for more than 50 dimensions (at your own risk).
**kwargs: kwargs passed to pyplot.fill_between. See the doc of pyplot for options.
Returns
-------
fig : matplotlib.Figure object
The figure in which the used ax resides.
ax : matplotlib.Axes object
        The ax in which the histograms were plotted.
"""
if not isinstance(xyzall, _np.ndarray):
raise ValueError('Input data hast to be a numpy array. Did you concatenate your data?')
if xyzall.shape[1] > 50 and not ignore_dim_warning:
        raise RuntimeError('This function is only useful for less than 50 dimensions. Turn off this warning '
'at your own risk with ignore_dim_warning=True.')
if feature_labels is not None:
if not isinstance(feature_labels, list):
from pyemma.coordinates.data.featurization.featurizer import MDFeaturizer as _MDFeaturizer
if isinstance(feature_labels, _MDFeaturizer):
feature_labels = feature_labels.describe()
else:
raise ValueError('feature_labels must be a list of feature labels, '
'a pyemma featurizer object or None.')
if not xyzall.shape[1] == len(feature_labels):
raise ValueError('feature_labels must have the same dimension as the input data xyzall.')
# make nice plots if user does not decide on color and transparency
if 'color' not in kwargs.keys():
kwargs['color'] = 'b'
if 'alpha' not in kwargs.keys():
kwargs['alpha'] = .25
import matplotlib.pyplot as _plt
# check input
if ax is None:
fig, ax = _plt.subplots()
else:
fig = ax.get_figure()
hist_offset = -.2
for h, coordinate in enumerate(reversed(xyzall.T)):
hist, edges = _np.histogram(coordinate, bins=n_bins)
if not ylog:
y = hist / hist.max()
else:
y = _np.zeros_like(hist) + _np.NaN
pos_idx = hist > 0
y[pos_idx] = _np.log(hist[pos_idx]) / _np.log(hist[pos_idx]).max()
ax.fill_between(edges[:-1], y + h + hist_offset, y2=h + hist_offset, **kwargs)
ax.axhline(y=h + hist_offset, xmin=0, xmax=1, color='k', linewidth=.2)
ax.set_ylim(hist_offset, h + hist_offset + 1)
# formatting
if feature_labels is None:
feature_labels = [str(n) for n in range(xyzall.shape[1])]
ax.set_ylabel('Feature histograms')
ax.set_yticks(_np.array(range(len(feature_labels))) + .3)
ax.set_yticklabels(feature_labels[::-1])
ax.set_xlabel('Feature values')
# save
if outfile is not None:
fig.savefig(outfile)
return fig, ax
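# --- Usage sketch (illustrative only, not executed on import) ---
# Minimal call pattern for plot_feature_histograms(); the random data stands
# in for concatenated feature trajectories and the labels are arbitrary.
def _example_plot_feature_histograms():
    data = _np.random.randn(1000, 3)
    fig, ax = plot_feature_histograms(
        data, feature_labels=['phi', 'psi', 'chi1'], n_bins=30)
    return fig, ax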
| lgpl-3.0 |
soulmachine/scikit-learn | sklearn/cluster/spectral.py | 15 | 17944 | # -*- coding: utf-8 -*-
"""Algorithms for spectral clustering"""
# Author: Gael Varoquaux [email protected]
# Brian Cheung
# Wei LI <[email protected]>
# License: BSD 3 clause
import warnings
import numpy as np
from ..base import BaseEstimator, ClusterMixin
from ..utils import check_random_state, as_float_array
from ..utils.validation import check_array
from ..utils.extmath import norm
from ..metrics.pairwise import pairwise_kernels
from ..neighbors import kneighbors_graph
from ..manifold import spectral_embedding
from .k_means_ import k_means
def discretize(vectors, copy=True, max_svd_restarts=30, n_iter_max=20,
random_state=None):
"""Search for a partition matrix (clustering) which is closest to the
eigenvector embedding.
Parameters
----------
vectors : array-like, shape: (n_samples, n_clusters)
The embedding space of the samples.
copy : boolean, optional, default: True
Whether to copy vectors, or perform in-place normalization.
max_svd_restarts : int, optional, default: 30
Maximum number of attempts to restart SVD if convergence fails
    n_iter_max : int, optional, default: 20
Maximum number of iterations to attempt in rotation and partition
matrix search if machine precision convergence is not reached
random_state: int seed, RandomState instance, or None (default)
        A pseudo random number generator used for the initialization
        of the rotation matrix
Returns
-------
labels : array of integers, shape: n_samples
The labels of the clusters.
References
----------
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
Notes
-----
The eigenvector embedding is used to iteratively search for the
closest discrete partition. First, the eigenvector embedding is
normalized to the space of partition matrices. An optimal discrete
partition matrix closest to this normalized embedding multiplied by
an initial rotation is calculated. Fixing this discrete partition
matrix, an optimal rotation matrix is calculated. These two
calculations are performed until convergence. The discrete partition
matrix is returned as the clustering solution. Used in spectral
clustering, this method tends to be faster and more robust to random
initialization than k-means.
"""
from scipy.sparse import csc_matrix
from scipy.linalg import LinAlgError
random_state = check_random_state(random_state)
vectors = as_float_array(vectors, copy=copy)
eps = np.finfo(float).eps
n_samples, n_components = vectors.shape
# Normalize the eigenvectors to an equal length of a vector of ones.
# Reorient the eigenvectors to point in the negative direction with respect
# to the first element. This may have to do with constraining the
# eigenvectors to lie in a specific quadrant to make the discretization
# search easier.
norm_ones = np.sqrt(n_samples)
for i in range(vectors.shape[1]):
vectors[:, i] = (vectors[:, i] / norm(vectors[:, i])) \
* norm_ones
if vectors[0, i] != 0:
vectors[:, i] = -1 * vectors[:, i] * np.sign(vectors[0, i])
# Normalize the rows of the eigenvectors. Samples should lie on the unit
# hypersphere centered at the origin. This transforms the samples in the
# embedding space to the space of partition matrices.
vectors = vectors / np.sqrt((vectors ** 2).sum(axis=1))[:, np.newaxis]
svd_restarts = 0
has_converged = False
# If there is an exception we try to randomize and rerun SVD again
# do this max_svd_restarts times.
while (svd_restarts < max_svd_restarts) and not has_converged:
# Initialize first column of rotation matrix with a row of the
# eigenvectors
rotation = np.zeros((n_components, n_components))
rotation[:, 0] = vectors[random_state.randint(n_samples), :].T
# To initialize the rest of the rotation matrix, find the rows
# of the eigenvectors that are as orthogonal to each other as
# possible
c = np.zeros(n_samples)
for j in range(1, n_components):
# Accumulate c to ensure row is as orthogonal as possible to
# previous picks as well as current one
c += np.abs(np.dot(vectors, rotation[:, j - 1]))
rotation[:, j] = vectors[c.argmin(), :].T
last_objective_value = 0.0
n_iter = 0
while not has_converged:
n_iter += 1
t_discrete = np.dot(vectors, rotation)
labels = t_discrete.argmax(axis=1)
vectors_discrete = csc_matrix(
(np.ones(len(labels)), (np.arange(0, n_samples), labels)),
shape=(n_samples, n_components))
t_svd = vectors_discrete.T * vectors
try:
                U, S, Vh = np.linalg.svd(t_svd)
            except LinAlgError:
                svd_restarts += 1
                print("SVD did not converge, randomizing and trying again")
break
ncut_value = 2.0 * (n_samples - S.sum())
if ((abs(ncut_value - last_objective_value) < eps) or
(n_iter > n_iter_max)):
has_converged = True
else:
# otherwise calculate rotation and continue
last_objective_value = ncut_value
rotation = np.dot(Vh.T, U.T)
if not has_converged:
raise LinAlgError('SVD did not converge')
return labels
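# --- Illustrative helper (never called) ---
# discretize() expects the spectral embedding of the samples (one column per
# cluster) and returns integer labels.  The toy helper below builds an RBF
# affinity for two well separated blobs and runs the same two steps that
# spectral_clustering() performs further down; it is only meant to make the
# expected input and output shapes concrete.
def _example_discretize():
    rng = np.random.RandomState(0)
    X = np.vstack([rng.randn(20, 2), rng.randn(20, 2) + 10.0])
    affinity = pairwise_kernels(X, metric='rbf', gamma=1.0)
    maps = spectral_embedding(affinity, n_components=2, drop_first=False,
                              random_state=rng)
    return discretize(maps, random_state=rng) # 40 integer labels expected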
def spectral_clustering(affinity, n_clusters=8, n_components=None,
eigen_solver=None, random_state=None, n_init=10,
eigen_tol=0.0, assign_labels='kmeans'):
"""Apply clustering to a projection to the normalized laplacian.
In practice Spectral Clustering is very useful when the structure of
the individual clusters is highly non-convex or more generally when
a measure of the center and spread of the cluster is not a suitable
description of the complete cluster. For instance when clusters are
nested circles on the 2D plan.
If affinity is the adjacency matrix of a graph, this method can be
used to find normalized graph cuts.
Parameters
-----------
affinity : array-like or sparse matrix, shape: (n_samples, n_samples)
The affinity matrix describing the relationship of the samples to
embed. **Must be symmetric**.
Possible examples:
- adjacency matrix of a graph,
- heat kernel of the pairwise distance matrix of the samples,
- symmetric k-nearest neighbours connectivity matrix of the samples.
n_clusters : integer, optional
Number of clusters to extract.
n_components : integer, optional, default is k
Number of eigen vectors to use for the spectral embedding
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization
of the lobpcg eigen vectors decomposition when eigen_solver == 'amg'
and by the K-Means initialization.
n_init : int, optional, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
eigen_tol : float, optional, default: 0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
The strategy to use to assign labels in the embedding
space. There are two ways to assign labels after the laplacian
embedding. k-means can be applied and is a popular choice. But it can
also be sensitive to initialization. Discretization is another
approach which is less sensitive to random initialization. See
the 'Multiclass spectral clustering' paper referenced below for
more details on the discretization approach.
Returns
-------
labels : array of integers, shape: n_samples
The labels of the clusters.
References
----------
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
Notes
------
    The graph should contain only one connected component; otherwise
    the results make little sense.
This algorithm solves the normalized cut for k=2: it is a
normalized spectral clustering.
"""
if not assign_labels in ('kmeans', 'discretize'):
raise ValueError("The 'assign_labels' parameter should be "
"'kmeans' or 'discretize', but '%s' was given"
% assign_labels)
random_state = check_random_state(random_state)
n_components = n_clusters if n_components is None else n_components
maps = spectral_embedding(affinity, n_components=n_components,
eigen_solver=eigen_solver,
random_state=random_state,
eigen_tol=eigen_tol, drop_first=False)
if assign_labels == 'kmeans':
_, labels, _ = k_means(maps, n_clusters, random_state=random_state,
n_init=n_init)
else:
labels = discretize(maps, random_state=random_state)
return labels
class SpectralClustering(BaseEstimator, ClusterMixin):
"""Apply clustering to a projection to the normalized laplacian.
In practice Spectral Clustering is very useful when the structure of
the individual clusters is highly non-convex or more generally when
a measure of the center and spread of the cluster is not a suitable
description of the complete cluster. For instance when clusters are
nested circles on the 2D plan.
If affinity is the adjacency matrix of a graph, this method can be
used to find normalized graph cuts.
When calling ``fit``, an affinity matrix is constructed using either
kernel function such the Gaussian (aka RBF) kernel of the euclidean
distanced ``d(X, X)``::
np.exp(-gamma * d(X,X) ** 2)
or a k-nearest neighbors connectivity matrix.
Alternatively, using ``precomputed``, a user-provided affinity
matrix can be used.
Parameters
-----------
n_clusters : integer, optional
The dimension of the projection subspace.
affinity : string, array-like or callable, default 'rbf'
If a string, this may be one of 'nearest_neighbors', 'precomputed',
'rbf' or one of the kernels supported by
`sklearn.metrics.pairwise_kernels`.
Only kernels that produce similarity scores (non-negative values that
increase with similarity) should be used. This property is not checked
by the clustering algorithm.
gamma : float
Scaling factor of RBF, polynomial, exponential chi^2 and
sigmoid affinity kernel. Ignored for
``affinity='nearest_neighbors'``.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
n_neighbors : integer
Number of neighbors to use when constructing the affinity matrix using
the nearest neighbors method. Ignored for ``affinity='rbf'``.
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization
of the lobpcg eigen vectors decomposition when eigen_solver == 'amg'
and by the K-Means initialization.
n_init : int, optional, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
eigen_tol : float, optional, default: 0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
The strategy to use to assign labels in the embedding
space. There are two ways to assign labels after the laplacian
embedding. k-means can be applied and is a popular choice. But it can
also be sensitive to initialization. Discretization is another approach
which is less sensitive to random initialization.
kernel_params : dictionary of string to any, optional
Parameters (keyword arguments) and values for kernel passed as
callable object. Ignored by other kernels.
Attributes
----------
affinity_matrix_ : array-like, shape (n_samples, n_samples)
Affinity matrix used for clustering. Available only if after calling
``fit``.
labels_ :
Labels of each point
Notes
-----
If you have an affinity matrix, such as a distance matrix,
    for which 0 means identical elements, and high values mean
    very dissimilar elements, it can be transformed into a
similarity matrix that is well suited for the algorithm by
applying the Gaussian (RBF, heat) kernel::
np.exp(- X ** 2 / (2. * delta ** 2))
Another alternative is to take a symmetric version of the k
nearest neighbors connectivity matrix of the points.
If the pyamg package is installed, it is used: this greatly
speeds up computation.
References
----------
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
"""
def __init__(self, n_clusters=8, eigen_solver=None, random_state=None,
n_init=10, gamma=1., affinity='rbf', n_neighbors=10,
eigen_tol=0.0, assign_labels='kmeans', degree=3, coef0=1,
kernel_params=None):
self.n_clusters = n_clusters
self.eigen_solver = eigen_solver
self.random_state = random_state
self.n_init = n_init
self.gamma = gamma
self.affinity = affinity
self.n_neighbors = n_neighbors
self.eigen_tol = eigen_tol
self.assign_labels = assign_labels
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
def fit(self, X):
"""Creates an affinity matrix for X using the selected affinity,
then applies spectral clustering to this affinity matrix.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
OR, if affinity==`precomputed`, a precomputed affinity
matrix of shape (n_samples, n_samples)
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
if X.shape[0] == X.shape[1] and self.affinity != "precomputed":
warnings.warn("The spectral clustering API has changed. ``fit``"
"now constructs an affinity matrix from data. To use"
" a custom affinity matrix, "
"set ``affinity=precomputed``.")
if self.affinity == 'nearest_neighbors':
connectivity = kneighbors_graph(X, n_neighbors=self.n_neighbors)
self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)
elif self.affinity == 'precomputed':
self.affinity_matrix_ = X
else:
params = self.kernel_params
if params is None:
params = {}
if not callable(self.affinity):
params['gamma'] = self.gamma
params['degree'] = self.degree
params['coef0'] = self.coef0
self.affinity_matrix_ = pairwise_kernels(X, metric=self.affinity,
filter_params=True,
**params)
random_state = check_random_state(self.random_state)
self.labels_ = spectral_clustering(self.affinity_matrix_,
n_clusters=self.n_clusters,
eigen_solver=self.eigen_solver,
random_state=random_state,
n_init=self.n_init,
eigen_tol=self.eigen_tol,
assign_labels=self.assign_labels)
return self
@property
def _pairwise(self):
return self.affinity == "precomputed"
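# Illustrative usage sketch (added annotation, not part of the original
# module): following the Notes in the class docstring, a precomputed distance
# matrix ``dist`` with a user-chosen bandwidth ``delta`` could be clustered
# roughly as
#
#     similarity = np.exp(-dist ** 2 / (2. * delta ** 2))
#     labels = SpectralClustering(n_clusters=2,
#                                 affinity='precomputed').fit(similarity).labels_
#
# ``SpectralClustering`` refers to the class defined above; ``np``, ``dist``
# and ``delta`` are assumed to be available, and only the kernel formula is
# taken from the docstring.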
| bsd-3-clause |
mariosky/databook | ejemplos/peliculas_taquilleras/movie_budget_gross.py | 1 | 1403 | # -*- coding: utf-8 -*-
from re import sub
from decimal import Decimal
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker
file = open('gross.dat')
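# Each line of gross.dat is assumed to look like "title|budget|gross"; the
# regex below strips currency symbols and thousands separators so the two
# money fields can be parsed as Decimal.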
data = [(title, Decimal(sub(r'[^\d.]', '', budget)), Decimal(sub(r'[^\d.]', '', gross)))
        for title, budget, gross in [line[:-1].split('|') for line in file]]
data_array = np.array(data)
budget = np.array( data_array[:, 1 ],dtype='int')
gross = np.array( data_array[:, 2 ] ,dtype='int')
fig, ax = plt.subplots(1, 1)
#ax.set_xscale('log')
#ax.set_yscale('log')
ax.set_xlim(900, 500000000)
def format_fn(tick_val, tick_pos):
format = matplotlib.ticker.EngFormatter()
return '$' + format.format_data(tick_val)
ax.set_title(u'Presupuesto de producción vs recaudación en taquilla (sin ajuste a la inflación)')
ax.set_xlabel(u'Presupuesto')
ax.set_ylabel(u'Recaudación')
#ax.set_xlabel(u'Presupuesto en escala logarítmica')
#ax.set_ylabel(u'Recaudación en escala logarítmica')
ax.get_xaxis().set_major_formatter(matplotlib.ticker.FuncFormatter(format_fn))
ax.get_yaxis().set_major_formatter(matplotlib.ticker.FuncFormatter(format_fn))
__=ax.plot(budget, gross, '.')
plt.show()
import seaborn as sns
g = sns.jointplot(x=gross, y=budget, xlim=[900, 500000000], ylim=[900,10000000])
ax = g.ax_joint
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_xlim(900, 500000000)
plt.show() | apache-2.0 |
weegreenblobbie/sd_audio_hackers | 20160626_spectrograms_explained/media/make_plots.py | 1 | 4732 | import matplotlib
import matplotlib.pyplot as plt
import matplotlib.patches
import numpy as np
import Nsound as ns
def main():
matplotlib.rc('font', size = 24)
matplotlib.rc('figure', figsize = [16, 6])
matplotlib.rcParams.update({'figure.subplot.left' : 0.09 })
matplotlib.rcParams.update({'figure.subplot.bottom': 0.15 })
matplotlib.rcParams.update({'figure.subplot.right' : 0.97 })
matplotlib.rcParams.update({'figure.subplot.top' : 0.88 })
sr = 1000
#--------------------------------------------------------------------------
# figure 1
gen = ns.Sine(sr)
signal = ns.AudioStream(sr, 1)
signal << gen.generate(1.0, 3)
signal.plot('3 Hz Signal')
fig = plt.gcf()
ax = plt.gca()
blue_line = ax.lines[0]
plt.xlabel('Time')
plt.ylabel('Amplitude')
plt.xlim([-0.05, 1.05])
plt.ylim([-1.05, 1.05])
plt.savefig('figure_1-0.svg')
# plot sub-sampled signal in time
buf = signal[0]
step = len(signal) // 32
y = buf[0:-1:step]
t = np.linspace(0, 1.0, len(signal))[0:-1:step]
red_lines = []
for tt, yy in zip(t, y):
l = plt.axvline(x = tt, color = 'red')
red_lines.append(l)
plt.savefig('figure_1-2.svg')
plt.plot(t, y, 'ro')
plt.savefig('figure_1-3.svg')
# remove blue line & red lines
blue_line.remove()
for l in red_lines:
l.remove()
# draw lolli pop
for tt, yy in zip(t, y):
plt.plot([tt, tt], [0, yy], 'b-', zorder = -1)
fig.canvas.draw()
plt.savefig('figure_1-4.svg')
#--------------------------------------------------------------------------
# figure 2
signal.plot('3 Hz Signal')
plt.xlabel('Time')
plt.ylabel('Amplitude')
plt.xlim([-0.05, 1.05])
plt.ylim([-1.05, 1.05])
plt.savefig('figure_2-0.svg')
# multiply the signal by a gaussian
s1 = signal * gen.drawGaussian(1.0, 0.33, 0.15)
s1.plot('3 Hz Signal * env')
plt.xlabel('Time')
plt.ylabel('Amplitude')
plt.xlim([-0.05, 1.05])
plt.ylim([-1.05, 1.05])
plt.savefig('figure_2-1.svg')
# multiply the signal by a gaussian
s2 = signal * gen.drawGaussian(1.0, 0.5, 0.15)
s2.plot('3 Hz Signal * env')
plt.xlabel('Time')
plt.ylabel('Amplitude')
plt.xlim([-0.05, 1.05])
plt.ylim([-1.05, 1.05])
plt.savefig('figure_2-2.svg')
# multiply the signal by a gaussian
s3 = signal * gen.drawGaussian(1.0, 0.66, 0.15)
s3.plot('3 Hz Signal * env')
plt.xlabel('Time')
plt.ylabel('Amplitude')
plt.xlim([-0.05, 1.05])
plt.ylim([-1.05, 1.05])
plt.savefig('figure_2-3.svg')
# multiply the signal by a gaussian
s4 = signal * (0.05 + gen.drawGaussian(1.0, 0.66, 0.15))
s4.normalize();
s4.plot('3 Hz Signal & ???')
plt.xlabel('Time')
plt.ylabel('Amplitude')
plt.xlim([-0.05, 1.05])
plt.ylim([-1.05, 1.05])
plt.savefig('figure_2-4.svg')
#--------------------------------------------------------------------------
# figure 3
signal.plot('3 Hz Signal')
plt.xlabel('Time')
plt.ylabel('Amplitude')
plt.xlim([-0.15, 1.15])
plt.ylim([-1.15, 1.15])
plt.savefig('figure_3-0.svg')
# add red rectangle
cx = 0.5
cy = 0
w = 1.10
h = 2.10
xy = [cx - 0.5 * w, cy - 0.5 * h]
r = matplotlib.patches.Rectangle(
xy,
width = w,
height = h,
ec = 'red',
fc = 'none',
)
ax = plt.gca()
ax.add_patch(r)
plt.savefig('figure_3-0.svg')
# shrink rectangle
w *= 0.666
x = cx - 0.5 * w
r.set_x(x)
r.set_width(w)
ax.figure.canvas.draw()
plt.savefig('figure_3-1.svg')
# shrink rectangle
w *= 0.333
x = cx - 0.5 * w
r.set_x(x)
r.set_width(w)
ax.figure.canvas.draw()
plt.savefig('figure_3-2.svg')
#--------------------------------------------------------------------------
# figure 4
sig = signal
time_axis = np.linspace(0, 1.0, len(sig))
rec_dict = dict(xy = (0,-1), width=1, height=2, ec = 'red', fc = 'none')
freqs = [6, 4.5, 3.3, 3.1]
for i, f in enumerate(freqs):
sig2 = gen.drawSine(1.0, f)
tones = (sig + sig2) / 2.0
plt.figure()
plt.plot(time_axis, tones[0].toList())
plt.grid(True)
plt.xlabel('Time')
plt.ylabel('Amplitude')
plt.xlim([-0.15, 1.15])
plt.ylim([-1.15, 1.15])
plt.title('3 Hz + %.1f Hz' % f)
r = matplotlib.patches.Rectangle(**rec_dict)
ax = plt.gca()
ax.add_patch(r)
plt.savefig('figure_4-%d.svg' % i)
plt.show()
if __name__ == "__main__":
main() | mit |
wbawakate/baby-unchi | Phase0/intaractive_patch_creator.py | 1 | 2496 | import os
import re
import cv2
import numpy as np
import random
import matplotlib
import matplotlib.pyplot as plt
def intaractive_patch_creator(src_dir, dis_t_dir, dis_f_dir,
show_size=256, patch_size=128,
n_patch=float("inf")):
"""
    This function makes patches labelled by a human (especially doctors).
    Repeat:
        1. show a picture with a randomly chosen patch region highlighted.
2. push 1 if the patch is effective for diagnosis else 0.
3. save the patch to dis_(t or f)_dir.
"""
""" dis dir should be empty """
if len([file for file in os.listdir(dis_t_dir) if (".bmp" in file)]) != 0:
print "error:", dis_t_dir, "does not emply"
return
if len([file for file in os.listdir(dis_f_dir) if (".bmp" in file)]) != 0:
print "error:", dis_f_dir, "does not emply"
return
files = [file for file in os.listdir(src_dir) if (".jpg" in file)]
id = 0
while True:
""" select image randomly """
file = random.choice(files)
print file
img = cv2.imread(src_dir+file)
""" resize image """
resized_y = show_size
resized_x = int(float(show_size) / img.shape[0] * img.shape[1])
resized_img = cv2.resize(img, (resized_x, resized_y))
resized_img_show = cv2.resize(img, (resized_x, resized_y))
""" select patch's position randomly"""
x = random.randint(0, resized_x-patch_size)
y = random.randint(0, resized_y-patch_size)
cv2.rectangle(resized_img_show,(x,y),(x+patch_size,y+patch_size),(0,0,255),3)
patch = resized_img[y:y+patch_size, x:x+patch_size]
""" show and save """
print "effective: 1, ineffective: 0, quit: q, skip: other key"
cv2.imshow("result", resized_img_show)
keycode = cv2.waitKey(0)
if keycode == ord("1"):
cv2.imwrite(dis_t_dir+str(id)+".bmp", patch)
elif keycode == ord("0"):
cv2.imwrite(dis_f_dir+str(id)+".bmp", patch)
elif keycode == ord("q"):
print "exit program"
break
else:
print "skip"
continue
id += 1
if id >= n_patch:
break
if __name__ == "__main__":
intaractive_patch_creator(src_dir="image/",
dis_t_dir="trem/t/",
dis_f_dir="trem/f/",
n_patch=3)
| mit |
ansteh/strapping | python/batch-by-split.py | 1 | 1518 | import json
import numpy as np
import pandas as pd
def get_json_data(filename):
with open(filename) as data_file:
return json.load(data_file)
class Batch():
def __init__(self, data, batch_size=None):
self.data = np.array(data)
self.batch_size = batch_size
self.shuffle().split().batch()
# print(self.batches)
def shuffle(self):
np.random.shuffle(self.data)
return self
def split(self, train_percent=.6, validate_percent=.2, seed=None):
m = len(self.data)
train_end = int(train_percent * m)
validate_end = int(validate_percent * m) + train_end
split = np.split(self.data, [train_end, validate_end, m])
        self.train, self.validate, self.test = split[0], split[1], split[2]
# print(self.train.shape)
# print(self.validate.shape)
# print(self.test.shape)
return self
def batch(self):
self.batches = np.array([])
length = len(self.train)
rest = length % self.batch_size
if(rest != 0):
mark = int(length-rest)
left = np.split(self.train[:mark], self.batch_size)
right = np.array(self.train[mark:])
self.batches = left
for i in range(len(right)):
self.batches[i] = np.append(left[i], [right[i]], axis=0)
else:
self.batches = np.split(self.train, self.batch_size)
return self
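# Note (added for clarity): despite its name, ``batch_size`` is used as the
# *number* of batches -- ``batch()`` splits the training portion into
# ``batch_size`` nearly equal parts and appends the leftover rows one per
# batch to the first few batches.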
data = get_json_data('pareto.json')
batch = Batch(data, 11)
| mit |
RobertABT/heightmap | build/matplotlib/lib/matplotlib/offsetbox.py | 4 | 51692 | """
The OffsetBox is a simple container artist. The child artists are meant
to be drawn at a relative position to their parent.
The [VH]Packer automatically adjusts the relative positions of its
children, which should be instances of the OffsetBox. This is used to
align similar artists together, e.g., in a legend.
The DrawingArea can contain any Artist as a child. The
DrawingArea has a fixed width and height. The position of children
relative to the parent is fixed. The TextArea contains a single
Text instance. The width and height of the TextArea instance are the
width and height of its child text.
"""
from __future__ import print_function
import warnings
import matplotlib.transforms as mtransforms
import matplotlib.artist as martist
import matplotlib.text as mtext
import numpy as np
from matplotlib.transforms import Bbox, BboxBase, TransformedBbox
from matplotlib.font_manager import FontProperties
from matplotlib.patches import FancyBboxPatch, FancyArrowPatch
from matplotlib import rcParams
from matplotlib import docstring
#from bboximage import BboxImage
from matplotlib.image import BboxImage
from matplotlib.patches import bbox_artist as mbbox_artist
from matplotlib.text import _AnnotationBase
DEBUG = False
# for debugging use
def bbox_artist(*args, **kwargs):
if DEBUG:
mbbox_artist(*args, **kwargs)
# _get_packed_offsets() and _get_aligned_offsets() are coded assuming
# that we are packing boxes horizontally. But same function will be
# used with vertical packing.
def _get_packed_offsets(wd_list, total, sep, mode="fixed"):
"""
    Given a list of (width, xdescent) of each box, calculate the
    total width and the x-offset positions of each item according to
    *mode*. xdescent is analogous to the usual descent, but along the
    x-direction. xdescent values are currently ignored.
*wd_list* : list of (width, xdescent) of boxes to be packed.
*sep* : spacing between boxes
*total* : Intended total length. None if not used.
*mode* : packing mode. 'fixed', 'expand', or 'equal'.
"""
w_list, d_list = zip(*wd_list)
# d_list is currently not used.
if mode == "fixed":
offsets_ = np.add.accumulate([0] + [w + sep for w in w_list])
offsets = offsets_[:-1]
if total is None:
total = offsets_[-1] - sep
return total, offsets
elif mode == "expand":
if len(w_list) > 1:
sep = (total - sum(w_list)) / (len(w_list) - 1.)
else:
sep = 0.
offsets_ = np.add.accumulate([0] + [w + sep for w in w_list])
offsets = offsets_[:-1]
return total, offsets
elif mode == "equal":
maxh = max(w_list)
if total is None:
total = (maxh + sep) * len(w_list)
else:
sep = float(total) / (len(w_list)) - maxh
offsets = np.array([(maxh + sep) * i for i in range(len(w_list))])
return total, offsets
else:
raise ValueError("Unknown mode : %s" % (mode,))
def _get_aligned_offsets(hd_list, height, align="baseline"):
"""
    Given a list of (height, descent) of each box, align the boxes
    with *align* and calculate the y-offsets of each box.
    *hd_list* : list of (height, ydescent) of boxes to be aligned.
*height* : Intended total length. None if not used.
*align* : align mode. 'baseline', 'top', 'bottom', or 'center'.
"""
if height is None:
height = max([h for h, d in hd_list])
if align == "baseline":
height_descent = max([h - d for h, d in hd_list])
descent = max([d for h, d in hd_list])
height = height_descent + descent
offsets = [0. for h, d in hd_list]
elif align in ["left", "top"]:
descent = 0.
offsets = [d for h, d in hd_list]
elif align in ["right", "bottom"]:
descent = 0.
offsets = [height - h + d for h, d in hd_list]
elif align == "center":
descent = 0.
offsets = [(height - h) * .5 + d for h, d in hd_list]
else:
raise ValueError("Unknown Align mode : %s" % (align,))
return height, descent, offsets
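# Worked example (added annotation, not in the original source): with
# hd_list = [(10, 2), (20, 5)] and height=None,
#   align="baseline" -> height = 20, descent = 5, offsets = [0, 0]
#   align="center"   -> height = 20, descent = 0, offsets = [7.0, 5.0]
# so "baseline" lines the boxes up on a shared baseline, while "center"
# centres each box within the tallest one.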
class OffsetBox(martist.Artist):
"""
    The OffsetBox is a simple container artist. The child artists are meant
    to be drawn at a relative position to their parent.
"""
def __init__(self, *args, **kwargs):
super(OffsetBox, self).__init__(*args, **kwargs)
self._children = []
self._offset = (0, 0)
def __getstate__(self):
state = martist.Artist.__getstate__(self)
# pickle cannot save instancemethods, so handle them here
from cbook import _InstanceMethodPickler
import inspect
offset = state['_offset']
if inspect.ismethod(offset):
state['_offset'] = _InstanceMethodPickler(offset)
return state
def __setstate__(self, state):
self.__dict__ = state
from cbook import _InstanceMethodPickler
if isinstance(self._offset, _InstanceMethodPickler):
self._offset = self._offset.get_instancemethod()
def set_figure(self, fig):
"""
Set the figure
accepts a class:`~matplotlib.figure.Figure` instance
"""
martist.Artist.set_figure(self, fig)
for c in self.get_children():
c.set_figure(fig)
def contains(self, mouseevent):
for c in self.get_children():
a, b = c.contains(mouseevent)
if a:
return a, b
return False, {}
def set_offset(self, xy):
"""
Set the offset
accepts x, y, tuple, or a callable object.
"""
self._offset = xy
def get_offset(self, width, height, xdescent, ydescent, renderer):
"""
Get the offset
accepts extent of the box
"""
if callable(self._offset):
return self._offset(width, height, xdescent, ydescent, renderer)
else:
return self._offset
def set_width(self, width):
"""
Set the width
accepts float
"""
self.width = width
def set_height(self, height):
"""
Set the height
accepts float
"""
self.height = height
def get_visible_children(self):
"""
Return a list of visible artists it contains.
"""
return [c for c in self._children if c.get_visible()]
def get_children(self):
"""
Return a list of artists it contains.
"""
return self._children
def get_extent_offsets(self, renderer):
raise Exception("")
def get_extent(self, renderer):
"""
        Return width, height, xdescent, ydescent of box
"""
w, h, xd, yd, offsets = self.get_extent_offsets(renderer)
return w, h, xd, yd
def get_window_extent(self, renderer):
'''
get the bounding box in display space.
'''
w, h, xd, yd, offsets = self.get_extent_offsets(renderer)
px, py = self.get_offset(w, h, xd, yd, renderer)
return mtransforms.Bbox.from_bounds(px - xd, py - yd, w, h)
def draw(self, renderer):
"""
Update the location of children if necessary and draw them
to the given *renderer*.
"""
width, height, xdescent, ydescent, offsets = self.get_extent_offsets(
renderer)
px, py = self.get_offset(width, height, xdescent, ydescent, renderer)
for c, (ox, oy) in zip(self.get_visible_children(), offsets):
c.set_offset((px + ox, py + oy))
c.draw(renderer)
bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
class PackerBase(OffsetBox):
def __init__(self, pad=None, sep=None, width=None, height=None,
align=None, mode=None,
children=None):
"""
*pad* : boundary pad
*sep* : spacing between items
*width*, *height* : width and height of the container box.
calculated if None.
*align* : alignment of boxes. Can be one of 'top', 'bottom',
'left', 'right', 'center' and 'baseline'
*mode* : packing mode
.. note::
            *pad* and *sep* need to be given in points and will be
            scaled with the renderer dpi, while *width* and *height*
need to be in pixels.
"""
super(PackerBase, self).__init__()
self.height = height
self.width = width
self.sep = sep
self.pad = pad
self.mode = mode
self.align = align
self._children = children
class VPacker(PackerBase):
"""
    The VPacker has its children packed vertically. It automatically
    adjusts the relative positions of children at draw time.
"""
def __init__(self, pad=None, sep=None, width=None, height=None,
align="baseline", mode="fixed",
children=None):
"""
*pad* : boundary pad
*sep* : spacing between items
*width*, *height* : width and height of the container box.
calculated if None.
*align* : alignment of boxes
*mode* : packing mode
.. note::
            *pad* and *sep* need to be given in points and will be
            scaled with the renderer dpi, while *width* and *height*
need to be in pixels.
"""
super(VPacker, self).__init__(pad, sep, width, height,
align, mode,
children)
def get_extent_offsets(self, renderer):
"""
        update offset of children and return the extents of the box
"""
dpicor = renderer.points_to_pixels(1.)
pad = self.pad * dpicor
sep = self.sep * dpicor
if self.width is not None:
for c in self.get_visible_children():
if isinstance(c, PackerBase) and c.mode == "expand":
c.set_width(self.width)
whd_list = [c.get_extent(renderer)
for c in self.get_visible_children()]
whd_list = [(w, h, xd, (h - yd)) for w, h, xd, yd in whd_list]
wd_list = [(w, xd) for w, h, xd, yd in whd_list]
width, xdescent, xoffsets = _get_aligned_offsets(wd_list,
self.width,
self.align)
pack_list = [(h, yd) for w, h, xd, yd in whd_list]
height, yoffsets_ = _get_packed_offsets(pack_list, self.height,
sep, self.mode)
yoffsets = yoffsets_ + [yd for w, h, xd, yd in whd_list]
ydescent = height - yoffsets[0]
yoffsets = height - yoffsets
#w, h, xd, h_yd = whd_list[-1]
yoffsets = yoffsets - ydescent
return width + 2 * pad, height + 2 * pad, \
xdescent + pad, ydescent + pad, \
zip(xoffsets, yoffsets)
class HPacker(PackerBase):
"""
The HPacker has its children packed horizontally. It automatically
adjusts the relative positions of children at draw time.
"""
def __init__(self, pad=None, sep=None, width=None, height=None,
align="baseline", mode="fixed",
children=None):
"""
*pad* : boundary pad
*sep* : spacing between items
*width*, *height* : width and height of the container box.
calculated if None.
*align* : alignment of boxes
*mode* : packing mode
.. note::
            *pad* and *sep* need to be given in points and will be
            scaled with the renderer dpi, while *width* and *height*
need to be in pixels.
"""
super(HPacker, self).__init__(pad, sep, width, height,
align, mode, children)
def get_extent_offsets(self, renderer):
"""
update offset of children and return the extents of the box
"""
dpicor = renderer.points_to_pixels(1.)
pad = self.pad * dpicor
sep = self.sep * dpicor
whd_list = [c.get_extent(renderer)
for c in self.get_visible_children()]
if not whd_list:
return 2 * pad, 2 * pad, pad, pad, []
if self.height is None:
height_descent = max([h - yd for w, h, xd, yd in whd_list])
ydescent = max([yd for w, h, xd, yd in whd_list])
height = height_descent + ydescent
else:
            height = self.height - 2 * pad  # height w/o pad
hd_list = [(h, yd) for w, h, xd, yd in whd_list]
height, ydescent, yoffsets = _get_aligned_offsets(hd_list,
self.height,
self.align)
pack_list = [(w, xd) for w, h, xd, yd in whd_list]
width, xoffsets_ = _get_packed_offsets(pack_list, self.width,
sep, self.mode)
xoffsets = xoffsets_ + [xd for w, h, xd, yd in whd_list]
xdescent = whd_list[0][2]
xoffsets = xoffsets - xdescent
return width + 2 * pad, height + 2 * pad, \
xdescent + pad, ydescent + pad, \
zip(xoffsets, yoffsets)
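# Illustrative sketch (assumed usage, not in the original source): packers are
# typically filled with TextArea/DrawingArea children and then anchored, e.g.
#
#     box = HPacker(children=[TextArea("label"), DrawingArea(20, 20)],
#                   align="center", pad=0, sep=5)
#
# The resulting box can then be wrapped in an AnchoredOffsetbox (defined later
# in this module) to place it relative to an Axes.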
class PaddedBox(OffsetBox):
def __init__(self, child, pad=None, draw_frame=False, patch_attrs=None):
"""
*pad* : boundary pad
.. note::
            *pad* needs to be given in points and will be
            scaled with the renderer dpi, while *width* and *height*
need to be in pixels.
"""
super(PaddedBox, self).__init__()
self.pad = pad
self._children = [child]
self.patch = FancyBboxPatch(
xy=(0.0, 0.0), width=1., height=1.,
facecolor='w', edgecolor='k',
mutation_scale=1, # self.prop.get_size_in_points(),
snap=True
)
self.patch.set_boxstyle("square", pad=0)
if patch_attrs is not None:
self.patch.update(patch_attrs)
self._drawFrame = draw_frame
def get_extent_offsets(self, renderer):
"""
        update offset of children and return the extents of the box
"""
dpicor = renderer.points_to_pixels(1.)
pad = self.pad * dpicor
w, h, xd, yd = self._children[0].get_extent(renderer)
return w + 2 * pad, h + 2 * pad, \
xd + pad, yd + pad, \
[(0, 0)]
def draw(self, renderer):
"""
Update the location of children if necessary and draw them
to the given *renderer*.
"""
width, height, xdescent, ydescent, offsets = self.get_extent_offsets(
renderer)
px, py = self.get_offset(width, height, xdescent, ydescent, renderer)
for c, (ox, oy) in zip(self.get_visible_children(), offsets):
c.set_offset((px + ox, py + oy))
self.draw_frame(renderer)
for c in self.get_visible_children():
c.draw(renderer)
#bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
def update_frame(self, bbox, fontsize=None):
self.patch.set_bounds(bbox.x0, bbox.y0,
bbox.width, bbox.height)
if fontsize:
self.patch.set_mutation_scale(fontsize)
def draw_frame(self, renderer):
# update the location and size of the legend
bbox = self.get_window_extent(renderer)
self.update_frame(bbox)
if self._drawFrame:
self.patch.draw(renderer)
class DrawingArea(OffsetBox):
"""
The DrawingArea can contain any Artist as a child. The DrawingArea
has a fixed width and height. The position of children relative to
the parent is fixed.
"""
def __init__(self, width, height, xdescent=0.,
ydescent=0., clip=True):
"""
*width*, *height* : width and height of the container box.
*xdescent*, *ydescent* : descent of the box in x- and y-direction.
"""
super(DrawingArea, self).__init__()
self.width = width
self.height = height
self.xdescent = xdescent
self.ydescent = ydescent
self.offset_transform = mtransforms.Affine2D()
self.offset_transform.clear()
self.offset_transform.translate(0, 0)
self.dpi_transform = mtransforms.Affine2D()
def get_transform(self):
"""
Return the :class:`~matplotlib.transforms.Transform` applied
to the children
"""
return self.dpi_transform + self.offset_transform
def set_transform(self, t):
"""
set_transform is ignored.
"""
pass
def set_offset(self, xy):
"""
set offset of the container.
        Accept : tuple of x,y coordinates in display units.
"""
self._offset = xy
self.offset_transform.clear()
self.offset_transform.translate(xy[0], xy[1])
def get_offset(self):
"""
return offset of the container.
"""
return self._offset
def get_window_extent(self, renderer):
'''
get the bounding box in display space.
'''
w, h, xd, yd = self.get_extent(renderer)
ox, oy = self.get_offset() # w, h, xd, yd)
return mtransforms.Bbox.from_bounds(ox - xd, oy - yd, w, h)
def get_extent(self, renderer):
"""
        Return width, height, xdescent, ydescent of box
"""
dpi_cor = renderer.points_to_pixels(1.)
return self.width * dpi_cor, self.height * dpi_cor, \
self.xdescent * dpi_cor, self.ydescent * dpi_cor
def add_artist(self, a):
'Add any :class:`~matplotlib.artist.Artist` to the container box'
self._children.append(a)
if not a.is_transform_set():
a.set_transform(self.get_transform())
def draw(self, renderer):
"""
Draw the children
"""
dpi_cor = renderer.points_to_pixels(1.)
self.dpi_transform.clear()
self.dpi_transform.scale(dpi_cor, dpi_cor)
for c in self._children:
c.draw(renderer)
bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
class TextArea(OffsetBox):
"""
    The TextArea contains a single Text instance. The text is
    placed at (0, 0) with baseline+left alignment. The width and height
    of the TextArea instance are the width and height of its child
    text.
"""
def __init__(self, s,
textprops=None,
multilinebaseline=None,
minimumdescent=True,
):
"""
*s* : a string to be displayed.
*textprops* : property dictionary for the text
*multilinebaseline* : If True, baseline for multiline text is
        adjusted so that it is (approximately)
        center-aligned with single-line text.
*minimumdescent* : If True, the box has a minimum descent of "p".
"""
if textprops is None:
textprops = {}
if "va" not in textprops:
textprops["va"] = "baseline"
self._text = mtext.Text(0, 0, s, **textprops)
OffsetBox.__init__(self)
self._children = [self._text]
self.offset_transform = mtransforms.Affine2D()
self.offset_transform.clear()
self.offset_transform.translate(0, 0)
self._baseline_transform = mtransforms.Affine2D()
self._text.set_transform(self.offset_transform +
self._baseline_transform)
self._multilinebaseline = multilinebaseline
self._minimumdescent = minimumdescent
def set_text(self, s):
"set text"
self._text.set_text(s)
def get_text(self):
"get text"
return self._text.get_text()
def set_multilinebaseline(self, t):
"""
Set multilinebaseline .
If True, baseline for multiline text is
        adjusted so that it is (approximately) center-aligned with
        single-line text.
"""
self._multilinebaseline = t
def get_multilinebaseline(self):
"""
get multilinebaseline .
"""
return self._multilinebaseline
def set_minimumdescent(self, t):
"""
Set minimumdescent .
If True, extent of the single line text is adjusted so that
it has minimum descent of "p"
"""
self._minimumdescent = t
def get_minimumdescent(self):
"""
get minimumdescent.
"""
return self._minimumdescent
def set_transform(self, t):
"""
set_transform is ignored.
"""
pass
def set_offset(self, xy):
"""
set offset of the container.
Accept : tuple of x,y coordinates in display units.
"""
self._offset = xy
self.offset_transform.clear()
self.offset_transform.translate(xy[0], xy[1])
def get_offset(self):
"""
return offset of the container.
"""
return self._offset
def get_window_extent(self, renderer):
'''
get the bounding box in display space.
'''
w, h, xd, yd = self.get_extent(renderer)
ox, oy = self.get_offset() # w, h, xd, yd)
return mtransforms.Bbox.from_bounds(ox - xd, oy - yd, w, h)
def get_extent(self, renderer):
clean_line, ismath = self._text.is_math_text(self._text._text)
_, h_, d_ = renderer.get_text_width_height_descent(
"lp", self._text._fontproperties, ismath=False)
bbox, info, d = self._text._get_layout(renderer)
w, h = bbox.width, bbox.height
line = info[-1][0] # last line
self._baseline_transform.clear()
if len(info) > 1 and self._multilinebaseline:
d_new = 0.5 * h - 0.5 * (h_ - d_)
self._baseline_transform.translate(0, d - d_new)
d = d_new
else: # single line
h_d = max(h_ - d_, h - d)
if self.get_minimumdescent():
## to have a minimum descent, #i.e., "l" and "p" have same
## descents.
d = max(d, d_)
#else:
# d = d
h = h_d + d
return w, h, 0., d
def draw(self, renderer):
"""
Draw the children
"""
self._text.draw(renderer)
bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
class AuxTransformBox(OffsetBox):
"""
    Offset box with the aux_transform. Its children will be
    transformed with the aux_transform first and then offset. The
    absolute coordinate of the aux_transform is meaningless as it will
    be automatically adjusted so that the lower-left corner of the
    bounding box of the children is set to (0, 0) before the offset
    transform.
    It is similar to DrawingArea, except that the extent of the box
    is not predetermined but calculated from the window extent of its
    children. Furthermore, the extent of the children will be
    calculated in the transformed coordinate.
"""
def __init__(self, aux_transform):
self.aux_transform = aux_transform
OffsetBox.__init__(self)
self.offset_transform = mtransforms.Affine2D()
self.offset_transform.clear()
self.offset_transform.translate(0, 0)
        # ref_offset_transform is used to make the offset_transform
        # always reference the lower-left corner of the bbox of its
        # children.
self.ref_offset_transform = mtransforms.Affine2D()
self.ref_offset_transform.clear()
def add_artist(self, a):
'Add any :class:`~matplotlib.artist.Artist` to the container box'
self._children.append(a)
a.set_transform(self.get_transform())
def get_transform(self):
"""
Return the :class:`~matplotlib.transforms.Transform` applied
to the children
"""
return self.aux_transform + \
self.ref_offset_transform + \
self.offset_transform
def set_transform(self, t):
"""
set_transform is ignored.
"""
pass
def set_offset(self, xy):
"""
set offset of the container.
        Accept : tuple of x,y coordinates in display units.
"""
self._offset = xy
self.offset_transform.clear()
self.offset_transform.translate(xy[0], xy[1])
def get_offset(self):
"""
return offset of the container.
"""
return self._offset
def get_window_extent(self, renderer):
'''
get the bounding box in display space.
'''
w, h, xd, yd = self.get_extent(renderer)
ox, oy = self.get_offset() # w, h, xd, yd)
return mtransforms.Bbox.from_bounds(ox - xd, oy - yd, w, h)
def get_extent(self, renderer):
# clear the offset transforms
_off = self.offset_transform.to_values() # to be restored later
self.ref_offset_transform.clear()
self.offset_transform.clear()
# calculate the extent
bboxes = [c.get_window_extent(renderer) for c in self._children]
ub = mtransforms.Bbox.union(bboxes)
        # adjust ref_offset_transform
        self.ref_offset_transform.translate(-ub.x0, -ub.y0)
        # restore offset transform
mtx = self.offset_transform.matrix_from_values(*_off)
self.offset_transform.set_matrix(mtx)
return ub.width, ub.height, 0., 0.
def draw(self, renderer):
"""
Draw the children
"""
for c in self._children:
c.draw(renderer)
bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
class AnchoredOffsetbox(OffsetBox):
"""
An offset box placed according to the legend location
    loc. AnchoredOffsetbox has a single child. When multiple children
    are needed, use another OffsetBox class to enclose them. By default,
the offset box is anchored against its parent axes. You may
explicitly specify the bbox_to_anchor.
"""
zorder = 5 # zorder of the legend
def __init__(self, loc,
pad=0.4, borderpad=0.5,
child=None, prop=None, frameon=True,
bbox_to_anchor=None,
bbox_transform=None,
**kwargs):
"""
loc is a string or an integer specifying the legend location.
The valid location codes are::
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'right' : 5,
'center left' : 6,
'center right' : 7,
'lower center' : 8,
'upper center' : 9,
'center' : 10,
pad : pad around the child for drawing a frame. given in
fraction of fontsize.
borderpad : pad between offsetbox frame and the bbox_to_anchor,
child : OffsetBox instance that will be anchored.
prop : font property. This is only used as a reference for paddings.
frameon : draw a frame box if True.
bbox_to_anchor : bbox to anchor. Use self.axes.bbox if None.
bbox_transform : with which the bbox_to_anchor will be transformed.
"""
super(AnchoredOffsetbox, self).__init__(**kwargs)
self.set_bbox_to_anchor(bbox_to_anchor, bbox_transform)
self.set_child(child)
self.loc = loc
self.borderpad = borderpad
self.pad = pad
if prop is None:
self.prop = FontProperties(size=rcParams["legend.fontsize"])
elif isinstance(prop, dict):
self.prop = FontProperties(**prop)
if "size" not in prop:
self.prop.set_size(rcParams["legend.fontsize"])
else:
self.prop = prop
self.patch = FancyBboxPatch(
xy=(0.0, 0.0), width=1., height=1.,
facecolor='w', edgecolor='k',
mutation_scale=self.prop.get_size_in_points(),
snap=True
)
self.patch.set_boxstyle("square", pad=0)
self._drawFrame = frameon
def set_child(self, child):
"set the child to be anchored"
self._child = child
def get_child(self):
"return the child"
return self._child
def get_children(self):
"return the list of children"
return [self._child]
def get_extent(self, renderer):
"""
return the extent of the artist. The extent of the child
added with the pad is returned
"""
w, h, xd, yd = self.get_child().get_extent(renderer)
fontsize = renderer.points_to_pixels(self.prop.get_size_in_points())
pad = self.pad * fontsize
return w + 2 * pad, h + 2 * pad, xd + pad, yd + pad
def get_bbox_to_anchor(self):
"""
return the bbox that the legend will be anchored
"""
if self._bbox_to_anchor is None:
return self.axes.bbox
else:
transform = self._bbox_to_anchor_transform
if transform is None:
return self._bbox_to_anchor
else:
return TransformedBbox(self._bbox_to_anchor,
transform)
def set_bbox_to_anchor(self, bbox, transform=None):
"""
set the bbox that the child will be anchored.
*bbox* can be a Bbox instance, a list of [left, bottom, width,
height], or a list of [left, bottom] where the width and
height will be assumed to be zero. The bbox will be
transformed to display coordinate by the given transform.
"""
if bbox is None or isinstance(bbox, BboxBase):
self._bbox_to_anchor = bbox
else:
try:
l = len(bbox)
except TypeError:
raise ValueError("Invalid argument for bbox : %s" % str(bbox))
if l == 2:
bbox = [bbox[0], bbox[1], 0, 0]
self._bbox_to_anchor = Bbox.from_bounds(*bbox)
self._bbox_to_anchor_transform = transform
def get_window_extent(self, renderer):
'''
get the bounding box in display space.
'''
self._update_offset_func(renderer)
w, h, xd, yd = self.get_extent(renderer)
ox, oy = self.get_offset(w, h, xd, yd, renderer)
return Bbox.from_bounds(ox - xd, oy - yd, w, h)
def _update_offset_func(self, renderer, fontsize=None):
"""
Update the offset func which depends on the dpi of the
renderer (because of the padding).
"""
if fontsize is None:
fontsize = renderer.points_to_pixels(
self.prop.get_size_in_points())
def _offset(w, h, xd, yd, renderer, fontsize=fontsize, self=self):
bbox = Bbox.from_bounds(0, 0, w, h)
borderpad = self.borderpad * fontsize
bbox_to_anchor = self.get_bbox_to_anchor()
x0, y0 = self._get_anchored_bbox(self.loc,
bbox,
bbox_to_anchor,
borderpad)
return x0 + xd, y0 + yd
self.set_offset(_offset)
def update_frame(self, bbox, fontsize=None):
self.patch.set_bounds(bbox.x0, bbox.y0,
bbox.width, bbox.height)
if fontsize:
self.patch.set_mutation_scale(fontsize)
def draw(self, renderer):
"draw the artist"
if not self.get_visible():
return
fontsize = renderer.points_to_pixels(self.prop.get_size_in_points())
self._update_offset_func(renderer, fontsize)
if self._drawFrame:
# update the location and size of the legend
bbox = self.get_window_extent(renderer)
self.update_frame(bbox, fontsize)
self.patch.draw(renderer)
width, height, xdescent, ydescent = self.get_extent(renderer)
px, py = self.get_offset(width, height, xdescent, ydescent, renderer)
self.get_child().set_offset((px, py))
self.get_child().draw(renderer)
def _get_anchored_bbox(self, loc, bbox, parentbbox, borderpad):
"""
return the position of the bbox anchored at the parentbbox
with the loc code, with the borderpad.
"""
assert loc in range(1, 11) # called only internally
BEST, UR, UL, LL, LR, R, CL, CR, LC, UC, C = range(11)
anchor_coefs = {UR: "NE",
UL: "NW",
LL: "SW",
LR: "SE",
R: "E",
CL: "W",
CR: "E",
LC: "S",
UC: "N",
C: "C"}
c = anchor_coefs[loc]
container = parentbbox.padded(-borderpad)
anchored_box = bbox.anchored(c, container=container)
return anchored_box.x0, anchored_box.y0
class AnchoredText(AnchoredOffsetbox):
"""
AnchoredOffsetbox with Text
"""
def __init__(self, s, loc, pad=0.4, borderpad=0.5, prop=None, **kwargs):
"""
*s* : string
*loc* : location code
*prop* : font property
*pad* : pad between the text and the frame as fraction of the font
size.
*borderpad* : pad between the frame and the axes (or bbox_to_anchor).
other keyword parameters of AnchoredOffsetbox are also allowed.
"""
propkeys = prop.keys()
badkwargs = ('ha', 'horizontalalignment', 'va', 'verticalalignment')
if set(badkwargs) & set(propkeys):
warnings.warn("Mixing horizontalalignment or verticalalignment "
"with AnchoredText is not supported.")
self.txt = TextArea(s, textprops=prop,
minimumdescent=False)
fp = self.txt._text.get_fontproperties()
super(AnchoredText, self).__init__(loc, pad=pad, borderpad=borderpad,
child=self.txt,
prop=fp,
**kwargs)
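# Illustrative sketch (assumed usage, not in the original source): ``prop`` is
# expected to be a dict of Text properties here, e.g.
#
#     at = AnchoredText("panel A", loc=2, prop=dict(size=10), frameon=True)
#     ax.add_artist(at)
#
# where ``ax`` is an existing Axes; loc=2 anchors the box in the upper-left
# corner (see the location codes documented on AnchoredOffsetbox).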
class OffsetImage(OffsetBox):
def __init__(self, arr,
zoom=1,
cmap=None,
norm=None,
interpolation=None,
origin=None,
filternorm=1,
filterrad=4.0,
resample=False,
dpi_cor=True,
**kwargs
):
self._dpi_cor = dpi_cor
self.image = BboxImage(bbox=self.get_window_extent,
cmap=cmap,
norm=norm,
interpolation=interpolation,
origin=origin,
filternorm=filternorm,
filterrad=filterrad,
resample=resample,
**kwargs
)
self._children = [self.image]
self.set_zoom(zoom)
self.set_data(arr)
OffsetBox.__init__(self)
def set_data(self, arr):
self._data = np.asarray(arr)
self.image.set_data(self._data)
def get_data(self):
return self._data
def set_zoom(self, zoom):
self._zoom = zoom
def get_zoom(self):
return self._zoom
# def set_axes(self, axes):
# self.image.set_axes(axes)
# martist.Artist.set_axes(self, axes)
# def set_offset(self, xy):
# """
# set offset of the container.
# Accept : tuple of x,y coordinate in disokay units.
# """
# self._offset = xy
# self.offset_transform.clear()
# self.offset_transform.translate(xy[0], xy[1])
def get_offset(self):
"""
return offset of the container.
"""
return self._offset
def get_children(self):
return [self.image]
def get_window_extent(self, renderer):
'''
get the bounding box in display space.
'''
w, h, xd, yd = self.get_extent(renderer)
ox, oy = self.get_offset()
return mtransforms.Bbox.from_bounds(ox - xd, oy - yd, w, h)
def get_extent(self, renderer):
# FIXME dpi_cor is never used
if self._dpi_cor: # True, do correction
dpi_cor = renderer.points_to_pixels(1.)
else:
dpi_cor = 1.
zoom = self.get_zoom()
data = self.get_data()
ny, nx = data.shape[:2]
w, h = nx * zoom, ny * zoom
return w, h, 0, 0
def draw(self, renderer):
"""
Draw the children
"""
self.image.draw(renderer)
#bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
class AnnotationBbox(martist.Artist, _AnnotationBase):
"""
Annotation-like class, but with offsetbox instead of Text.
"""
zorder = 3
def __str__(self):
return "AnnotationBbox(%g,%g)" % (self.xy[0], self.xy[1])
@docstring.dedent_interpd
def __init__(self, offsetbox, xy,
xybox=None,
xycoords='data',
boxcoords=None,
frameon=True, pad=0.4, # BboxPatch
annotation_clip=None,
box_alignment=(0.5, 0.5),
bboxprops=None,
arrowprops=None,
fontsize=None,
**kwargs):
"""
*offsetbox* : OffsetBox instance
*xycoords* : same as Annotation but can be a tuple of two
strings which are interpreted as x and y coordinates.
*boxcoords* : similar to textcoords as Annotation but can be a
tuple of two strings which are interpreted as x and y
coordinates.
*box_alignment* : a tuple of two floats for a vertical and
horizontal alignment of the offset box w.r.t. the *boxcoords*.
        The lower-left corner is (0, 0) and the upper-right corner is (1, 1).
other parameters are identical to that of Annotation.
"""
self.offsetbox = offsetbox
self.arrowprops = arrowprops
self.set_fontsize(fontsize)
if arrowprops is not None:
self._arrow_relpos = self.arrowprops.pop("relpos", (0.5, 0.5))
self.arrow_patch = FancyArrowPatch((0, 0), (1, 1),
**self.arrowprops)
else:
self._arrow_relpos = None
self.arrow_patch = None
_AnnotationBase.__init__(self,
xy, xytext=xybox,
xycoords=xycoords, textcoords=boxcoords,
annotation_clip=annotation_clip)
martist.Artist.__init__(self, **kwargs)
#self._fw, self._fh = 0., 0. # for alignment
self._box_alignment = box_alignment
# frame
self.patch = FancyBboxPatch(
xy=(0.0, 0.0), width=1., height=1.,
facecolor='w', edgecolor='k',
mutation_scale=self.prop.get_size_in_points(),
snap=True
)
self.patch.set_boxstyle("square", pad=pad)
if bboxprops:
self.patch.set(**bboxprops)
self._drawFrame = frameon
def contains(self, event):
t, tinfo = self.offsetbox.contains(event)
#if self.arrow_patch is not None:
# a,ainfo=self.arrow_patch.contains(event)
# t = t or a
# self.arrow_patch is currently not checked as this can be a line - JJ
return t, tinfo
def get_children(self):
children = [self.offsetbox, self.patch]
if self.arrow_patch:
children.append(self.arrow_patch)
return children
def set_figure(self, fig):
if self.arrow_patch is not None:
self.arrow_patch.set_figure(fig)
self.offsetbox.set_figure(fig)
martist.Artist.set_figure(self, fig)
def set_fontsize(self, s=None):
"""
set fontsize in points
"""
if s is None:
s = rcParams["legend.fontsize"]
self.prop = FontProperties(size=s)
def get_fontsize(self, s=None):
"""
return fontsize in points
"""
return self.prop.get_size_in_points()
def update_positions(self, renderer):
"""
Update the pixel positions of the annotated point and the text.
"""
xy_pixel = self._get_position_xy(renderer)
self._update_position_xybox(renderer, xy_pixel)
mutation_scale = renderer.points_to_pixels(self.get_fontsize())
self.patch.set_mutation_scale(mutation_scale)
if self.arrow_patch:
self.arrow_patch.set_mutation_scale(mutation_scale)
def _update_position_xybox(self, renderer, xy_pixel):
"""
Update the pixel positions of the annotation text and the arrow
patch.
"""
x, y = self.xytext
if isinstance(self.textcoords, tuple):
xcoord, ycoord = self.textcoords
x1, y1 = self._get_xy(renderer, x, y, xcoord)
x2, y2 = self._get_xy(renderer, x, y, ycoord)
ox0, oy0 = x1, y2
else:
ox0, oy0 = self._get_xy(renderer, x, y, self.textcoords)
w, h, xd, yd = self.offsetbox.get_extent(renderer)
_fw, _fh = self._box_alignment
self.offsetbox.set_offset((ox0 - _fw * w + xd, oy0 - _fh * h + yd))
# update patch position
bbox = self.offsetbox.get_window_extent(renderer)
#self.offsetbox.set_offset((ox0-_fw*w, oy0-_fh*h))
self.patch.set_bounds(bbox.x0, bbox.y0,
bbox.width, bbox.height)
x, y = xy_pixel
ox1, oy1 = x, y
if self.arrowprops:
x0, y0 = x, y
d = self.arrowprops.copy()
# Use FancyArrowPatch if self.arrowprops has "arrowstyle" key.
# adjust the starting point of the arrow relative to
# the textbox.
# TODO : Rotation needs to be accounted.
relpos = self._arrow_relpos
ox0 = bbox.x0 + bbox.width * relpos[0]
oy0 = bbox.y0 + bbox.height * relpos[1]
# The arrow will be drawn from (ox0, oy0) to (ox1,
# oy1). It will be first clipped by patchA and patchB.
            # Then it will be shrunk by shrinkA and shrinkB
# (in points). If patch A is not set, self.bbox_patch
# is used.
self.arrow_patch.set_positions((ox0, oy0), (ox1, oy1))
fs = self.prop.get_size_in_points()
mutation_scale = d.pop("mutation_scale", fs)
mutation_scale = renderer.points_to_pixels(mutation_scale)
self.arrow_patch.set_mutation_scale(mutation_scale)
patchA = d.pop("patchA", self.patch)
self.arrow_patch.set_patchA(patchA)
def draw(self, renderer):
"""
Draw the :class:`Annotation` object to the given *renderer*.
"""
if renderer is not None:
self._renderer = renderer
if not self.get_visible():
return
xy_pixel = self._get_position_xy(renderer)
if not self._check_xy(renderer, xy_pixel):
return
self.update_positions(renderer)
if self.arrow_patch is not None:
if self.arrow_patch.figure is None and self.figure is not None:
self.arrow_patch.figure = self.figure
self.arrow_patch.draw(renderer)
if self._drawFrame:
self.patch.draw(renderer)
self.offsetbox.draw(renderer)
class DraggableBase(object):
"""
helper code for a draggable artist (legend, offsetbox)
    The derived class must override the following two methods.
    def save_offset(self):
pass
def update_offset(self, dx, dy):
pass
    *save_offset* is called when the object is picked for dragging and it is
    meant to save the reference position of the artist.
    *update_offset* is called during the dragging. dx and dy are the pixel
offset from the point where the mouse drag started.
Optionally you may override following two methods.
def artist_picker(self, artist, evt):
return self.ref_artist.contains(evt)
def finalize_offset(self):
pass
*artist_picker* is a picker method that will be
used. *finalize_offset* is called when the mouse is released. In
    the current implementation of DraggableLegend and DraggableAnnotation,
    *update_offset* places the artists simply in display
    coordinates, and *finalize_offset* recalculates their position in
    normalized axes coordinates and sets the relevant attribute.
"""
def __init__(self, ref_artist, use_blit=False):
self.ref_artist = ref_artist
self.got_artist = False
self.canvas = self.ref_artist.figure.canvas
self._use_blit = use_blit and self.canvas.supports_blit
c2 = self.canvas.mpl_connect('pick_event', self.on_pick)
c3 = self.canvas.mpl_connect('button_release_event', self.on_release)
ref_artist.set_picker(self.artist_picker)
self.cids = [c2, c3]
def on_motion(self, evt):
if self.got_artist:
dx = evt.x - self.mouse_x
dy = evt.y - self.mouse_y
self.update_offset(dx, dy)
self.canvas.draw()
def on_motion_blit(self, evt):
if self.got_artist:
dx = evt.x - self.mouse_x
dy = evt.y - self.mouse_y
self.update_offset(dx, dy)
self.canvas.restore_region(self.background)
self.ref_artist.draw(self.ref_artist.figure._cachedRenderer)
self.canvas.blit(self.ref_artist.figure.bbox)
def on_pick(self, evt):
if evt.artist == self.ref_artist:
self.mouse_x = evt.mouseevent.x
self.mouse_y = evt.mouseevent.y
self.got_artist = True
if self._use_blit:
self.ref_artist.set_animated(True)
self.canvas.draw()
self.background = self.canvas.copy_from_bbox(
self.ref_artist.figure.bbox)
self.ref_artist.draw(self.ref_artist.figure._cachedRenderer)
self.canvas.blit(self.ref_artist.figure.bbox)
self._c1 = self.canvas.mpl_connect('motion_notify_event',
self.on_motion_blit)
else:
self._c1 = self.canvas.mpl_connect('motion_notify_event',
self.on_motion)
self.save_offset()
def on_release(self, event):
if self.got_artist:
self.finalize_offset()
self.got_artist = False
self.canvas.mpl_disconnect(self._c1)
if self._use_blit:
self.ref_artist.set_animated(False)
def disconnect(self):
"""disconnect the callbacks"""
for cid in self.cids:
self.canvas.mpl_disconnect(cid)
def artist_picker(self, artist, evt):
return self.ref_artist.contains(evt)
def save_offset(self):
pass
def update_offset(self, dx, dy):
pass
def finalize_offset(self):
pass
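# Minimal subclass sketch (assumed, not part of the original source), following
# the recipe in the DraggableBase docstring: save a reference offset on pick
# and apply the pixel delta while dragging.
#
#     class DraggableThing(DraggableBase):
#         def save_offset(self):
#             self._x0, self._y0 = self.ref_artist.get_position()
#         def update_offset(self, dx, dy):
#             self.ref_artist.set_position((self._x0 + dx, self._y0 + dy))
#
# ``get_position``/``set_position`` stand in for whatever positional API the
# wrapped artist exposes; see DraggableOffsetBox below for a concrete
# implementation.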
class DraggableOffsetBox(DraggableBase):
def __init__(self, ref_artist, offsetbox, use_blit=False):
DraggableBase.__init__(self, ref_artist, use_blit=use_blit)
self.offsetbox = offsetbox
def save_offset(self):
offsetbox = self.offsetbox
renderer = offsetbox.figure._cachedRenderer
w, h, xd, yd = offsetbox.get_extent(renderer)
offset = offsetbox.get_offset(w, h, xd, yd, renderer)
self.offsetbox_x, self.offsetbox_y = offset
self.offsetbox.set_offset(offset)
def update_offset(self, dx, dy):
loc_in_canvas = self.offsetbox_x + dx, self.offsetbox_y + dy
self.offsetbox.set_offset(loc_in_canvas)
def get_loc_in_canvas(self):
offsetbox = self.offsetbox
renderer = offsetbox.figure._cachedRenderer
w, h, xd, yd = offsetbox.get_extent(renderer)
ox, oy = offsetbox._offset
loc_in_canvas = (ox - xd, oy - yd)
return loc_in_canvas
class DraggableAnnotation(DraggableBase):
def __init__(self, annotation, use_blit=False):
DraggableBase.__init__(self, annotation, use_blit=use_blit)
self.annotation = annotation
def save_offset(self):
ann = self.annotation
x, y = ann.xytext
if isinstance(ann.textcoords, tuple):
xcoord, ycoord = ann.textcoords
x1, y1 = ann._get_xy(self.canvas.renderer, x, y, xcoord)
x2, y2 = ann._get_xy(self.canvas.renderer, x, y, ycoord)
ox0, oy0 = x1, y2
else:
ox0, oy0 = ann._get_xy(self.canvas.renderer, x, y, ann.textcoords)
self.ox, self.oy = ox0, oy0
self.annotation.textcoords = "figure pixels"
self.update_offset(0, 0)
def update_offset(self, dx, dy):
ann = self.annotation
ann.xytext = self.ox + dx, self.oy + dy
x, y = ann.xytext
# xy is never used
xy = ann._get_xy(self.canvas.renderer, x, y, ann.textcoords)
def finalize_offset(self):
loc_in_canvas = self.annotation.xytext
self.annotation.textcoords = "axes fraction"
pos_axes_fraction = self.annotation.axes.transAxes.inverted()
pos_axes_fraction = pos_axes_fraction.transform_point(loc_in_canvas)
self.annotation.xytext = tuple(pos_axes_fraction)
if __name__ == "__main__":
import matplotlib.pyplot as plt
fig = plt.figure(1)
fig.clf()
ax = plt.subplot(121)
#txt = ax.text(0.5, 0.5, "Test", size=30, ha="center", color="w")
kwargs = dict()
a = np.arange(256).reshape(16, 16) / 256.
myimage = OffsetImage(a,
zoom=2,
norm=None,
origin=None,
**kwargs
)
ax.add_artist(myimage)
myimage.set_offset((100, 100))
myimage2 = OffsetImage(a,
zoom=2,
norm=None,
origin=None,
**kwargs
)
ann = AnnotationBbox(myimage2, (0.5, 0.5),
xybox=(30, 30),
xycoords='data',
boxcoords="offset points",
frameon=True, pad=0.4, # BboxPatch
bboxprops=dict(boxstyle="round", fc="y"),
fontsize=None,
arrowprops=dict(arrowstyle="->"),
)
ax.add_artist(ann)
plt.draw()
plt.show()
| mit |
desihub/fiberassign | setup.py | 1 | 8460 | #!/usr/bin/env python
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function
#
# Standard imports
#
import glob
import os
import sys
import shutil
#
# setuptools' sdist command ignores MANIFEST.in
#
from distutils.command.sdist import sdist as DistutilsSdist
from setuptools import setup, find_packages, Extension
from setuptools.command.build_ext import build_ext
from setuptools.command.egg_info import egg_info
from distutils.command.clean import clean
from distutils.errors import CompileError
#
# DESI support code.
#
# If this code is being run within the readthedocs environment, then we
# need special steps.
#
# Longer story: desimodel and desitarget are fiberassign build requirements
# but these are not pip installable from a requirements file, due to the way
# that pip handles recursive requirements files and the fact that desiutil is
# required for obtaining even basic package info (egg_info). Since this is
# such a specialized case, we just check if this is running on readthedocs
# and manually pip install things right here.
#
try:
import desiutil
except ImportError:
if os.getenv('READTHEDOCS') == 'True':
import subprocess as sp
dutil = \
'git+https://github.com/desihub/desiutil.git@master#egg=desiutil'
dmodel = \
'git+https://github.com/desihub/desimodel.git@master#egg=desimodel'
dtarget = \
'git+https://github.com/desihub/desitarget.git@master#egg=desitarget'
sp.check_call(['pip', 'install', dutil])
sp.check_call(['pip', 'install', dmodel])
sp.check_call(['pip', 'install', dtarget])
else:
raise
from desiutil.setup import DesiTest, DesiVersion, get_version
#
# Begin setup
#
setup_keywords = dict()
#
# THESE SETTINGS NEED TO BE CHANGED FOR EVERY PRODUCT.
#
setup_keywords['name'] = 'fiberassign'
setup_keywords['description'] = 'DESI Fiber Assignment Tools'
setup_keywords['author'] = 'DESI Collaboration'
setup_keywords['author_email'] = '[email protected]'
setup_keywords['license'] = 'BSD'
setup_keywords['url'] = 'https://github.com/desihub/fiberassign'
#
# END OF SETTINGS THAT NEED TO BE CHANGED.
#
pkg_version = get_version(setup_keywords['name'])
setup_keywords['version'] = pkg_version
cpp_version_file = os.path.join("src", "_version.h")
with open(cpp_version_file, "w") as f:
f.write('// Generated by setup.py -- DO NOT EDIT THIS\n')
f.write('const static std::string package_version("{}");\n\n'
.format(pkg_version))
#
# Use README.rst as long_description.
#
setup_keywords['long_description'] = ''
if os.path.exists('README.rst'):
with open('README.rst') as readme:
setup_keywords['long_description'] = readme.read()
#
# Set other keywords for the setup function. These are automated, & should
# be left alone unless you are an expert.
#
# Treat everything in bin/ except *.rst as a script to be installed.
#
if os.path.isdir('bin'):
setup_keywords['scripts'] = [fname for fname in
glob.glob(os.path.join('bin', '*'))
if not os.path.basename(fname)
.endswith('.rst')]
setup_keywords['provides'] = [setup_keywords['name']]
setup_keywords['python_requires'] = '>=3.6.0'
setup_keywords['setup_requires'] = (['wheel'], )
setup_keywords['install_requires'] = [
'numpy',
'pyyaml',
'scipy',
'matplotlib',
'astropy',
'fitsio'
]
setup_keywords['zip_safe'] = False
setup_keywords['use_2to3'] = False
setup_keywords['packages'] = find_packages('py')
setup_keywords['package_dir'] = {'': 'py'}
setup_keywords['cmdclass'] = {'version': DesiVersion, 'test': DesiTest,
'sdist': DistutilsSdist}
test_suite_name = \
'{name}.test.{name}_test_suite.{name}_test_suite'.format(**setup_keywords)
setup_keywords['test_suite'] = test_suite_name
# Autogenerate command-line scripts.
#
# setup_keywords['entry_points'] =
# {'console_scripts':['desiInstall = desiutil.install.main:main']}
#
# Add internal data directories.
#
# setup_keywords['package_data'] = {'fiberassign': ['data/*',]}
# Add a custom clean command that removes in-tree files like the
# compiled extension.
class RealClean(clean):
def run(self):
super().run()
clean_files = [
"./build",
"./dist",
"py/fiberassign/_internal*",
"py/fiberassign/__pycache__",
"py/fiberassign/test/__pycache__",
"./*.egg-info",
"py/*.egg-info"
]
for cf in clean_files:
# Make paths absolute and relative to this path
apaths = glob.glob(os.path.abspath(cf))
for path in apaths:
if os.path.isdir(path):
shutil.rmtree(path)
elif os.path.isfile(path):
os.remove(path)
return
# These classes allow us to build a compiled extension that uses pybind11.
# For more details, see:
#
# https://github.com/pybind/python_example
#
# As of Python 3.6, CCompiler has a `has_flag` method.
# cf http://bugs.python.org/issue26689
def has_flag(compiler, flagname):
"""Return a boolean indicating whether a flag name is supported on
the specified compiler.
"""
import tempfile
devnull = None
oldstderr = None
try:
with tempfile.NamedTemporaryFile('w', suffix='.cpp') as f:
f.write('int main (int argc, char **argv) { return 0; }')
try:
devnull = open('/dev/null', 'w')
oldstderr = os.dup(sys.stderr.fileno())
os.dup2(devnull.fileno(), sys.stderr.fileno())
compiler.compile([f.name], extra_postargs=[flagname])
except CompileError:
return False
return True
finally:
if oldstderr is not None:
os.dup2(oldstderr, sys.stderr.fileno())
if devnull is not None:
devnull.close()
def cpp_flag(compiler):
"""Return the -std=c++[11/14] compiler flag.
    C++14 is preferred over C++11 (when it is available).
"""
if has_flag(compiler, '-std=c++14'):
return '-std=c++14'
elif has_flag(compiler, '-std=c++11'):
return '-std=c++11'
else:
raise RuntimeError('Unsupported compiler -- at least C++11 support '
'is needed!')
class BuildExt(build_ext):
"""A custom build extension for adding compiler-specific options."""
c_opts = {
'msvc': ['/EHsc'],
'unix': [],
}
if sys.platform.lower() == 'darwin':
c_opts['unix'] += ['-stdlib=libc++', '-mmacosx-version-min=10.7']
def build_extensions(self):
ct = self.compiler.compiler_type
opts = self.c_opts.get(ct, [])
linkopts = []
if ct == 'unix':
opts.append('-DVERSION_INFO="%s"' %
self.distribution.get_version())
opts.append(cpp_flag(self.compiler))
if has_flag(self.compiler, '-fvisibility=hidden'):
opts.append('-fvisibility=hidden')
if has_flag(self.compiler, '-fopenmp'):
opts.append('-fopenmp')
linkopts.append('-fopenmp')
if sys.platform.lower() == 'darwin':
linkopts.append('-stdlib=libc++')
elif ct == 'msvc':
opts.append('/DVERSION_INFO=\\"%s\\"' %
self.distribution.get_version())
for ext in self.extensions:
ext.extra_compile_args.extend(opts)
ext.extra_link_args.extend(linkopts)
# remove -Wstrict-prototypes flag
if '-Wstrict-prototypes' in self.compiler.compiler_so:
self.compiler.compiler_so.remove("-Wstrict-prototypes")
build_ext.build_extensions(self)
ext_modules = [
Extension(
'fiberassign._internal',
[
'src/utils.cpp',
'src/hardware.cpp',
'src/tiles.cpp',
'src/targets.cpp',
'src/assign.cpp',
'src/_pyfiberassign.cpp'
],
include_dirs=[
'src',
],
language='c++'
),
]
setup_keywords['ext_modules'] = ext_modules
setup_keywords['cmdclass']['build_ext'] = BuildExt
setup_keywords['cmdclass']['clean'] = RealClean
#
# Run setup command.
#
setup(**setup_keywords)
| bsd-3-clause |
samuel1208/scikit-learn | examples/ensemble/plot_adaboost_hastie_10_2.py | 355 | 3576 | """
=============================
Discrete versus Real AdaBoost
=============================
This example is based on Figure 10.2 from Hastie et al 2009 [1] and illustrates
the difference in performance between the discrete SAMME [2] boosting
algorithm and real SAMME.R boosting algorithm. Both algorithms are evaluated
on a binary classification task where the target Y is a non-linear function
of 10 input features.
Discrete SAMME AdaBoost adapts based on errors in predicted class labels
whereas real SAMME.R uses the predicted class probabilities.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
.. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>,
# Noel Dawe <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import zero_one_loss
from sklearn.ensemble import AdaBoostClassifier
n_estimators = 400
# A learning rate of 1. may not be optimal for both SAMME and SAMME.R
learning_rate = 1.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_test, y_test = X[2000:], y[2000:]
X_train, y_train = X[:2000], y[:2000]
dt_stump = DecisionTreeClassifier(max_depth=1, min_samples_leaf=1)
dt_stump.fit(X_train, y_train)
dt_stump_err = 1.0 - dt_stump.score(X_test, y_test)
dt = DecisionTreeClassifier(max_depth=9, min_samples_leaf=1)
dt.fit(X_train, y_train)
dt_err = 1.0 - dt.score(X_test, y_test)
ada_discrete = AdaBoostClassifier(
base_estimator=dt_stump,
learning_rate=learning_rate,
n_estimators=n_estimators,
algorithm="SAMME")
ada_discrete.fit(X_train, y_train)
ada_real = AdaBoostClassifier(
base_estimator=dt_stump,
learning_rate=learning_rate,
n_estimators=n_estimators,
algorithm="SAMME.R")
ada_real.fit(X_train, y_train)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([1, n_estimators], [dt_stump_err] * 2, 'k-',
label='Decision Stump Error')
ax.plot([1, n_estimators], [dt_err] * 2, 'k--',
label='Decision Tree Error')
ada_discrete_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_test)):
ada_discrete_err[i] = zero_one_loss(y_pred, y_test)
ada_discrete_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_train)):
ada_discrete_err_train[i] = zero_one_loss(y_pred, y_train)
ada_real_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_test)):
ada_real_err[i] = zero_one_loss(y_pred, y_test)
ada_real_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_train)):
ada_real_err_train[i] = zero_one_loss(y_pred, y_train)
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err,
label='Discrete AdaBoost Test Error',
color='red')
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err_train,
label='Discrete AdaBoost Train Error',
color='blue')
ax.plot(np.arange(n_estimators) + 1, ada_real_err,
label='Real AdaBoost Test Error',
color='orange')
ax.plot(np.arange(n_estimators) + 1, ada_real_err_train,
label='Real AdaBoost Train Error',
color='green')
ax.set_ylim((0.0, 0.5))
ax.set_xlabel('n_estimators')
ax.set_ylabel('error rate')
leg = ax.legend(loc='upper right', fancybox=True)
leg.get_frame().set_alpha(0.7)
plt.show()
| bsd-3-clause |
mariusvniekerk/ibis | ibis/client.py | 4 | 13243 | # Copyright 2014 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ibis.compat import zip as czip
from ibis.config import options
import ibis.expr.types as ir
import ibis.expr.operations as ops
import ibis.sql.compiler as comp
import ibis.common as com
import ibis.util as util
class Client(object):
pass
class Query(object):
"""
Abstraction for DDL query execution to enable both synchronous and
asynchronous queries, progress, cancellation and more (for backends
supporting such functionality).
"""
def __init__(self, client, ddl):
self.client = client
if isinstance(ddl, comp.DDL):
self.compiled_ddl = ddl.compile()
else:
self.compiled_ddl = ddl
self.result_wrapper = getattr(ddl, 'result_handler', None)
def execute(self):
# synchronous by default
with self.client._execute(self.compiled_ddl, results=True) as cur:
result = self._fetch(cur)
return self._wrap_result(result)
def _wrap_result(self, result):
if self.result_wrapper is not None:
result = self.result_wrapper(result)
return result
def _fetch(self, cursor):
import pandas as pd
rows = cursor.fetchall()
# TODO(wesm): please evaluate/reimpl to optimize for perf/memory
dtypes = [self._db_type_to_dtype(x[1]) for x in cursor.description]
names = [x[0] for x in cursor.description]
cols = {}
for (col, name, dtype) in czip(czip(*rows), names, dtypes):
try:
cols[name] = pd.Series(col, dtype=dtype)
except TypeError:
# coercing to specified dtype failed, e.g. NULL vals in int col
cols[name] = pd.Series(col)
return pd.DataFrame(cols, columns=names)
def _db_type_to_dtype(self, db_type):
raise NotImplementedError
class AsyncQuery(Query):
"""
Abstract asynchronous query
"""
def execute(self):
raise NotImplementedError
def is_finished(self):
raise NotImplementedError
def cancel(self):
raise NotImplementedError
def get_result(self):
raise NotImplementedError
class SQLClient(Client):
sync_query = Query
async_query = Query
def table(self, name, database=None):
"""
Create a table expression that references a particular table in the
database
Parameters
----------
name : string
database : string, optional
Returns
-------
table : TableExpr
"""
qualified_name = self._fully_qualified_name(name, database)
schema = self._get_table_schema(qualified_name)
node = ops.DatabaseTable(qualified_name, schema, self)
return self._table_expr_klass(node)
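# Hypothetical usage of table(); the table and database names below are
# illustrative only:
#
#   t = client.table('functional_alltypes', database='ibis_testing')
#   expr = t[t.bigint_col > 0].count()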
@property
def _table_expr_klass(self):
return ir.TableExpr
@property
def current_database(self):
return self.con.database
def database(self, name=None):
"""
Create a Database object for a given database name that can be used for
exploring and manipulating the objects (tables, functions, views, etc.)
inside
Parameters
----------
name : string
Name of database
Returns
-------
database : Database
"""
# TODO: validate existence of database
if name is None:
name = self.current_database
return self.database_class(name, self)
def _fully_qualified_name(self, name, database):
# XXX
return name
def _execute(self, query, results=False):
cur = self.con.execute(query)
if results:
return cur
else:
cur.release()
def sql(self, query):
"""
Convert a SQL query to an Ibis table expression
Parameters
----------
query : string
SQL query text to wrap as a table expression
Returns
-------
table : TableExpr
"""
# Get the schema by wrapping the query in a subquery and putting a
# LIMIT 0 on the outer query, so only column metadata comes back
limited_query = """\
SELECT *
FROM (
{0}
) t0
LIMIT 0""".format(query)
schema = self._get_schema_using_query(limited_query)
node = ops.SQLQueryResult(query, schema, self)
return ir.TableExpr(node)
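# Illustration of the LIMIT 0 trick above (the query text is hypothetical):
#
#   t = client.sql('SELECT key, value FROM kv')
#   # schema inference runs:
#   #   SELECT *
#   #   FROM (
#   #     SELECT key, value FROM kv
#   #   ) t0
#   #   LIMIT 0
#   # which yields column names/types without materializing any rows; the
#   # original query is then wrapped in a SQLQueryResult table expression.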
def raw_sql(self, query, results=False):
"""
Execute a given query string. Could have unexpected results if the
query modifies the behavior of the session in a way unknown to Ibis; be
careful.
Parameters
----------
query : string
SQL or DDL statement
results : boolean, default False
Pass True if the query has a result set
Returns
-------
cur : ImpalaCursor if results=True, None otherwise
You must call cur.release() after you are finished using the cursor.
"""
return self._execute(query, results=results)
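# Hypothetical raw_sql() calls; per the docstring, the cursor returned when
# results=True must be released once you are done with it:
#
#   client.raw_sql('REFRESH my_table')                  # no result set
#   cur = client.raw_sql('SHOW DATABASES', results=True)
#   try:
#       rows = cur.fetchall()
#   finally:
#       cur.release()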
def execute(self, expr, params=None, limit='default', async=False):
"""
Compile and execute Ibis expression using this backend client
interface, returning results in-memory in the appropriate object type
Parameters
----------
expr : Expr
limit : int, default None
For expressions yielding result sets, retrieve at most this number of
values/rows. Overrides any limit already set on the expression.
params : not yet implemented
async : boolean, default False
Returns
-------
output : input type dependent
Table expressions: pandas.DataFrame
Array expressions: pandas.Series
Scalar expressions: Python scalar value
"""
ast = self._build_ast_ensure_limit(expr, limit)
if len(ast.queries) > 1:
raise NotImplementedError
else:
return self._execute_query(ast.queries[0], async=async)
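# Hypothetical end-to-end execution (table and column names are made up):
#
#   t = client.table('purchases')
#   expr = t.group_by('region').aggregate(total=t.amount.sum())
#   df = client.execute(expr, limit=1000)   # returns a pandas.DataFrame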
def _execute_query(self, ddl, async=False):
klass = self.async_query if async else self.sync_query
return klass(self, ddl).execute()
def compile(self, expr, params=None, limit=None):
"""
Translate expression to one or more queries according to backend target
Returns
-------
output : single query or list of queries
"""
ast = self._build_ast_ensure_limit(expr, limit)
queries = [query.compile() for query in ast.queries]
return queries[0] if len(queries) == 1 else queries
def _build_ast_ensure_limit(self, expr, limit):
ast = self._build_ast(expr)
# note: limit can still be None at this point, if the global
# default_limit is None
for query in reversed(ast.queries):
if (isinstance(query, comp.Select) and
not isinstance(expr, ir.ScalarExpr) and
query.table_set is not None):
if query.limit is None:
if limit == 'default':
query_limit = options.sql.default_limit
else:
query_limit = limit
if query_limit:
query.limit = {
'n': query_limit,
'offset': 0
}
elif limit is not None and limit != 'default':
query.limit = {'n': limit,
'offset': query.limit['offset']}
return ast
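# Behaviour sketch for the limit handling above (values are illustrative):
#
#   client.execute(expr)              # unlimited expr -> options.sql.default_limit
#   client.execute(expr, limit=None)  # leaves the query unlimited
#   client.execute(expr, limit=50)    # forces n=50, keeping any existing offset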
def explain(self, expr):
"""
Query for and return the query plan associated with the indicated
expression or SQL query.
Returns
-------
plan : string
"""
if isinstance(expr, ir.Expr):
ast = self._build_ast(expr)
if len(ast.queries) > 1:
raise Exception('Multi-query expression')
query = ast.queries[0].compile()
else:
query = expr
statement = 'EXPLAIN {0}'.format(query)
with self._execute(statement, results=True) as cur:
result = self._get_list(cur)
return 'Query:\n{0}\n\n{1}'.format(util.indent(query, 2),
'\n'.join(result))
def _build_ast(self, expr):
# Implement in clients
raise NotImplementedError
class QueryPipeline(object):
"""
Execute a series of queries, possibly asynchronously, and capture any
result sets generated
Note: No query pipelines have yet been implemented
"""
pass
def execute(expr, limit='default', async=False):
backend = find_backend(expr)
return backend.execute(expr, limit=limit, async=async)
def compile(expr, limit=None):
backend = find_backend(expr)
return backend.compile(expr, limit=limit)
def find_backend(expr):
backends = []
def walk(expr):
node = expr.op()
for arg in node.flat_args():
if isinstance(arg, Client):
backends.append(arg)
elif isinstance(arg, ir.Expr):
walk(arg)
walk(expr)
backends = util.unique_by_key(backends, id)
if len(backends) > 1:
raise ValueError('Multiple backends found')
elif len(backends) == 0:
default = options.default_backend
if default is None:
raise com.IbisError('Expression depends on no backends, '
'and found no default')
return default
return backends[0]
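# Minimal sketch of how find_backend() resolves a client (hypothetical names):
#
#   t = some_client.table('events')          # DatabaseTable op references the client
#   find_backend(t.count()) is some_client   # True: the walk finds it in flat_args()
#   find_backend(ibis.literal(1))            # no backend -> options.default_backend,
#                                            # or IbisError if no default is set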
class Database(object):
def __init__(self, name, client):
self.name = name
self.client = client
def __repr__(self):
return "{0}('{1}')".format('Database', self.name)
def __dir__(self):
attrs = dir(type(self))
unqualified_tables = [self._unqualify(x) for x in self.tables]
return list(sorted(set(attrs + unqualified_tables)))
def __contains__(self, key):
return key in self.tables
@property
def tables(self):
return self.list_tables()
def __getitem__(self, key):
return self.table(key)
def __getattr__(self, key):
special_attrs = ['_ipython_display_', 'trait_names',
'_getAttributeNames']
try:
return object.__getattribute__(self, key)
except AttributeError:
if key in special_attrs:
raise
return self.table(key)
def _qualify(self, value):
return value
def _unqualify(self, value):
return value
def drop(self, force=False):
"""
Drop the database
Parameters
----------
force : boolean, default False
Drop any objects that exist, and do not fail if the database does
not exist
"""
self.client.drop_database(self.name, force=force)
def namespace(self, ns):
"""
Creates a derived Database instance for collections of objects having a
common prefix. For example, for tables fooa, foob, and fooc, creating
the "foo" namespace would enable you to reference those objects as a,
b, and c, respectively.
Returns
-------
ns : DatabaseNamespace
"""
return DatabaseNamespace(self, ns)
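# Hypothetical namespace usage, following the fooa/foob/fooc example above:
#
#   db = client.database('analytics')
#   foo = db.namespace('foo')
#   foo.list_tables()   # lists tables matching 'foo*' in 'analytics'
#   foo.a               # resolves to the underlying table 'fooa'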
def table(self, name):
"""
Return a table expression referencing a table in this database
Returns
-------
table : TableExpr
"""
qualified_name = self._qualify(name)
return self.client.table(qualified_name, self.name)
def list_tables(self, like=None):
return self.client.list_tables(like=self._qualify_like(like),
database=self.name)
def _qualify_like(self, like):
return like
class DatabaseNamespace(Database):
def __init__(self, parent, namespace):
self.parent = parent
self.namespace = namespace
def __repr__(self):
return ("{0}(database={1!r}, namespace={2!r})"
.format('DatabaseNamespace', self.name, self.namespace))
@property
def client(self):
return self.parent.client
@property
def name(self):
return self.parent.name
def _qualify(self, value):
return self.namespace + value
def _unqualify(self, value):
return value.replace(self.namespace, '', 1)
def _qualify_like(self, like):
if like:
return self.namespace + like
else:
return '{0}*'.format(self.namespace)
class DatabaseEntity(object):
pass
class View(DatabaseEntity):
def drop(self):
pass
| apache-2.0 |
quasiben/bokeh | bokeh/sampledata/daylight.py | 13 | 2683 | """ Daylight hours from http://www.sunrisesunset.com
"""
from __future__ import absolute_import
from bokeh.util.dependencies import import_required
pd = import_required('pandas',
'daylight sample data requires Pandas (http://pandas.pydata.org) to be installed')
import re
import datetime
import requests
from six.moves import xrange
from os.path import join, abspath, dirname
url = "http://sunrisesunset.com/calendar.asp"
r0 = re.compile("<[^>]+>|&nbsp;|[\r\n\t]")
r1 = re.compile(r"(\d+)(DST Begins|DST Ends)?Sunrise: (\d+):(\d\d)Sunset: (\d+):(\d\d)")
def fetch_daylight_hours(lat, lon, tz, dst, year):
"""Fetch daylight hours from sunrisesunset.com for a given location.
Parameters
----------
lat : float
Location's latitude.
lon : float
Location's longitude.
tz : int or float
Time zone offset from UTC. Use floats for half-hour time zones.
dst : int
Daylight saving type, e.g. 0 -> none, 1 -> North America, 2 -> Europe.
See sunrisesunset.com/custom.asp for other possible values.
year : int
Year (1901..2099).
"""
daylight = []
summer = 0 if lat >= 0 else 1
for month in xrange(1, 12+1):
args = dict(url=url, lat=lat, lon=lon, tz=tz, dst=dst, year=year, month=month)
response = requests.get("%(url)s?comb_city_info=_;%(lon)s;%(lat)s;%(tz)s;%(dst)s&month=%(month)s&year=%(year)s&time_type=1&wadj=1" % args)
entries = r1.findall(r0.sub("", response.text))
for day, note, sunrise_hour, sunrise_minute, sunset_hour, sunset_minute in entries:
if note == "DST Begins":
summer = 1
elif note == "DST Ends":
summer = 0
date = datetime.date(year, month, int(day))
sunrise = datetime.time(int(sunrise_hour), int(sunrise_minute))
sunset = datetime.time(int(sunset_hour), int(sunset_minute))
daylight.append([date, sunrise, sunset, summer])
return pd.DataFrame(daylight, columns=["Date", "Sunrise", "Sunset", "Summer"])
# daylight_warsaw_2013 = fetch_daylight_hours(52.2297, -21.0122, 1, 2, 2013)
# daylight_warsaw_2013.to_csv("bokeh/sampledata/daylight_warsaw_2013.csv", index=False)
def load_daylight_hours(file):
path = join(dirname(abspath(__file__)), file)
df = pd.read_csv(path, parse_dates=["Date", "Sunrise", "Sunset"])
df["Date"] = df.Date.map(lambda x: x.date())
df["Sunrise"] = df.Sunrise.map(lambda x: x.time())
df["Sunset"] = df.Sunset.map(lambda x: x.time())
return df
daylight_warsaw_2013 = load_daylight_hours("daylight_warsaw_2013.csv")
| bsd-3-clause |