repo_name | path | copies | size | content | license
---|---|---|---|---|---|
gdl-civestav-localization/cinvestav_location_fingerprinting | experimentation/__init__.py | 1 | 1691 | import os
import cPickle
import matplotlib.pyplot as plt
from datasets import DatasetManager
def plot_cost(results, data_name, plot_label):
plt.figure(plot_label)
plt.ylabel('Accuracy (m)', fontsize=30)
plt.xlabel('Epoch', fontsize=30)
plt.yscale('symlog')
plt.tick_params(axis='both', which='major', labelsize=20)
plt.grid(True)
for i in range(1, 2, 1):
y, x = zip(*results[i][data_name])
name = results[i]['Name']
plt.plot(x, y, label=name, linewidth=5.0)
plt.legend(fontsize='xx-large')
def get_metrics(test_set_y, predicted_values, model_name):
for i in xrange(len(predicted_values)):
print predicted_values[i][1]
if __name__ == '__main__':
"""
seed = 50
with open(os.path.join('experimentation', 'cinvestav_testbed_experiment_results_' + str(seed)), 'rb') as f:
results = cPickle.load(f)
plot_cost(
results=results,
data_name='cost_train',
plot_label='Cost on train phase')
plot_cost(
results=results,
data_name='cost_valid',
plot_label='Cost on valid phase')
plot_cost(
results=results,
data_name='cost_test',
plot_label='Cost on test phase')
plt.show()
"""
seed = 50
dataset, result = DatasetManager.read_dataset2('test_cleaned_dataset.csv', shared=True, seed=seed)
with open(os.path.join('trained_models', 'Logistic Regressionbrandeis_university.save'), 'rb') as f:
model = cPickle.load(f)
predicted_values = model.predict(dataset)
get_metrics(
test_set_y=result,
predicted_values=predicted_values,
model_name='Logistic Regression'
)
| gpl-3.0 |
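The script in the row above centres on unpickling a previously trained model and calling its predict method. A minimal, self-contained sketch of that pattern (hypothetical file name; Python 3's pickle standing in for cPickle, and a scikit-learn-style predict API assumed):

```python
import pickle

def load_model(path):
    # cPickle.load in the original Python 2 script; pickle is the Python 3 equivalent
    with open(path, 'rb') as f:
        return pickle.load(f)

# model = load_model('trained_models/my_model.save')  # hypothetical path
# predicted = model.predict(features)                 # assumes a scikit-learn-style model
```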
q1ang/seaborn | seaborn/tests/test_distributions.py | 14 | 8102 | import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import nose.tools as nt
import numpy.testing as npt
from numpy.testing.decorators import skipif
from . import PlotTestCase
from .. import distributions as dist
try:
import statsmodels.nonparametric.api
assert statsmodels.nonparametric.api
_no_statsmodels = False
except ImportError:
_no_statsmodels = True
class TestKDE(PlotTestCase):
rs = np.random.RandomState(0)
x = rs.randn(50)
y = rs.randn(50)
kernel = "gau"
bw = "scott"
gridsize = 128
clip = (-np.inf, np.inf)
cut = 3
def test_scipy_univariate_kde(self):
"""Test the univariate KDE estimation with scipy."""
grid, y = dist._scipy_univariate_kde(self.x, self.bw, self.gridsize,
self.cut, self.clip)
nt.assert_equal(len(grid), self.gridsize)
nt.assert_equal(len(y), self.gridsize)
for bw in ["silverman", .2]:
dist._scipy_univariate_kde(self.x, bw, self.gridsize,
self.cut, self.clip)
@skipif(_no_statsmodels)
def test_statsmodels_univariate_kde(self):
"""Test the univariate KDE estimation with statsmodels."""
grid, y = dist._statsmodels_univariate_kde(self.x, self.kernel,
self.bw, self.gridsize,
self.cut, self.clip)
nt.assert_equal(len(grid), self.gridsize)
nt.assert_equal(len(y), self.gridsize)
for bw in ["silverman", .2]:
dist._statsmodels_univariate_kde(self.x, self.kernel, bw,
self.gridsize, self.cut,
self.clip)
def test_scipy_bivariate_kde(self):
"""Test the bivariate KDE estimation with scipy."""
clip = [self.clip, self.clip]
x, y, z = dist._scipy_bivariate_kde(self.x, self.y, self.bw,
self.gridsize, self.cut, clip)
nt.assert_equal(x.shape, (self.gridsize, self.gridsize))
nt.assert_equal(y.shape, (self.gridsize, self.gridsize))
nt.assert_equal(len(z), self.gridsize)
# Test a specific bandwidth
clip = [self.clip, self.clip]
x, y, z = dist._scipy_bivariate_kde(self.x, self.y, 1,
self.gridsize, self.cut, clip)
# Test that we get an error with an invalid bandwidth
with nt.assert_raises(ValueError):
dist._scipy_bivariate_kde(self.x, self.y, (1, 2),
self.gridsize, self.cut, clip)
@skipif(_no_statsmodels)
def test_statsmodels_bivariate_kde(self):
"""Test the bivariate KDE estimation with statsmodels."""
clip = [self.clip, self.clip]
x, y, z = dist._statsmodels_bivariate_kde(self.x, self.y, self.bw,
self.gridsize,
self.cut, clip)
nt.assert_equal(x.shape, (self.gridsize, self.gridsize))
nt.assert_equal(y.shape, (self.gridsize, self.gridsize))
nt.assert_equal(len(z), self.gridsize)
@skipif(_no_statsmodels)
def test_statsmodels_kde_cumulative(self):
"""Test computation of cumulative KDE."""
grid, y = dist._statsmodels_univariate_kde(self.x, self.kernel,
self.bw, self.gridsize,
self.cut, self.clip,
cumulative=True)
nt.assert_equal(len(grid), self.gridsize)
nt.assert_equal(len(y), self.gridsize)
# make sure y is monotonically increasing
npt.assert_((np.diff(y) > 0).all())
def test_kde_cummulative_2d(self):
"""Check error if args indicate bivariate KDE and cumulative."""
with npt.assert_raises(TypeError):
dist.kdeplot(self.x, data2=self.y, cumulative=True)
def test_bivariate_kde_series(self):
df = pd.DataFrame({'x': self.x, 'y': self.y})
ax_series = dist.kdeplot(df.x, df.y)
ax_values = dist.kdeplot(df.x.values, df.y.values)
nt.assert_equal(len(ax_series.collections),
len(ax_values.collections))
nt.assert_equal(ax_series.collections[0].get_paths(),
ax_values.collections[0].get_paths())
class TestJointPlot(PlotTestCase):
rs = np.random.RandomState(sum(map(ord, "jointplot")))
x = rs.randn(100)
y = rs.randn(100)
data = pd.DataFrame(dict(x=x, y=y))
def test_scatter(self):
g = dist.jointplot("x", "y", self.data)
nt.assert_equal(len(g.ax_joint.collections), 1)
x, y = g.ax_joint.collections[0].get_offsets().T
npt.assert_array_equal(self.x, x)
npt.assert_array_equal(self.y, y)
x_bins = dist._freedman_diaconis_bins(self.x)
nt.assert_equal(len(g.ax_marg_x.patches), x_bins)
y_bins = dist._freedman_diaconis_bins(self.y)
nt.assert_equal(len(g.ax_marg_y.patches), y_bins)
def test_reg(self):
g = dist.jointplot("x", "y", self.data, kind="reg")
nt.assert_equal(len(g.ax_joint.collections), 2)
x, y = g.ax_joint.collections[0].get_offsets().T
npt.assert_array_equal(self.x, x)
npt.assert_array_equal(self.y, y)
x_bins = dist._freedman_diaconis_bins(self.x)
nt.assert_equal(len(g.ax_marg_x.patches), x_bins)
y_bins = dist._freedman_diaconis_bins(self.y)
nt.assert_equal(len(g.ax_marg_y.patches), y_bins)
nt.assert_equal(len(g.ax_joint.lines), 1)
nt.assert_equal(len(g.ax_marg_x.lines), 1)
nt.assert_equal(len(g.ax_marg_y.lines), 1)
def test_resid(self):
g = dist.jointplot("x", "y", self.data, kind="resid")
nt.assert_equal(len(g.ax_joint.collections), 1)
nt.assert_equal(len(g.ax_joint.lines), 1)
nt.assert_equal(len(g.ax_marg_x.lines), 0)
nt.assert_equal(len(g.ax_marg_y.lines), 1)
def test_hex(self):
g = dist.jointplot("x", "y", self.data, kind="hex")
nt.assert_equal(len(g.ax_joint.collections), 1)
x_bins = dist._freedman_diaconis_bins(self.x)
nt.assert_equal(len(g.ax_marg_x.patches), x_bins)
y_bins = dist._freedman_diaconis_bins(self.y)
nt.assert_equal(len(g.ax_marg_y.patches), y_bins)
def test_kde(self):
g = dist.jointplot("x", "y", self.data, kind="kde")
nt.assert_true(len(g.ax_joint.collections) > 0)
nt.assert_equal(len(g.ax_marg_x.collections), 1)
nt.assert_equal(len(g.ax_marg_y.collections), 1)
nt.assert_equal(len(g.ax_marg_x.lines), 1)
nt.assert_equal(len(g.ax_marg_y.lines), 1)
def test_color(self):
g = dist.jointplot("x", "y", self.data, color="purple")
purple = mpl.colors.colorConverter.to_rgb("purple")
scatter_color = g.ax_joint.collections[0].get_facecolor()[0, :3]
nt.assert_equal(tuple(scatter_color), purple)
hist_color = g.ax_marg_x.patches[0].get_facecolor()[:3]
nt.assert_equal(hist_color, purple)
def test_annotation(self):
g = dist.jointplot("x", "y", self.data)
nt.assert_equal(len(g.ax_joint.legend_.get_texts()), 1)
g = dist.jointplot("x", "y", self.data, stat_func=None)
nt.assert_is(g.ax_joint.legend_, None)
def test_hex_customise(self):
# test that default gridsize can be overridden
g = dist.jointplot("x", "y", self.data, kind="hex",
joint_kws=dict(gridsize=5))
nt.assert_equal(len(g.ax_joint.collections), 1)
a = g.ax_joint.collections[0].get_array()
nt.assert_equal(28, a.shape[0]) # 28 hexagons expected for gridsize 5
def test_bad_kind(self):
with nt.assert_raises(ValueError):
dist.jointplot("x", "y", self.data, kind="not_a_kind")
| bsd-3-clause |
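For orientation, a rough stand-in for the scipy-backed univariate KDE behaviour that TestKDE asserts on (an illustrative sketch, not seaborn's actual _scipy_univariate_kde):

```python
import numpy as np
from scipy.stats import gaussian_kde

def univariate_kde(x, gridsize=128, cut=3):
    # Evaluate a Gaussian KDE on a fixed-size grid extended by cut * bandwidth
    kde = gaussian_kde(x)
    bw = kde.scotts_factor() * x.std(ddof=1)
    grid = np.linspace(x.min() - bw * cut, x.max() + bw * cut, gridsize)
    return grid, kde(grid)

# grid, y = univariate_kde(np.random.RandomState(0).randn(50))
# len(grid) == len(y) == 128, mirroring the length assertions in TestKDE
```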
bnoi/scikit-tracker | sktracker/tracker/cost_function/tests/test_abstract_cost_functions.py | 1 | 1500 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from nose.tools import assert_raises
import sys
import pandas as pd
import numpy as np
from sktracker.tracker.cost_function import AbstractCostFunction
def test_abstract_cost_function():
cost_func = AbstractCostFunction(context={}, parameters={})
assert cost_func.get_block() == None
def test_abstract_cost_function_check_context():
cost_func = AbstractCostFunction(context={'cost': 1}, parameters={})
assert_raises(ValueError, cost_func.check_context, 'test_string', str)
cost_func.context['test_string'] = 5
assert_raises(TypeError, cost_func.check_context, 'test_string', str)
cost_func.context['test_string'] = "i am a string"
### This fails in py2.7
if sys.version_info[0] > 2:
cost_func.check_context('test_string', str)
assert True
def test_abstract_cost_function_check_columns():
cost_func = AbstractCostFunction(context={}, parameters={})
df = pd.DataFrame([np.arange(0, 5), np.arange(20, 25)],
columns=['x', 'y', 'z', 'w', 't'])
cost_func.check_columns(df, ['t', 'z', 'y'])
cost_func.check_columns([df], ['t', 'z', 'y'])
df = pd.DataFrame([np.arange(0, 4), np.arange(20, 24)],
columns=['x', 'y', 'w', 't'])
assert_raises(ValueError, cost_func.check_columns, df, ['t', 'z', 'y'])
| bsd-3-clause |
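A hedged sketch of the interface these tests exercise: a minimal context/parameters holder with the same check_context error behaviour (ValueError for a missing key, TypeError for a wrong type). This is an assumption for illustration, not sktracker's actual AbstractCostFunction:

```python
class CostFunctionSketch(object):
    def __init__(self, context, parameters):
        self.context = context
        self.parameters = parameters

    def get_block(self):
        # The abstract base has no cost block until a subclass builds one
        return None

    def check_context(self, key, expected_type):
        if key not in self.context:
            raise ValueError("Missing context key: %s" % key)
        if not isinstance(self.context[key], expected_type):
            raise TypeError("Context key %r must be of type %s" % (key, expected_type))
```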
jreback/pandas | pandas/io/formats/latex.py | 2 | 25201 | """
Module for formatting output data in Latex.
"""
from abc import ABC, abstractmethod
from typing import Iterator, List, Optional, Sequence, Tuple, Type, Union
import numpy as np
from pandas.core.dtypes.generic import ABCMultiIndex
from pandas.io.formats.format import DataFrameFormatter
def _split_into_full_short_caption(
caption: Optional[Union[str, Tuple[str, str]]]
) -> Tuple[str, str]:
"""Extract full and short captions from caption string/tuple.
Parameters
----------
caption : str or tuple, optional
Either table caption string or tuple (full_caption, short_caption).
If string is provided, then it is treated as table full caption,
while short_caption is considered an empty string.
Returns
-------
full_caption, short_caption : tuple
Tuple of full_caption, short_caption strings.
"""
if caption:
if isinstance(caption, str):
full_caption = caption
short_caption = ""
else:
try:
full_caption, short_caption = caption
except ValueError as err:
msg = "caption must be either a string or a tuple of two strings"
raise ValueError(msg) from err
else:
full_caption = ""
short_caption = ""
return full_caption, short_caption
class RowStringConverter(ABC):
r"""Converter for dataframe rows into LaTeX strings.
Parameters
----------
formatter : `DataFrameFormatter`
Instance of `DataFrameFormatter`.
multicolumn: bool, optional
Whether to use \multicolumn macro.
multicolumn_format: str, optional
Multicolumn format.
multirow: bool, optional
Whether to use \multirow macro.
"""
def __init__(
self,
formatter: DataFrameFormatter,
multicolumn: bool = False,
multicolumn_format: Optional[str] = None,
multirow: bool = False,
):
self.fmt = formatter
self.frame = self.fmt.frame
self.multicolumn = multicolumn
self.multicolumn_format = multicolumn_format
self.multirow = multirow
self.clinebuf: List[List[int]] = []
self.strcols = self._get_strcols()
self.strrows = list(zip(*self.strcols))
def get_strrow(self, row_num: int) -> str:
"""Get string representation of the row."""
row = self.strrows[row_num]
is_multicol = (
row_num < self.column_levels and self.fmt.header and self.multicolumn
)
is_multirow = (
row_num >= self.header_levels
and self.fmt.index
and self.multirow
and self.index_levels > 1
)
is_cline_maybe_required = is_multirow and row_num < len(self.strrows) - 1
crow = self._preprocess_row(row)
if is_multicol:
crow = self._format_multicolumn(crow)
if is_multirow:
crow = self._format_multirow(crow, row_num)
lst = []
lst.append(" & ".join(crow))
lst.append(" \\\\")
if is_cline_maybe_required:
cline = self._compose_cline(row_num, len(self.strcols))
lst.append(cline)
return "".join(lst)
@property
def _header_row_num(self) -> int:
"""Number of rows in header."""
return self.header_levels if self.fmt.header else 0
@property
def index_levels(self) -> int:
"""Integer number of levels in index."""
return self.frame.index.nlevels
@property
def column_levels(self) -> int:
return self.frame.columns.nlevels
@property
def header_levels(self) -> int:
nlevels = self.column_levels
if self.fmt.has_index_names and self.fmt.show_index_names:
nlevels += 1
return nlevels
def _get_strcols(self) -> List[List[str]]:
"""String representation of the columns."""
if self.fmt.frame.empty:
strcols = [[self._empty_info_line]]
else:
strcols = self.fmt.get_strcols()
# reestablish the MultiIndex that has been joined by get_strcols()
if self.fmt.index and isinstance(self.frame.index, ABCMultiIndex):
out = self.frame.index.format(
adjoin=False,
sparsify=self.fmt.sparsify,
names=self.fmt.has_index_names,
na_rep=self.fmt.na_rep,
)
# index.format will sparsify repeated entries with empty strings
# so pad these with some empty space
def pad_empties(x):
for pad in reversed(x):
if pad:
break
return [x[0]] + [i if i else " " * len(pad) for i in x[1:]]
gen = (pad_empties(i) for i in out)
# Add empty spaces for each column level
clevels = self.frame.columns.nlevels
out = [[" " * len(i[-1])] * clevels + i for i in gen]
# Add the column names to the last index column
cnames = self.frame.columns.names
if any(cnames):
new_names = [i if i else "{}" for i in cnames]
out[self.frame.index.nlevels - 1][:clevels] = new_names
# Get rid of old multiindex column and add new ones
strcols = out + strcols[1:]
return strcols
@property
def _empty_info_line(self):
return (
f"Empty {type(self.frame).__name__}\n"
f"Columns: {self.frame.columns}\n"
f"Index: {self.frame.index}"
)
def _preprocess_row(self, row: Sequence[str]) -> List[str]:
"""Preprocess elements of the row."""
if self.fmt.escape:
crow = _escape_symbols(row)
else:
crow = [x if x else "{}" for x in row]
if self.fmt.bold_rows and self.fmt.index:
crow = _convert_to_bold(crow, self.index_levels)
return crow
def _format_multicolumn(self, row: List[str]) -> List[str]:
r"""
Combine columns belonging to a group to a single multicolumn entry
according to self.multicolumn_format
e.g.:
a & & & b & c &
will become
\multicolumn{3}{l}{a} & b & \multicolumn{2}{l}{c}
"""
row2 = row[: self.index_levels]
ncol = 1
coltext = ""
def append_col():
# write multicolumn if needed
if ncol > 1:
row2.append(
f"\\multicolumn{{{ncol:d}}}{{{self.multicolumn_format}}}"
f"{{{coltext.strip()}}}"
)
# don't modify where not needed
else:
row2.append(coltext)
for c in row[self.index_levels :]:
# if next col has text, write the previous
if c.strip():
if coltext:
append_col()
coltext = c
ncol = 1
# if not, add it to the previous multicolumn
else:
ncol += 1
# write last column name
if coltext:
append_col()
return row2
def _format_multirow(self, row: List[str], i: int) -> List[str]:
r"""
Check following rows, whether row should be a multirow
e.g.: becomes:
a & 0 & \multirow{2}{*}{a} & 0 &
& 1 & & 1 &
b & 0 & \cline{1-2}
b & 0 &
"""
for j in range(self.index_levels):
if row[j].strip():
nrow = 1
for r in self.strrows[i + 1 :]:
if not r[j].strip():
nrow += 1
else:
break
if nrow > 1:
# overwrite non-multirow entry
row[j] = f"\\multirow{{{nrow:d}}}{{*}}{{{row[j].strip()}}}"
# save when to end the current block with \cline
self.clinebuf.append([i + nrow - 1, j + 1])
return row
def _compose_cline(self, i: int, icol: int) -> str:
"""
Create clines after multirow-blocks are finished.
"""
lst = []
for cl in self.clinebuf:
if cl[0] == i:
lst.append(f"\n\\cline{{{cl[1]:d}-{icol:d}}}")
# remove entries that have been written to buffer
self.clinebuf = [x for x in self.clinebuf if x[0] != i]
return "".join(lst)
class RowStringIterator(RowStringConverter):
"""Iterator over rows of the header or the body of the table."""
@abstractmethod
def __iter__(self) -> Iterator[str]:
"""Iterate over LaTeX string representations of rows."""
class RowHeaderIterator(RowStringIterator):
"""Iterator for the table header rows."""
def __iter__(self) -> Iterator[str]:
for row_num in range(len(self.strrows)):
if row_num < self._header_row_num:
yield self.get_strrow(row_num)
class RowBodyIterator(RowStringIterator):
"""Iterator for the table body rows."""
def __iter__(self) -> Iterator[str]:
for row_num in range(len(self.strrows)):
if row_num >= self._header_row_num:
yield self.get_strrow(row_num)
class TableBuilderAbstract(ABC):
"""
Abstract table builder producing string representation of LaTeX table.
Parameters
----------
formatter : `DataFrameFormatter`
Instance of `DataFrameFormatter`.
column_format: str, optional
Column format, for example, 'rcl' for three columns.
multicolumn: bool, optional
Use multicolumn to enhance MultiIndex columns.
multicolumn_format: str, optional
The alignment for multicolumns, similar to column_format.
multirow: bool, optional
Use multirow to enhance MultiIndex rows.
caption: str, optional
Table caption.
short_caption: str, optional
Table short caption.
label: str, optional
LaTeX label.
position: str, optional
Float placement specifier, for example, 'htb'.
"""
def __init__(
self,
formatter: DataFrameFormatter,
column_format: Optional[str] = None,
multicolumn: bool = False,
multicolumn_format: Optional[str] = None,
multirow: bool = False,
caption: Optional[str] = None,
short_caption: Optional[str] = None,
label: Optional[str] = None,
position: Optional[str] = None,
):
self.fmt = formatter
self.column_format = column_format
self.multicolumn = multicolumn
self.multicolumn_format = multicolumn_format
self.multirow = multirow
self.caption = caption
self.short_caption = short_caption
self.label = label
self.position = position
def get_result(self) -> str:
"""String representation of LaTeX table."""
elements = [
self.env_begin,
self.top_separator,
self.header,
self.middle_separator,
self.env_body,
self.bottom_separator,
self.env_end,
]
result = "\n".join([item for item in elements if item])
trailing_newline = "\n"
result += trailing_newline
return result
@property
@abstractmethod
def env_begin(self) -> str:
"""Beginning of the environment."""
@property
@abstractmethod
def top_separator(self) -> str:
"""Top level separator."""
@property
@abstractmethod
def header(self) -> str:
"""Header lines."""
@property
@abstractmethod
def middle_separator(self) -> str:
"""Middle level separator."""
@property
@abstractmethod
def env_body(self) -> str:
"""Environment body."""
@property
@abstractmethod
def bottom_separator(self) -> str:
"""Bottom level separator."""
@property
@abstractmethod
def env_end(self) -> str:
"""End of the environment."""
class GenericTableBuilder(TableBuilderAbstract):
"""Table builder producing string representation of LaTeX table."""
@property
def header(self) -> str:
iterator = self._create_row_iterator(over="header")
return "\n".join(list(iterator))
@property
def top_separator(self) -> str:
return "\\toprule"
@property
def middle_separator(self) -> str:
return "\\midrule" if self._is_separator_required() else ""
@property
def env_body(self) -> str:
iterator = self._create_row_iterator(over="body")
return "\n".join(list(iterator))
def _is_separator_required(self) -> bool:
return bool(self.header and self.env_body)
@property
def _position_macro(self) -> str:
r"""Position macro, extracted from self.position, like [h]."""
return f"[{self.position}]" if self.position else ""
@property
def _caption_macro(self) -> str:
r"""Caption macro, extracted from self.caption.
With short caption:
\caption[short_caption]{caption_string}.
Without short caption:
\caption{caption_string}.
"""
if self.caption:
return "".join(
[
r"\caption",
f"[{self.short_caption}]" if self.short_caption else "",
f"{{{self.caption}}}",
]
)
return ""
@property
def _label_macro(self) -> str:
r"""Label macro, extracted from self.label, like \label{ref}."""
return f"\\label{{{self.label}}}" if self.label else ""
def _create_row_iterator(self, over: str) -> RowStringIterator:
"""Create iterator over header or body of the table.
Parameters
----------
over : {'body', 'header'}
Over what to iterate.
Returns
-------
RowStringIterator
Iterator over body or header.
"""
iterator_kind = self._select_iterator(over)
return iterator_kind(
formatter=self.fmt,
multicolumn=self.multicolumn,
multicolumn_format=self.multicolumn_format,
multirow=self.multirow,
)
def _select_iterator(self, over: str) -> Type[RowStringIterator]:
"""Select proper iterator over table rows."""
if over == "header":
return RowHeaderIterator
elif over == "body":
return RowBodyIterator
else:
msg = f"'over' must be either 'header' or 'body', but {over} was provided"
raise ValueError(msg)
class LongTableBuilder(GenericTableBuilder):
"""Concrete table builder for longtable.
>>> from pandas import DataFrame
>>> from pandas.io.formats import format as fmt
>>> df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
>>> formatter = fmt.DataFrameFormatter(df)
>>> builder = LongTableBuilder(formatter, caption='a long table',
... label='tab:long', column_format='lrl')
>>> table = builder.get_result()
>>> print(table)
\\begin{longtable}{lrl}
\\caption{a long table}
\\label{tab:long}\\\\
\\toprule
{} & a & b \\\\
\\midrule
\\endfirsthead
\\caption[]{a long table} \\\\
\\toprule
{} & a & b \\\\
\\midrule
\\endhead
\\midrule
\\multicolumn{3}{r}{{Continued on next page}} \\\\
\\midrule
\\endfoot
<BLANKLINE>
\\bottomrule
\\endlastfoot
0 & 1 & b1 \\\\
1 & 2 & b2 \\\\
\\end{longtable}
<BLANKLINE>
"""
@property
def env_begin(self) -> str:
first_row = (
f"\\begin{{longtable}}{self._position_macro}{{{self.column_format}}}"
)
elements = [first_row, f"{self._caption_and_label()}"]
return "\n".join([item for item in elements if item])
def _caption_and_label(self) -> str:
if self.caption or self.label:
double_backslash = "\\\\"
elements = [f"{self._caption_macro}", f"{self._label_macro}"]
caption_and_label = "\n".join([item for item in elements if item])
caption_and_label += double_backslash
return caption_and_label
else:
return ""
@property
def middle_separator(self) -> str:
iterator = self._create_row_iterator(over="header")
# the content between \endfirsthead and \endhead commands
# mitigates repeated List of Tables entries in the final LaTeX
# document when dealing with longtable environments; GH #34360
elements = [
"\\midrule",
"\\endfirsthead",
f"\\caption[]{{{self.caption}}} \\\\" if self.caption else "",
self.top_separator,
self.header,
"\\midrule",
"\\endhead",
"\\midrule",
f"\\multicolumn{{{len(iterator.strcols)}}}{{r}}"
"{{Continued on next page}} \\\\",
"\\midrule",
"\\endfoot\n",
"\\bottomrule",
"\\endlastfoot",
]
if self._is_separator_required():
return "\n".join(elements)
return ""
@property
def bottom_separator(self) -> str:
return ""
@property
def env_end(self) -> str:
return "\\end{longtable}"
class RegularTableBuilder(GenericTableBuilder):
"""Concrete table builder for regular table.
>>> from pandas import DataFrame
>>> from pandas.io.formats import format as fmt
>>> df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
>>> formatter = fmt.DataFrameFormatter(df)
>>> builder = RegularTableBuilder(formatter, caption='caption', label='lab',
... column_format='lrc')
>>> table = builder.get_result()
>>> print(table)
\\begin{table}
\\centering
\\caption{caption}
\\label{lab}
\\begin{tabular}{lrc}
\\toprule
{} & a & b \\\\
\\midrule
0 & 1 & b1 \\\\
1 & 2 & b2 \\\\
\\bottomrule
\\end{tabular}
\\end{table}
<BLANKLINE>
"""
@property
def env_begin(self) -> str:
elements = [
f"\\begin{{table}}{self._position_macro}",
"\\centering",
f"{self._caption_macro}",
f"{self._label_macro}",
f"\\begin{{tabular}}{{{self.column_format}}}",
]
return "\n".join([item for item in elements if item])
@property
def bottom_separator(self) -> str:
return "\\bottomrule"
@property
def env_end(self) -> str:
return "\n".join(["\\end{tabular}", "\\end{table}"])
class TabularBuilder(GenericTableBuilder):
"""Concrete table builder for tabular environment.
>>> from pandas import DataFrame
>>> from pandas.io.formats import format as fmt
>>> df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
>>> formatter = fmt.DataFrameFormatter(df)
>>> builder = TabularBuilder(formatter, column_format='lrc')
>>> table = builder.get_result()
>>> print(table)
\\begin{tabular}{lrc}
\\toprule
{} & a & b \\\\
\\midrule
0 & 1 & b1 \\\\
1 & 2 & b2 \\\\
\\bottomrule
\\end{tabular}
<BLANKLINE>
"""
@property
def env_begin(self) -> str:
return f"\\begin{{tabular}}{{{self.column_format}}}"
@property
def bottom_separator(self) -> str:
return "\\bottomrule"
@property
def env_end(self) -> str:
return "\\end{tabular}"
class LatexFormatter:
r"""
Used to render a DataFrame to a LaTeX tabular/longtable environment output.
Parameters
----------
formatter : `DataFrameFormatter`
longtable : bool, default False
Use longtable environment.
column_format : str, default None
The columns format as specified in `LaTeX table format
<https://en.wikibooks.org/wiki/LaTeX/Tables>`__ e.g. 'rcl' for 3 columns
multicolumn : bool, default False
Use \multicolumn to enhance MultiIndex columns.
multicolumn_format : str, default 'l'
The alignment for multicolumns, similar to `column_format`
multirow : bool, default False
Use \multirow to enhance MultiIndex rows.
caption : str or tuple, optional
Tuple (full_caption, short_caption),
which results in \caption[short_caption]{full_caption};
if a single string is passed, no short caption will be set.
label : str, optional
The LaTeX label to be placed inside ``\label{}`` in the output.
position : str, optional
The LaTeX positional argument for tables, to be placed after
``\begin{}`` in the output.
See Also
--------
HTMLFormatter
"""
def __init__(
self,
formatter: DataFrameFormatter,
longtable: bool = False,
column_format: Optional[str] = None,
multicolumn: bool = False,
multicolumn_format: Optional[str] = None,
multirow: bool = False,
caption: Optional[Union[str, Tuple[str, str]]] = None,
label: Optional[str] = None,
position: Optional[str] = None,
):
self.fmt = formatter
self.frame = self.fmt.frame
self.longtable = longtable
self.column_format = column_format
self.multicolumn = multicolumn
self.multicolumn_format = multicolumn_format
self.multirow = multirow
self.caption, self.short_caption = _split_into_full_short_caption(caption)
self.label = label
self.position = position
def to_string(self) -> str:
"""
Render a DataFrame to a LaTeX tabular, longtable, or table/tabular
environment output.
"""
return self.builder.get_result()
@property
def builder(self) -> TableBuilderAbstract:
"""Concrete table builder.
Returns
-------
TableBuilder
"""
builder = self._select_builder()
return builder(
formatter=self.fmt,
column_format=self.column_format,
multicolumn=self.multicolumn,
multicolumn_format=self.multicolumn_format,
multirow=self.multirow,
caption=self.caption,
short_caption=self.short_caption,
label=self.label,
position=self.position,
)
def _select_builder(self) -> Type[TableBuilderAbstract]:
"""Select proper table builder."""
if self.longtable:
return LongTableBuilder
if any([self.caption, self.label, self.position]):
return RegularTableBuilder
return TabularBuilder
@property
def column_format(self) -> Optional[str]:
"""Column format."""
return self._column_format
@column_format.setter
def column_format(self, input_column_format: Optional[str]) -> None:
"""Setter for column format."""
if input_column_format is None:
self._column_format = (
self._get_index_format() + self._get_column_format_based_on_dtypes()
)
elif not isinstance(input_column_format, str):
raise ValueError(
f"column_format must be str or unicode, "
f"not {type(input_column_format)}"
)
else:
self._column_format = input_column_format
def _get_column_format_based_on_dtypes(self) -> str:
"""Get column format based on data type.
Right alignment for numbers and left - for strings.
"""
def get_col_type(dtype):
if issubclass(dtype.type, np.number):
return "r"
return "l"
dtypes = self.frame.dtypes._values
return "".join(map(get_col_type, dtypes))
def _get_index_format(self) -> str:
"""Get index column format."""
return "l" * self.frame.index.nlevels if self.fmt.index else ""
def _escape_symbols(row: Sequence[str]) -> List[str]:
"""Carry out string replacements for special symbols.
Parameters
----------
row : list
List of string, that may contain special symbols.
Returns
-------
list
list of strings with the special symbols replaced.
"""
return [
(
x.replace("\\", "\\textbackslash ")
.replace("_", "\\_")
.replace("%", "\\%")
.replace("$", "\\$")
.replace("#", "\\#")
.replace("{", "\\{")
.replace("}", "\\}")
.replace("~", "\\textasciitilde ")
.replace("^", "\\textasciicircum ")
.replace("&", "\\&")
if (x and x != "{}")
else "{}"
)
for x in row
]
def _convert_to_bold(crow: Sequence[str], ilevels: int) -> List[str]:
"""Convert elements in ``crow`` to bold."""
return [
f"\\textbf{{{x}}}" if j < ilevels and x.strip() not in ["", "{}"] else x
for j, x in enumerate(crow)
]
if __name__ == "__main__":
import doctest
doctest.testmod()
| bsd-3-clause |
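In normal use the builders in latex.py are reached through DataFrame.to_latex rather than instantiated directly. A small usage sketch (availability of the caption/label/position arguments depends on the pandas version):

```python
import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
# caption/label given, longtable not -> routed through RegularTableBuilder
print(df.to_latex(caption="caption", label="lab", column_format="lrc"))
# longtable=True would route through LongTableBuilder instead
```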
liyi193328/seq2seq | seq2seq/contrib/learn/tests/dataframe/arithmetic_transform_test.py | 62 | 2343 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for arithmetic transforms."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.learn.python.learn.dataframe import tensorflow_dataframe as df
from tensorflow.python.platform import test
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
class SumTestCase(test.TestCase):
"""Test class for `Sum` transform."""
def testSum(self):
if not HAS_PANDAS:
return
num_rows = 100
pandas_df = pd.DataFrame({
"a": np.arange(num_rows),
"b": np.arange(num_rows, 2 * num_rows)
})
frame = df.TensorFlowDataFrame.from_pandas(
pandas_df, shuffle=False, batch_size=num_rows)
frame["a+b"] = frame["a"] + frame["b"]
expected_sum = pandas_df["a"] + pandas_df["b"]
actual_sum = frame.run_one_batch()["a+b"]
np.testing.assert_array_equal(expected_sum, actual_sum)
class DifferenceTestCase(test.TestCase):
"""Test class for `Difference` transform."""
def testDifference(self):
if not HAS_PANDAS:
return
num_rows = 100
pandas_df = pd.DataFrame({
"a": np.arange(num_rows),
"b": np.arange(num_rows, 2 * num_rows)
})
frame = df.TensorFlowDataFrame.from_pandas(
pandas_df, shuffle=False, batch_size=num_rows)
frame["a-b"] = frame["a"] - frame["b"]
expected_diff = pandas_df["a"] - pandas_df["b"]
actual_diff = frame.run_one_batch()["a-b"]
np.testing.assert_array_equal(expected_diff, actual_diff)
if __name__ == "__main__":
test.main()
| apache-2.0 |
jakobworldpeace/scikit-learn | doc/tutorial/text_analytics/solutions/exercise_02_sentiment.py | 104 | 3139 | """Build a sentiment analysis / polarity model
Sentiment analysis can be cast as a binary text classification problem,
that is, fitting a linear classifier on features extracted from the text
of the user messages so as to guess whether the opinion of the author is
positive or negative.
In this example we will use a movie review dataset.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.datasets import load_files
from sklearn.model_selection import train_test_split
from sklearn import metrics
if __name__ == "__main__":
# NOTE: we put the following in a 'if __name__ == "__main__"' protected
# block to be able to use a multi-core grid search that also works under
# Windows, see: http://docs.python.org/library/multiprocessing.html#windows
# The multiprocessing module is used as the backend of joblib.Parallel
# that is used when n_jobs != 1 in GridSearchCV
# the training data folder must be passed as first argument
movie_reviews_data_folder = sys.argv[1]
dataset = load_files(movie_reviews_data_folder, shuffle=False)
print("n_samples: %d" % len(dataset.data))
# split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.25, random_state=None)
# TASK: Build a vectorizer / classifier pipeline that filters out tokens
# that are too rare or too frequent
pipeline = Pipeline([
('vect', TfidfVectorizer(min_df=3, max_df=0.95)),
('clf', LinearSVC(C=1000)),
])
# TASK: Build a grid search to find out whether unigrams or bigrams are
# more useful.
# Fit the pipeline on the training set using grid search for the parameters
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
}
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1)
grid_search.fit(docs_train, y_train)
# TASK: print the mean and std for each candidate along with the parameter
# settings for all the candidates explored by grid search.
n_candidates = len(grid_search.cv_results_['params'])
for i in range(n_candidates):
print(i, 'params - %s; mean - %0.2f; std - %0.2f'
% (grid_search.cv_results_['params'][i],
grid_search.cv_results_['mean_test_score'][i],
grid_search.cv_results_['std_test_score'][i]))
# TASK: Predict the outcome on the testing set and store it in a variable
# named y_predicted
y_predicted = grid_search.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Print and plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
# import matplotlib.pyplot as plt
# plt.matshow(cm)
# plt.show()
| bsd-3-clause |
justrypython/EAST | svm_model_v2.py | 1 | 2801 | #encoding:UTF-8
import os
import numpy as np
import sys
import cv2
import matplotlib.pyplot as plt
from sklearn.svm import NuSVC, SVC
import datetime
import pickle
#calculate the area
def area(p):
p = p.reshape((-1, 2))
return 0.5 * abs(sum(x0*y1 - x1*y0
for ((x0, y0), (x1, y1)) in segments(p)))
def segments(p):
return zip(p, np.concatenate((p[1:], [p[0]])))
def calc_xy(p0, p1, p2):
cos = calc_cos(p0, p1, p2)
dis = calc_dis(p0, p2)
return dis * cos, dis * np.sqrt(1 - np.square(cos))
def calc_dis(p0, p1):
return np.sqrt(np.sum(np.square(p0-p1)))
def calc_cos(p0, p1, p2):
A = p1 - p0
B = p2 - p0
num = np.dot(A, B)
demon = np.linalg.norm(A) * np.linalg.norm(B)
return num / demon
def calc_new_xy(boxes):
box0 = boxes[:8]
box1 = boxes[8:]
x, y = calc_xy(box1[4:6], box1[6:], box0[:2])
dis = calc_dis(box1[4:6], box1[6:])
area0 = area(box0)
area1 = area(box1)
return x/dis, y/dis
if __name__ == '__main__':
test = True
path = '/media/zhaoke/b0685ee4-63e3-4691-ae02-feceacff6996/data/'
paths = os.listdir(path)
paths = [i for i in paths if '.txt' in i]
boxes = np.empty((800000, 9))
cnt = 0
for txt in paths:
f = open(path+txt, 'r')
lines = f.readlines()
f.close()
lines = [i.replace('\n', '').split(',') for i in lines]
lines = np.array(lines).astype(np.uint32)
boxes[cnt*10:cnt*10+len(lines)] = lines
cnt += 1
zeros = boxes==[0, 0, 0, 0, 0, 0, 0, 0, 0]
zeros_labels = zeros.all(axis=1)
zeros_labels = np.where(zeros_labels==True)
idboxes = boxes[boxes[:, 8]==7]
idboxes = np.tile(idboxes[:, :8], (1, 10))
idboxes = idboxes.reshape((-1, 8))
boxes = np.delete(boxes, zeros_labels[0], axis=0)
idboxes = np.delete(idboxes, zeros_labels[0], axis=0)
boxes_idboxes = np.concatenate((boxes[:, :8], idboxes), axis=1)
start_time = datetime.datetime.now()
print start_time
new_xy = np.apply_along_axis(calc_new_xy, 1, boxes_idboxes)
end_time = datetime.datetime.now()
print end_time - start_time
if test:
with open('clf_address_v2.pickle', 'rb') as f:
clf = pickle.load(f)
cnt = 0
for i, xy in enumerate(new_xy):
cls = int(clf.predict([xy])[0])
if cls == int(boxes[i, 8]):
cnt += 1
if i % 10000 == 0 and i != 0:
print i, ':', float(cnt) / i
else:
clf = SVC()
start_time = datetime.datetime.now()
print start_time
clf.fit(new_xy[:], boxes[:, 8])
end_time = datetime.datetime.now()
print end_time - start_time
with open('clf.pickle', 'wb') as f:
pickle.dump(clf, f)
print 'end' | gpl-3.0 |
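The area() helper in the row above is the shoelace formula. An equivalent vectorised restatement for reference (simplified names; input assumed to be an (N, 2) array of ordered vertices):

```python
import numpy as np

def polygon_area(points):
    # Shoelace formula over ordered polygon vertices, shape (N, 2)
    x, y = points[:, 0], points[:, 1]
    return 0.5 * abs(np.dot(x, np.roll(y, -1)) - np.dot(y, np.roll(x, -1)))

# polygon_area(np.array([[0, 0], [1, 0], [1, 1], [0, 1]]))  -> 1.0
```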
abbeymiles/aima-python | submissions/Blue/myNN.py | 10 | 3071 | from sklearn import datasets
from sklearn.neural_network import MLPClassifier
import traceback
from submissions.Blue import music
class DataFrame:
data = []
feature_names = []
target = []
target_names = []
musicATRB = DataFrame()
musicATRB.data = []
targetData = []
'''
Extract data from the CORGIS Music Library.
Most 'hit' songs average 48-52 bars and no more than ~3 minutes (180 seconds)...
'''
allSongs = music.get_songs()
for song in allSongs:
try:
length = float(song['song']["duration"])
targetData.append(length)
genre = song['artist']['terms'] #String
title = song['song']['title'] #String
# release = float(song['song']['Release'])
musicATRB.data.append([genre, title])
except:
traceback.print_exc()
musicATRB.feature_names = [
'Genre',
'Title',
'Release',
'Length',
]
musicATRB.target = []
def musicTarget(duration):
if duration <= 210: #if the song is less than 3.5 minutes (210 seconds) long
return 1
return 0
for i in targetData:
tt = musicTarget(i)
musicATRB.target.append(tt)
musicATRB.target_names = [
'Not a hit song',
'Could be a hit song',
]
Examples = {
'Music': musicATRB,
}
'''
Make a custom classifier.
'''
mlpc = MLPClassifier(
hidden_layer_sizes = (100,),
activation = 'relu',
solver='sgd', # 'adam',
alpha = 0.0001,
# batch_size='auto',
learning_rate = 'adaptive', # 'constant',
# power_t = 0.5,
max_iter = 1000, # 200,
shuffle = True,
# random_state = None,
# tol = 1e-4,
# verbose = False,
# warm_start = False,
# momentum = 0.9,
# nesterovs_momentum = True,
# early_stopping = False,
# validation_fraction = 0.1,
# beta_1 = 0.9,
# beta_2 = 0.999,
# epsilon = 1e-8,
)
'''
Try scaling the data.
'''
musicScaled = DataFrame()
def setupScales(grid):
global min, max
min = list(grid[0])
max = list(grid[0])
for row in range(1, len(grid)):
for col in range(len(grid[row])):
cell = grid[row][col]
if cell < min[col]:
min[col] = cell
if cell > max[col]:
max[col] = cell
def scaleGrid(grid):
newGrid = []
for row in range(len(grid)):
newRow = []
for col in range(len(grid[row])):
try:
cell = grid[row][col]
scaled = (cell - min[col]) \
/ (max[col] - min[col])
newRow.append(scaled)
except:
pass
newGrid.append(newRow)
return newGrid
setupScales(musicATRB.data)
musicScaled.data = scaleGrid(musicATRB.data)
musicScaled.feature_names = musicATRB.feature_names
musicScaled.target = musicATRB.target
musicScaled.target_names = musicATRB.target_names
Examples = {
'musicDefault': {
'frame': musicATRB,
},
'MusicSGD': {
'frame': musicATRB,
'mlpc': mlpc
},
'MusicScaled': {
'frame': musicScaled,
},
} | mit |
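The setupScales/scaleGrid pair above hand-rolls min-max normalisation. For purely numeric feature matrices the same effect is usually obtained with scikit-learn's MinMaxScaler; a hedged sketch, not a drop-in replacement for the mixed string/numeric rows built in this submission:

```python
import numpy as np
from sklearn.preprocessing import MinMaxScaler

X = np.array([[1.0, 200.0], [2.0, 180.0], [3.0, 240.0]])  # illustrative numeric features
X_scaled = MinMaxScaler().fit_transform(X)                 # each column mapped onto [0, 1]
```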
dtkav/naclports | ports/ipython-ppapi/kernel.py | 7 | 12026 | # Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A simple shell that uses the IPython messaging system."""
# Override platform information.
import platform
platform.system = lambda: "pnacl"
platform.release = lambda: "chrome"
import time
import json
import logging
import sys
import Queue
import thread
stdin_input = Queue.Queue()
shell_input = Queue.Queue()
stdin_output = Queue.Queue()
shell_output = Queue.Queue()
iopub_output = Queue.Queue()
sys_stdout = sys.stdout
sys_stderr = sys.stderr
def emit(s):
print >> sys_stderr, "EMITTING: %s" % (s)
time.sleep(1)
import IPython
from IPython.core.interactiveshell import InteractiveShell, InteractiveShellABC
from IPython.utils.traitlets import Type, Dict, Instance
from IPython.core.displayhook import DisplayHook
from IPython.utils import py3compat
from IPython.utils.py3compat import builtin_mod
from IPython.utils.jsonutil import json_clean, encode_images
from IPython.core.displaypub import DisplayPublisher
from IPython.config.configurable import Configurable
from IPython.core.error import StdinNotImplementedError
# module defined in shell.cc for communicating via pepper API
from pyppapi import nacl_instance
def CreateMessage(msg_type, parent_header=None, content=None):
if parent_header is None:
parent_header = {}
if content is None:
content = {}
return {
'header': {'msg_type': msg_type},
'parent_header': parent_header,
'content': content,
'msg_type': msg_type,
}
class MsgOutStream(object):
"""Class to overrides stderr and stdout."""
def __init__(self, stream_name):
self._stream_name = stream_name
self._parent_header = {}
def SetParentHeader(self, parent_header):
self._parent_header = parent_header
def close(self):
pass
def flush(self):
pass
def write(self, string):
iopub_output.put(CreateMessage('stream', parent_header=self._parent_header,
content={'name': self._stream_name, 'data': string}))
def writelines(self, sequence):
for string in sequence:
self.write(string)
# override sys.stdout and sys.stderr to broadcast on iopub
stdout_stream = MsgOutStream('stdout')
stderr_stream = MsgOutStream('stderr')
sys.stdout = stdout_stream
sys.stderr = stderr_stream
class PepperShellDisplayHook(DisplayHook):
parent_header = Dict({})
def set_parent_header(self, parent_header):
"""Set the parent for outbound messages."""
self.parent_header = parent_header
def start_displayhook(self):
self.content = {}
def write_output_prompt(self):
self.content['execution_count'] = self.prompt_count
def write_format_data(self, format_dict, md_dict=None):
self.content['data'] = encode_images(format_dict)
self.content['metadata'] = md_dict
def finish_displayhook(self):
sys.stdout.flush()
sys.stderr.flush()
iopub_output.put(CreateMessage('pyout', parent_header=self.parent_header,
content=self.content))
self.content = None
class PepperDisplayPublisher(DisplayPublisher):
parent_header = Dict({})
def set_parent_header(self, parent_header):
self.parent_header = parent_header
def _flush_streams(self):
"""flush IO Streams prior to display"""
sys.stdout.flush()
sys.stderr.flush()
def publish(self, source, data, metadata=None):
self._flush_streams()
if metadata is None:
metadata = {}
self._validate_data(source, data, metadata)
content = {}
content['source'] = source
content['data'] = encode_images(data)
content['metadata'] = metadata
iopub_output.put(CreateMessage('display_data', content=json_clean(content),
parent_header=self.parent_header))
def clear_output(self, stdout=True, stderr=True, other=True):
content = dict(stdout=stdout, stderr=stderr, other=other)
if stdout:
sys.stdout.write('\r')
if stderr:
sys.stderr.write('\r')
self._flush_streams()
iopub_output.put(CreateMessage('clear_output', content=content,
parent_header=self.parent_header))
class PepperInteractiveShell(InteractiveShell):
"""A subclass of InteractiveShell for the Pepper Messagin API."""
displayhook_class = Type(PepperShellDisplayHook)
display_pub_class = Type(PepperDisplayPublisher)
@staticmethod
def enable_gui(gui):
pass
InteractiveShellABC.register(PepperInteractiveShell)
class PepperKernel(Configurable):
shell = Instance('IPython.core.interactiveshell.InteractiveShellABC')
shell_class = Type(PepperInteractiveShell)
def __init__(self):
self.shell = self.shell_class.instance(parent=self)
self.shell.run_cell("""
import os
matplotlib_config_dir = '/mplconfigdir'
os.environ['XDG_CONFIG_HOME'] = matplotlib_config_dir
os.environ['TMP'] = ''
import matplotlib
import matplotlib.cbook
""")
shell = PepperKernel().shell
# Taken from IPython 2.x branch, IPython/kernel/zmq/ipykernel.py
def _complete(msg):
c = msg['content']
try:
cpos = int(c['cursor_pos'])
except:
# If we don't get something that we can convert to an integer, at
# least attempt the completion guessing the cursor is at the end of
# the text, if there's any, and otherwise of the line
cpos = len(c['text'])
if cpos==0:
cpos = len(c['line'])
return shell.complete(c['text'], c['line'], cpos)
# Special message to indicate the NaCl kernel is ready.
iopub_output.put(CreateMessage('status', content={'execution_state': 'nacl_ready'}))
def _no_raw_input():
"""Raise StdinNotImplementedError if the active frontend doesn't support
stdin."""
raise StdinNotImplementedError("raw_input was called, but this "
"frontend does not support stdin.")
def _raw_input(prompt, parent_header):
# Flush output before making the request.
sys.stderr.flush()
sys.stdout.flush()
# flush the stdin socket, to purge stale replies
while True:
try:
stdin_input.get_nowait()
except Queue.Empty:
break
# Send the input request.
content = json_clean(dict(prompt=prompt))
stdin_output.put(CreateMessage('input_request', content=content,
parent_header=parent_header))
# Await a response.
while True:
try:
reply = stdin_input.get()
except Exception:
print "Invalid Message"
except KeyboardInterrupt:
# re-raise KeyboardInterrupt, to truncate traceback
raise KeyboardInterrupt
else:
break
try:
value = py3compat.unicode_to_str(reply['content']['value'])
except:
print "Got bad raw_input reply: "
print reply
value = ''
if value == '\x04':
# EOF
raise EOFError
return value
def main_loop():
execution_count = 1
while 1:
iopub_output.put(CreateMessage('status', content={'execution_state': 'idle'}))
msg = shell_input.get()
iopub_output.put(CreateMessage('status', content={'execution_state': 'busy'}))
if not 'header' in msg:
continue
request_header = msg['header']
if not 'msg_type' in request_header:
continue
msg_type = request_header['msg_type']
if msg_type == 'execute_request':
try:
content = msg[u'content']
code = content[u'code']
silent = content[u'silent']
store_history = content.get(u'store_history', not silent)
except:
self.log.error("Got bad msg: ")
self.log.error("%s", msg)
continue
# Replace raw_input. Note that is not sufficient to replace
# raw_input in the user namespace.
if content.get('allow_stdin', False):
raw_input = lambda prompt='': _raw_input(prompt, request_header)
input = lambda prompt='': eval(raw_input(prompt))
else:
raw_input = input = lambda prompt='' : _no_raw_input()
if py3compat.PY3:
_sys_raw_input = builtin_mod.input
builtin_mod.input = raw_input
else:
_sys_raw_input = builtin_mod.raw_input
_sys_eval_input = builtin_mod.input
builtin_mod.raw_input = raw_input
builtin_mod.input = input
# Let output streams know which message the output is for
stdout_stream.SetParentHeader(request_header)
stderr_stream.SetParentHeader(request_header)
shell.displayhook.set_parent_header(request_header)
shell.display_pub.set_parent_header(request_header)
status = 'ok'
content = {}
try:
shell.run_cell(msg['content']['code'],
store_history=store_history,
silent=silent)
except Exception, ex:
status = 'error'
logging.exception('Exception occured while running cell')
finally:
# Restore raw_input.
if py3compat.PY3:
builtin_mod.input = _sys_raw_input
else:
builtin_mod.raw_input = _sys_raw_input
builtin_mod.input = _sys_eval_input
content = {'status': status,
'execution_count': execution_count}
if status == 'ok':
content['payload'] = []
content['user_variables'] = {}
content['user_expressions'] = {}
elif status == 'error':
content['ename'] = type(ex).__name__
content['evalue'] = str(ex)
content['traceback'] = []
execution_count += 1
if status == 'error':
iopub_output.put(CreateMessage('pyerr', parent_header=request_header,
content={
'execution_count': execution_count,
'ename': type(ex).__name__,
'evalue': str(ex),
'traceback': []
}
))
shell_output.put(CreateMessage('execute_reply', parent_header=request_header,
content=content))
elif msg_type == 'complete_request':
# Taken from IPython 2.x branch, IPython/kernel/zmq/ipykernel.py
txt, matches = _complete(msg)
matches = {'matches' : matches,
'matched_text' : txt,
'status' : 'ok'}
matches = json_clean(matches)
shell_output.put(CreateMessage('complete_reply',
parent_header = request_header,
content = matches))
elif msg_type == 'object_info_request':
# Taken from IPython 2.x branch, IPython/kernel/zmq/ipykernel.py
content = msg['content']
object_info = shell.object_inspect(content['oname'],
detail_level = content.get('detail_level', 0))
# Before we send this object over, we scrub it for JSON usage
oinfo = json_clean(object_info)
shell_output.put(CreateMessage('object_info_reply',
parent_header = request_header,
content = oinfo))
elif msg_type == 'restart':
# break out of this loop, ending this program.
# The main event loop in shell.cc will then
# run this program again.
break
elif msg_type == 'kill':
# Raise an exception so that the function
# running this script will return -1, resulting
# in no restart of this script.
raise RuntimeError
thread.start_new_thread(main_loop, ())
def deal_message(msg):
channel = msg['stream']
content = json.loads(msg['json'])
queues = {'shell': shell_input, 'stdin': stdin_input}
queue = queues[channel]
queue.put(content)
def send_message(stream, msg):
nacl_instance.send_raw_object({
'stream': stream,
'json': json.dumps(msg)
})
while 1:
msg = nacl_instance.wait_for_message(timeout=1, sleeptime=10000)
try:
deal_message(msg)
except:
pass
output_streams = [
(stdin_output, 'stdin'),
(shell_output, 'shell'),
(iopub_output, 'iopub')
]
for msg_queue, stream in output_streams:
msg = None
try:
msg = msg_queue.get_nowait()
send_message(stream, msg)
except Queue.Empty:
pass
| bsd-3-clause |
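The kernel above is organised around thread-safe queues per channel (shell, stdin, iopub) with a worker thread draining the shell queue. A stripped-down Python 3 sketch of that pattern (queue/threading standing in for the Python 2 Queue/thread modules; message contents are invented for illustration):

```python
import queue
import threading

shell_input = queue.Queue()
iopub_output = queue.Queue()

def main_loop():
    while True:
        msg = shell_input.get()  # blocks until a request arrives
        if msg.get('msg_type') == 'kill':
            break
        # ... execute the request here, then publish results on iopub
        iopub_output.put({'msg_type': 'status',
                          'content': {'execution_state': 'idle'}})

worker = threading.Thread(target=main_loop, daemon=True)
worker.start()
shell_input.put({'msg_type': 'kill'})  # shut the sketch down again
worker.join()
```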
LarsDu/DeepNuc | deepnuc/nucbinaryclassifier.py | 2 | 15464 | import tensorflow as tf
import numpy as np
import sklearn.metrics as metrics
#from databatcher import DataBatcher
import nucconvmodel
#import dubiotools as dbt
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pprint
from itertools import cycle
import os
import sys
#Logging imports
from logger import Logger
from nucinference import NucInference
from collections import OrderedDict
class NucBinaryClassifier(NucInference):
use_onehot_labels = True
def __init__(self,
sess,
train_batcher,
test_batcher,
num_epochs,
learning_rate,
batch_size,
seq_len,
save_dir,
keep_prob=0.5,
beta1=0.9,
concat_revcom_input=False,
nn_method_key="inferenceA",
pos_index=1):
"""NucBinaryClassifier encapsulates training and data
evaluation for
:param sess: tf.Session() object
:param train_batcher: DataBatcher object for training set
:param test_batcher: DataBatcher object for test set
:param num_epochs: Number of epoch cycles to perform training
:param learning_rate: Learning rate
:param batch_size: Mini-batch pull size
:param seq_len: Sequence length
:param save_dir: Root save directory for binary classification model
:param keep_prob: Probability of keeping weight for dropout
regularization
:param beta1: Beta1 parameter for AdamOptimizer
:param concat_revcom_input: If true, concatenate reverse
complement of nucleotide sequence to input vector
:param nn_method_key: Dictionary key for inference
method found in nucconvmodels.py file. Determines which model
to use. Example: "inferenceA" will run nucconvmodels.inferenceA
:param pos_index: The index to use for the positive class
(defaults to 1)
:returns: a NucBinaryClassifier object
:rtype: NucBinaryClassifier
"""
super(NucBinaryClassifier, self).__init__(sess,
train_batcher,
test_batcher,
num_epochs,
learning_rate,
batch_size,
seq_len,
save_dir,
keep_prob,
beta1,
concat_revcom_input,
nn_method_key="inferenceA")
if self.train_batcher.num_classes != 2:
print "Error, more than two classes detected in train batcher"
else:
self.num_classes = 2
#The index for the label that should be considered the positive class
self.pos_index=pos_index
self.save_on_epoch = 5
def build_model(self):
self.dna_seq_placeholder = tf.placeholder(tf.float32,
shape=[None,self.seq_len,4],
name="dna_seq")
self.labels_placeholder = tf.placeholder(tf.float32,
shape=[None, self.num_classes],
name="labels")
self.keep_prob_placeholder = tf.placeholder(tf.float32,name="keep_prob")
self.logits, self.network = self.nn_method(self.dna_seq_placeholder,
self.keep_prob_placeholder,
self.num_classes)
self.probs = tf.nn.softmax(self.logits)
self.loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(labels=self.labels_placeholder,
logits=self.logits))
'''
Calculate metrics. num_true_positives is the number of true positives for the current batch.
The table below shows the index produced by tf.argmax for each case:
+-----+-----------+---------+
| | Classifier| Label |
+-----+-----------+---------+
| TP | 1 | 1 |
+-----+-----------+---------+
| FP | 1 | 0 |
+-----+-----------+---------+
| TN | 0 | 0 |
+-----+-----------+---------+
| FN | 0 | 1 |
+-----+-----------+---------+
Precision = TP/(TP+FP)
Recall = TP/(TP+FN)
F1-score = 2*(Prec*Rec)/(Prec+Rec)
# Note: I ended up not using the tp,fp,tn,fn ops because I ended up calculating
# these metrics using sklearn.
'''
#correct = TN+TP #Used for calculating accuracy
self.logits_ind = tf.argmax(self.logits,1)
self.labels_ind = tf.argmax(self.labels_placeholder,1)
#Create max_mask of logits (ie: [-.5,.5] --> [0 1]. Note logits have
# shape [batch_size * num_classes= 2]
#self.inverse_logits_col = tf.ones_like(self.logits_ind) - self.logits_ind
#self.max_mask_logits = tf.concat([self.inverse_logits_col,self.logits_ind],1)
#True positives where logits_ind+labels_ind == 2
#True negatives where logits_ind+labels_ind == 0
self.sum_ind = tf.add(self.logits_ind,self.labels_ind)
self.true_positives = tf.equal(self.sum_ind,2*tf.ones_like(self.sum_ind)) #bool
self.num_true_positives =tf.reduce_sum(tf.cast(self.true_positives, tf.int32))
#For FP classifier index > label index
self.false_positives=tf.greater(self.logits_ind,self.labels_ind)
self.num_false_positives = tf.reduce_sum(tf.cast(self.false_positives, tf.int32))
self.true_negatives = tf.equal(self.sum_ind,tf.zeros_like(self.sum_ind)) #bool
self.num_true_negatives= tf.reduce_sum(tf.cast(self.true_negatives,tf.int32))
#For FN classifier index < label index
self.false_negatives=tf.less(self.logits_ind,self.labels_ind)
self.num_false_negatives = tf.reduce_sum(tf.cast(self.false_negatives,tf.int32))
#num correct can be used to calculate accuracy
self.correct = tf.equal(self.logits_ind,self.labels_ind)
self.num_correct= tf.reduce_sum(tf.cast(self.correct, tf.int32))
self.relevance =self.network.relevance_backprop(tf.multiply(self.logits,
self.labels_placeholder))
'''Write and consolidate summaries'''
self.loss_summary = tf.summary.scalar('loss',self.loss)
self.summary_writer = tf.summary.FileWriter(self.summary_dir,self.sess.graph)
self.summary_op = tf.summary.merge([self.loss_summary])
#Note: Do not use tf.summary.merge_all() here. This will break encapsulation for
# cross validation and lead to crashes when training multiple models
# Add gradient ops to graph with learning rate
self.train_op = tf.train.AdamOptimizer(self.learning_rate,
beta1=self.beta1).minimize(self.loss)
self.vars = tf.trainable_variables()
self.var_names = [var.name for var in self.vars]
#print "Trainable variables:\n"
#for vname in self.var_names:
# print vname
self.saver = tf.train.Saver()
self.init_op = tf.global_variables_initializer()
#Important note: Restoring model does not require init_op.
#In fact calling tf.global_variables_initializer() after loading a model
#will overwrite loaded weights
self.sess.run(self.init_op)
self.load(self.checkpoint_dir)
def eval_model_metrics(self,
batcher,
save_plots=False,
image_name ='metrics.png',
eval_batch_size=50):
"""
Note: This method only works for binary classification
as auPRC and auROC graphs only apply to binary classificaton problems.
TODO: Modify this code to perform auROC generation
for one-vs-all in the case of multiclass classification.
"""
#Ref: http://scikit-learn.org/stable/modules/model_evaluation.html#roc-metrics
##auROC calculations
#Keep batch size at 1 for now to ensure 1 full epoch is evaluated
all_labels = np.zeros((batcher.num_records,self.num_classes), dtype = np.float32)
all_probs = np.zeros((batcher.num_records,self.num_classes), dtype = np.float32)
#num_correct = 0 #counts number of correct predictions
num_whole_pulls = batcher.num_records//eval_batch_size
num_single_pulls = batcher.num_records%eval_batch_size
num_steps = num_whole_pulls+num_single_pulls
for i in range(num_steps):
if i<num_whole_pulls:
batch_size=eval_batch_size
else:
batch_size=1
labels_batch, dna_seq_batch = batcher.pull_batch(batch_size)
feed_dict = {
self.dna_seq_placeholder:dna_seq_batch,
self.labels_placeholder:labels_batch,
self.keep_prob_placeholder:1.0
}
cur_prob= self.sess.run(self.probs,feed_dict=feed_dict)
#Fill labels array
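            #start_ind: offset of this batch within the full-epoch arrays (whole eval_batch_size batches first, then the leftover records pulled one at a time)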
if batch_size > 1:
start_ind = batch_size*i
elif batch_size == 1:
start_ind = num_whole_pulls*eval_batch_size+(i-num_whole_pulls)
else:
print "Never reach this condition"
all_labels[start_ind:start_ind+batch_size,:] = labels_batch
all_probs[start_ind:start_ind+batch_size,:] = cur_prob
#Calculate metrics and save results in a dict
md = self.calc_classifier_metrics(all_labels,all_probs)
md["epoch"]=self.epoch
md["step"]=self.step
#print "Testing accuracy",float(num_correct)/float(batcher.num_records)
print 'Num examples: %d Num correct: %d Accuracy: %0.04f' % \
(batcher.num_records, md["num_correct"], md["accuracy"])+'\n'
if save_plots:
###Plot some metrics
plot_colors = cycle(['cyan','blue','orange','teal'])
#print "Labels shape",all_labels.shape
#print "Probs shape",all_probs.shape
#print "Preds shape",all_preds.shape
#Generate auROC plot axes
fig1,ax1 = plt.subplots(2)
fig1.subplots_adjust(bottom=0.2)
ax1[0].plot([0,1],[0,1],color='navy',lw=2,linestyle='--')
ax1[0].set_xbound(0.0,1.0)
ax1[0].set_ybound(0.0,1.05)
ax1[0].set_xlabel('False Positive Rate')
ax1[0].set_ylabel('True Positive Rate')
ax1[0].set_title('auROC')
#plt.legend(loc='lower right')
ax1[0].plot(md["fpr"],md["tpr"],color=plot_colors.next(),
lw=2,linestyle='-',label='auROC curve (area=%0.2f)' % md["auroc"] )
#Generate auPRC plot axes
#ax1[1].plot([0,1],[1,1],color='royalblue',lw=2,linestyle='--')
ax1[1].set_xlabel('Precision')
ax1[1].set_ylabel('Recall')
ax1[1].set_title('auPRC')
ax1[1].plot(md["thresh_precision"],md["thresh_recall"],color=plot_colors.next(),
lw=2,linestyle='-',label='auPRC curve (area=%0.2f)' % md["auprc"] )
ax1[1].set_xbound(0.0,1.0)
ax1[1].set_ybound(0.0,1.05)
#Note: avg prec score is the area under the prec recall curve
#Note: Presumably class 1 (pos examples) should be the only f1 score we focus on
#print "F1 score for class",i,"is",f1_score
plt.tight_layout()
plt_fname = self.save_dir+os.sep+image_name
print "Saving auROC image to",plt_fname
fig1.savefig(plt_fname)
#Return metrics dictionary
return md
def calc_classifier_metrics(self,all_labels,all_probs):
"""Calculate some metrics for the dataset
return dictionary with metrics
:param all_probs: nx2 prob values
:param all_labels: nx2 labels
:returns: dictionary of metrics
:rtype: dict()
"""
num_records = all_probs.shape[0]
all_preds = np.zeros((num_records, self.num_classes),dtype = np.float32)
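        #Convert class probabilities to one-hot predictions by setting the argmax column of each row to 1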
all_preds[np.arange(num_records),all_probs.argmax(1)] = 1
#Calculate accuracy
num_correct = metrics.accuracy_score(all_labels[:,self.pos_index],all_preds[:,self.pos_index],normalize=False)
accuracy = num_correct/float(all_preds.shape[0])
###Calculate auROC
#http://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html
#metrics.roc_curve(y_true, y_score[, ...]) #y_score is probs
fpr,tpr,_ = metrics.roc_curve(all_labels[:,self.pos_index],
all_probs[:,self.pos_index],
pos_label=self.pos_index)
auroc = metrics.auc(fpr,tpr)
thresh_precision,thresh_recall,prc_thresholds = metrics.precision_recall_curve(
all_labels[:,self.pos_index],
all_probs[:,self.pos_index])
#Calculate precision, recall, and f1-score for threshold = 0.5
#confusion_matrix = metrics.confusion_matrix(all_labels[:,self.pos_index],all_probs[:,self.pos_index])
precision, recall, f1_score, support = metrics.precision_recall_fscore_support(
all_labels[:,self.pos_index],
all_preds[:,self.pos_index],
pos_label=self.pos_index)
precision = precision[self.pos_index]
recall = recall[self.pos_index]
f1_score = f1_score[self.pos_index]
support = support[self.pos_index]
auprc = metrics.average_precision_score(all_labels[:,self.pos_index],
all_probs[:,self.pos_index])
return OrderedDict([
("num_correct",num_correct),
("accuracy",accuracy),
("auroc",auroc),
("auprc",auprc),
("fpr",fpr),
("tpr",tpr),
("precision",precision),
("recall",recall),
("f1_score",f1_score),
("support",support),
("thresh_precision",thresh_precision),
("thresh_recall",thresh_recall),
("prc_thresholds",prc_thresholds)
])
| gpl-3.0 |
klahnakoski/ActiveData | vendor/mo_testing/fuzzytestcase.py | 1 | 9712 | # encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Contact: Kyle Lahnakoski ([email protected])
#
from __future__ import unicode_literals
import datetime
import types
import unittest
from mo_collections.unique_index import UniqueIndex
import mo_dots
from mo_dots import coalesce, is_container, is_list, literal_field, unwrap, to_data, is_data, is_many
from mo_future import is_text, zip_longest, first
from mo_logs import Except, Log, suppress_exception
from mo_logs.strings import expand_template, quote
import mo_math
from mo_math import is_number, log10
from mo_times import dates
class FuzzyTestCase(unittest.TestCase):
"""
COMPARE STRUCTURE AND NUMBERS!
ONLY THE ATTRIBUTES IN THE expected STRUCTURE ARE TESTED TO EXIST
EXTRA ATTRIBUTES ARE IGNORED.
NUMBERS ARE MATCHED BY ...
* places (UP TO GIVEN SIGNIFICANT DIGITS)
* digits (UP TO GIVEN DECIMAL PLACES, WITH NEGATIVE MEANING LEFT-OF-UNITS)
* delta (MAXIMUM ABSOLUTE DIFFERENCE FROM expected)
"""
def __init__(self, *args, **kwargs):
unittest.TestCase.__init__(self, *args, **kwargs)
self.default_places=15
def set_default_places(self, places):
"""
WHEN COMPARING float, HOW MANY DIGITS ARE SIGNIFICANT BY DEFAULT
"""
self.default_places=places
def assertAlmostEqual(self, test_value, expected, msg=None, digits=None, places=None, delta=None):
if delta or digits:
assertAlmostEqual(test_value, expected, msg=msg, digits=digits, places=places, delta=delta)
else:
assertAlmostEqual(test_value, expected, msg=msg, digits=digits, places=coalesce(places, self.default_places), delta=delta)
def assertEqual(self, test_value, expected, msg=None, digits=None, places=None, delta=None):
self.assertAlmostEqual(test_value, expected, msg=msg, digits=digits, places=places, delta=delta)
def assertRaises(self, problem=None, function=None, *args, **kwargs):
if function is None:
return RaiseContext(self, problem=problem or Exception)
with RaiseContext(self, problem=problem):
function(*args, **kwargs)
class RaiseContext(object):
def __init__(self, this, problem=Exception):
self.this = this
self.problem = problem
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
if not exc_val:
Log.error("Expecting an error")
f = Except.wrap(exc_val)
if isinstance(self.problem, (list, tuple)):
problems = self.problem
else:
problems = [self.problem]
causes = []
for problem in problems:
if isinstance(problem, object.__class__) and issubclass(problem, BaseException) and isinstance(exc_val, problem):
return True
try:
self.this.assertIn(problem, f)
return True
except Exception as cause:
causes.append(cause)
Log.error("problem is not raised", cause=first(causes))
def assertAlmostEqual(test, expected, digits=None, places=None, msg=None, delta=None):
show_detail = True
test = unwrap(test)
expected = unwrap(expected)
try:
if test is None and (is_null_op(expected) or expected is None):
return
elif test is expected:
return
elif is_text(expected):
assertAlmostEqualValue(test, expected, msg=msg, digits=digits, places=places, delta=delta)
elif isinstance(test, UniqueIndex):
if test ^ expected:
Log.error("Sets do not match")
elif is_data(expected) and is_data(test):
for k, e in unwrap(expected).items():
t = test.get(k)
assertAlmostEqual(t, e, msg=coalesce(msg, "")+"key "+quote(k)+": ", digits=digits, places=places, delta=delta)
elif is_data(expected):
if is_many(test):
test = list(test)
if len(test) != 1:
Log.error("Expecting data, not a list")
test = test[0]
for k, e in expected.items():
try:
t = test[k]
assertAlmostEqual(t, e, msg=msg, digits=digits, places=places, delta=delta)
continue
except:
pass
t = mo_dots.get_attr(test, literal_field(k))
assertAlmostEqual(t, e, msg=msg, digits=digits, places=places, delta=delta)
elif is_container(test) and isinstance(expected, set):
test = set(to_data(t) for t in test)
if len(test) != len(expected):
Log.error(
"Sets do not match, element count different:\n{{test|json|indent}}\nexpecting{{expectedtest|json|indent}}",
test=test,
expected=expected
)
try:
return len(set(test)|expected) == len(expected)
except:
for e in expected:
for t in test:
try:
assertAlmostEqual(t, e, msg=msg, digits=digits, places=places, delta=delta)
break
except Exception as _:
pass
else:
Log.error("Sets do not match. {{value|json}} not found in {{test|json}}", value=e, test=test)
elif isinstance(expected, types.FunctionType):
return expected(test)
elif hasattr(test, "__iter__") and hasattr(expected, "__iter__"):
if test.__class__.__name__ == "ndarray": # numpy
test = test.tolist()
elif test.__class__.__name__ == "DataFrame": # pandas
test = test[test.columns[0]].values.tolist()
elif test.__class__.__name__ == "Series": # pandas
test = test.values.tolist()
if not expected and test == None:
return
if expected == None:
expected = [] # REPRESENT NOTHING
for t, e in zip_longest(test, expected):
assertAlmostEqual(t, e, msg=msg, digits=digits, places=places, delta=delta)
else:
assertAlmostEqualValue(test, expected, msg=msg, digits=digits, places=places, delta=delta)
except Exception as cause:
Log.error(
"{{test|json|limit(10000)}} does not match expected {{expected|json|limit(10000)}}",
test=test if show_detail else "[can not show]",
expected=expected if show_detail else "[can not show]",
cause=cause
)
def assertAlmostEqualValue(test, expected, digits=None, places=None, msg=None, delta=None):
"""
Snagged from unittest/case.py, then modified (Aug2014)
"""
if is_null_op(expected):
        if test == None: # pandas dataframes reject any comparison with an exception!
return
else:
raise AssertionError(expand_template("{{test|json}} != NULL", locals()))
if expected == None: # None has no expectations
return
if test == expected:
# shortcut
return
if isinstance(expected, (dates.Date, datetime.datetime, datetime.date)):
return assertAlmostEqualValue(
dates.Date(test).unix,
dates.Date(expected).unix,
msg=msg,
digits=digits,
places=places,
delta=delta
)
if not is_number(expected):
# SOME SPECIAL CASES, EXPECTING EMPTY CONTAINERS IS THE SAME AS EXPECTING NULL
if is_list(expected) and len(expected) == 0 and test == None:
return
if is_data(expected) and not expected.keys() and test == None:
return
if test != expected:
raise AssertionError(expand_template("{{test|json}} != {{expected|json}}", locals()))
return
elif not is_number(test):
try:
# ASSUME IT IS A UTC DATE
test = dates.parse(test).unix
except Exception as e:
raise AssertionError(expand_template("{{test|json}} != {{expected}}", locals()))
num_param = 0
if digits != None:
num_param += 1
if places != None:
num_param += 1
if delta != None:
num_param += 1
if num_param > 1:
raise TypeError("specify only one of digits, places or delta")
if digits is not None:
with suppress_exception:
diff = log10(abs(test-expected))
if diff < digits:
return
standardMsg = expand_template("{{test|json}} != {{expected|json}} within {{digits}} decimal places", locals())
elif delta is not None:
if abs(test - expected) <= delta:
return
standardMsg = expand_template("{{test|json}} != {{expected|json}} within {{delta}} delta", locals())
else:
if places is None:
places = 15
with suppress_exception:
diff = mo_math.log10(abs(test-expected))
if diff == None:
return # Exactly the same
if diff < mo_math.ceiling(mo_math.log10(abs(test)))-places:
return
standardMsg = expand_template("{{test|json}} != {{expected|json}} within {{places}} places", locals())
raise AssertionError(coalesce(msg, "") + ": (" + standardMsg + ")")
def is_null_op(v):
return v.__class__.__name__ == "NullOp"
| mpl-2.0 |
alexsavio/scikit-learn | examples/gaussian_process/plot_gpc_iris.py | 81 | 2231 | """
=====================================================
Gaussian process classification (GPC) on iris dataset
=====================================================
This example illustrates the predicted probability of GPC for an isotropic
and anisotropic RBF kernel on a two-dimensional version of the iris dataset.
The anisotropic RBF kernel obtains slightly higher log-marginal-likelihood by
assigning different length-scales to the two feature dimensions.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
y = np.array(iris.target, dtype=int)
h = .02 # step size in the mesh
kernel = 1.0 * RBF([1.0])
gpc_rbf_isotropic = GaussianProcessClassifier(kernel=kernel).fit(X, y)
kernel = 1.0 * RBF([1.0, 1.0])
gpc_rbf_anisotropic = GaussianProcessClassifier(kernel=kernel).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
titles = ["Isotropic RBF", "Anisotropic RBF"]
plt.figure(figsize=(10, 5))
for i, clf in enumerate((gpc_rbf_isotropic, gpc_rbf_anisotropic)):
# Plot the predicted probabilities. For that, we will assign a color to
# each point in the mesh [x_min, m_max]x[y_min, y_max].
plt.subplot(1, 2, i + 1)
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape((xx.shape[0], xx.shape[1], 3))
plt.imshow(Z, extent=(x_min, x_max, y_min, y_max), origin="lower")
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=np.array(["r", "g", "b"])[y])
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.title("%s, LML: %.3f" %
(titles[i], clf.log_marginal_likelihood(clf.kernel_.theta)))
plt.tight_layout()
plt.show()
| bsd-3-clause |
georgid/sms-tools | lectures/7-Sinusoidal-plus-residual-model/plots-code/stochasticSynthesisFrame.py | 2 | 2997 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, hanning, triang, blackmanharris, resample
import math
import sys, os, time
from scipy.fftpack import fft, ifft
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import utilFunctions as UF
def stochasticModelFrame(x, w, N, stocf) :
# x: input array sound, w: analysis window, N: FFT size,
# stocf: decimation factor of mag spectrum for stochastic analysis
hN = N/2 # size of positive spectrum
hM = (w.size)/2 # half analysis window size
pin = hM # initialize sound pointer in middle of analysis window
fftbuffer = np.zeros(N) # initialize buffer for FFT
yw = np.zeros(w.size) # initialize output sound frame
w = w / sum(w) # normalize analysis window
#-----analysis-----
xw = x[pin-hM:pin+hM] * w # window the input sound
X = fft(xw) # compute FFT
mX = 20 * np.log10( abs(X[:hN]) ) # magnitude spectrum of positive frequencies
mXenv = resample(np.maximum(-200, mX), mX.size*stocf) # decimate the mag spectrum
pX = np.angle(X[:hN])
#-----synthesis-----
mY = resample(mXenv, hN) # interpolate to original size
pY = 2*np.pi*np.random.rand(hN) # generate phase random values
Y = np.zeros(N, dtype = complex)
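	# Mirror the positive-frequency spectrum with conjugate phases into the negative frequencies so the inverse FFT below is essentially real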
Y[:hN] = 10**(mY/20) * np.exp(1j*pY) # generate positive freq.
Y[hN+1:] = 10**(mY[:0:-1]/20) * np.exp(-1j*pY[:0:-1]) # generate negative freq.
fftbuffer = np.real( ifft(Y) ) # inverse FFT
y = fftbuffer*N/2
return mX, pX, mY, pY, y
# example call of stochasticModel function
if __name__ == '__main__':
(fs, x) = UF.wavread('../../../sounds/ocean.wav')
w = np.hanning(1024)
N = 1024
stocf = 0.1
maxFreq = 10000.0
lastbin = N*maxFreq/fs
first = 1000
last = first+w.size
mX, pX, mY, pY, y = stochasticModelFrame(x[first:last], w, N, stocf)
plt.figure(1, figsize=(9, 5))
plt.subplot(3,1,1)
plt.plot(np.arange(0, fs/2.0, fs/float(N)), mY, 'r', lw=1.5, label="mY")
plt.axis([0, maxFreq, -78, max(mX)+0.5])
plt.title('mY (stochastic approximation of mX)')
plt.subplot(3,1,2)
plt.plot(np.arange(0, fs/2.0, fs/float(N)), pY-np.pi, 'c', lw=1.5, label="pY")
plt.axis([0, maxFreq, -np.pi, np.pi])
	plt.title('pY (random phases)')
plt.subplot(3,1,3)
plt.plot(np.arange(first, last)/float(fs), y, 'b', lw=1.5)
plt.axis([first/float(fs), last/float(fs), min(y), max(y)])
plt.title('yst')
plt.tight_layout()
plt.savefig('stochasticSynthesisFrame.png')
plt.show()
| agpl-3.0 |
ansobolev/regCMPostProc | src/plot.py | 1 | 2816 | #!/usr/bin/env python
# RegCM postprocessing tool
# Copyright (C) 2014 Aliou, Addisu, Kanhu, Andrey
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import matplotlib.pyplot as plt
import cartopy
import cartopy.crs as ccrs
import cartopy.feature as cfeature
from value import Value
class Plotter(object):
def __init__(self, value):
self._value = value
self.lat, self.lon = value.latlon
def plot(self, coastlines=True,
countries=True,
places=True,
title=None,
levels = None):
if levels is not None:
l_min, l_max = levels
l = (l_max - l_min) / 10
levels = range(l_min, l_max + l, l)
projection = ccrs.PlateCarree()
self.fig, self.ax = plt.subplots(subplot_kw={'projection': projection})
if coastlines:
self.ax.coastlines('10m')
if countries:
countries = cfeature.NaturalEarthFeature(
scale='110m', category='cultural', name='admin_0_countries')
self.ax.add_feature(countries, color='r', alpha=0.1)
if places:
places = cfeature.NaturalEarthFeature(
scale='110m', category='cultural', name='populated_places')
self.ax.add_feature(places, color='b', hatch='o')
cx = self.ax.contourf(self.lon, self.lat, self._value.data, transform=ccrs.PlateCarree(),cmap='bwr', levels=levels)
# To mask out OCEAN or LAND
#ax.add_feature(cfeature.OCEAN)
#ax.add_feature(cfeature.LAND)
self.ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True,
linewidth=1, color='blue', alpha=0.5, linestyle='-')
self.fig.colorbar(cx)
times = self._value.limits['time']
plt.title(self._value.title + ' [' + self._value.units + ']\n' +
'mean between ' + str(times[0]) + ' and ' + str(times[1]) + '\n')
def show(self):
plt.show()
def save(self, filename, format):
plt.savefig(filename + '.' + format)
def close(self):
plt.close(self.fig)
if __name__ == "__main__":
pass | gpl-3.0 |
gotomypc/scikit-learn | examples/linear_model/plot_sgd_iris.py | 286 | 2202 | """
========================================
Plot multi-class SGD on the iris dataset
========================================
Plot decision surface of multi-class SGD on iris dataset.
The hyperplanes corresponding to the three one-versus-all (OVA) classifiers
are represented by the dashed lines.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.linear_model import SGDClassifier
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
colors = "bry"
# shuffle
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
h = .02 # step size in the mesh
clf = SGDClassifier(alpha=0.001, n_iter=100).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('tight')
# Plot also the training points
for i, color in zip(clf.classes_, colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=plt.cm.Paired)
plt.title("Decision surface of multi-class SGD")
plt.axis('tight')
# Plot the three one-against-all classifiers
xmin, xmax = plt.xlim()
ymin, ymax = plt.ylim()
coef = clf.coef_
intercept = clf.intercept_
def plot_hyperplane(c, color):
def line(x0):
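        # Solve coef[c, 0]*x0 + coef[c, 1]*y + intercept[c] = 0 for y to draw the one-vs-all boundary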
return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]
plt.plot([xmin, xmax], [line(xmin), line(xmax)],
ls="--", color=color)
for i, color in zip(clf.classes_, colors):
plot_hyperplane(i, color)
plt.legend()
plt.show()
| bsd-3-clause |
Jay-Jay-D/LeanSTP | Algorithm.Framework/Portfolio/MinimumVariancePortfolioOptimizer.py | 3 | 4622 | # QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
from scipy.optimize import minimize
### <summary>
### Provides an implementation of a portfolio optimizer that calculate the optimal weights
### with the weight range from -1 to 1 and minimize the portfolio variance with a target return of 2%
### </summary>
class MinimumVariancePortfolioOptimizer:
    '''Provides an implementation of a portfolio optimizer that calculates the optimal weights
    with the weight range from -1 to 1 and minimizes the portfolio variance with a target return of 2%'''
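    # Usage sketch (hypothetical data): given a DataFrame of annualized security returns,
    #     weights = MinimumVariancePortfolioOptimizer().Optimize(returns_df)
    # returns the minimum-variance weights subject to the 2% target-return constraint.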
def __init__(self,
minimum_weight = -1,
maximum_weight = 1,
target_return = 0.02):
'''Initialize the MinimumVariancePortfolioOptimizer
Args:
minimum_weight(float): The lower bounds on portfolio weights
maximum_weight(float): The upper bounds on portfolio weights
target_return(float): The target portfolio return'''
self.minimum_weight = minimum_weight
self.maximum_weight = maximum_weight
self.target_return = target_return
def Optimize(self, historicalReturns, expectedReturns = None, covariance = None):
'''
Perform portfolio optimization for a provided matrix of historical returns and an array of expected returns
args:
            historicalReturns: Matrix of annualized historical returns where each column represents a security and each row contains the returns for a given date/time (size: K x N).
expectedReturns: Array of double with the portfolio annualized expected returns (size: K x 1).
covariance: Multi-dimensional array of double with the portfolio covariance of annualized returns (size: K x K).
Returns:
Array of double with the portfolio weights (size: K x 1)
'''
if covariance is None:
covariance = historicalReturns.cov()
if expectedReturns is None:
expectedReturns = historicalReturns.mean()
size = historicalReturns.columns.size # K x 1
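        # Equal-weight portfolio as the initial guess for the optimizer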
x0 = np.array(size * [1. / size])
constraints = [
{'type': 'eq', 'fun': lambda weights: self.get_budget_constraint(weights)},
{'type': 'eq', 'fun': lambda weights: self.get_target_constraint(weights, expectedReturns)}]
opt = minimize(lambda weights: self.portfolio_variance(weights, covariance), # Objective function
x0, # Initial guess
bounds = self.get_boundary_conditions(size), # Bounds for variables
constraints = constraints, # Constraints definition
method='SLSQP') # Optimization method: Sequential Least SQuares Programming
return opt['x']
def portfolio_variance(self, weights, covariance):
'''Computes the portfolio variance
Args:
weighs: Portfolio weights
covariance: Covariance matrix of historical returns'''
variance = np.dot(weights.T, np.dot(covariance, weights))
if variance == 0:
raise ValueError(f'MinimumVariancePortfolioOptimizer.portfolio_variance: Volatility cannot be zero. Weights: {weights}')
return variance
def get_boundary_conditions(self, size):
'''Creates the boundary condition for the portfolio weights'''
return tuple((self.minimum_weight, self.maximum_weight) for x in range(size))
def get_budget_constraint(self, weights):
'''Defines a budget constraint: the sum of the weights equals unity'''
return np.sum(weights) - 1
def get_target_constraint(self, weights, expectedReturns):
'''Ensure that the portfolio return target a given return'''
return np.dot(np.matrix(expectedReturns), np.matrix(weights).T).item() - self.target_return | apache-2.0 |
sadimanna/computer_vision | clustering/kmeansppclustering_with_gap_statistic.py | 1 | 2599 | #K-Means++ Clustering with Gap Statistic to determine the optimal number of clusters
import sys
import numpy as np
import scipy.io as sio
#import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.svm import SVC
filename = sys.argv[1]
datafile = sio.loadmat(filename)
data = datafile['bow']
sizedata=[len(data), len(data[0])]
disp = []
optimal_ks = []
#Determining the optimal number of k with gap statistic method
def gap_statistic(data):
sizedata = [len(data),len(data[0])]
SD = []
gap = []
for knum in xrange(1,20):
#I assumed that the number of clusters in my data won't be more than 20, this can be changed accordingly
print knum
#Clustering original Data
kmeanspp = KMeans(n_clusters=knum,init = 'k-means++',max_iter = 100,n_jobs = 1)
kmeanspp.fit(data)
dispersion = kmeanspp.inertia_
#Clustering Reference Data
nrefs = 10
refDisp = np.zeros(nrefs)
for nref in xrange(nrefs):
refdata = np.random.random_sample(tuple(sizedata))
refkmeans = KMeans(n_clusters=knum,init='k-means++',max_iter=100,n_jobs=1)
refkmeans.fit(refdata)
refdisp = refkmeans.inertia_
refDisp[nref]=np.log(refdisp)
mean_log_refdisp = np.mean(refDisp)
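		#Gap(k) = mean(log(W_ref)) - log(W_data): the gap statistic of Tibshirani et al., comparing the reference dispersion with the observed dispersion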
gap.append(mean_log_refdisp-np.log(dispersion))
sd = (sum([(r-m)**2 for r,m in zip(refDisp,[mean_log_refdisp]*nrefs)])/nrefs)**0.5
SD.append(sd)
	SD = [sd*((1+(1.0/nrefs))**0.5) for sd in SD] #float division: correction factor sqrt(1 + 1/B) for B reference sets
opt_k = None
diff = []
for i in xrange(len(gap)-1):
diff = (SD[i+1]-(gap[i+1]-gap[i]))
if diff>0:
opt_k = i+10
break
if opt_k < 20:
#print opt_k
return opt_k
else:
return 20
#Returning 20 if opt_k is more than 20 in my case, as I wanted not to search more than 20.
# Not required if range is larger.
ntrials = 50
for ntrial in xrange(ntrials):
print 'ntrial: ',ntrial
optimal_ks.append(gap_statistic(data))
#For plotting the gap statistic measure
#plt.plot(np.linspace(10,19,10,True),gap)
#plt.show()
unique_opt_k = list(set(optimal_ks))
k_count = {}
count_opt_k = 0
second_opt_k = 0
opt_k = 0
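#Majority vote: pick the k that occurred most often across the ntrials runs (on a tie the earlier k is kept and the runner-up recorded)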
for u_o_k in unique_opt_k:
count = optimal_ks.count(u_o_k)
k_count[u_o_k]=count
if count>count_opt_k:
count_opt_k = count
opt_k = u_o_k
elif count==count_opt_k:
second_opt_k = u_o_k
print opt_k
print k_count
#Clustering with the optimal number of clusters
kmeanspp = KMeans(n_clusters = opt_k,init='k-means++',max_iter=100,n_jobs=1)
kmeanspp.fit(data)
centers = kmeanspp.cluster_centers_
clusterlabels = kmeanspp.labels_
print clusterlabels
mdict = {}
mdict['clusterlabels'] = clusterlabels
sio.savemat('clusterlabels.mat',mdict,format = '4',oned_as = 'column')
print 'dan dana dan done...'
| gpl-3.0 |
jcrist/blaze | blaze/compute/tests/test_bcolz_compute.py | 9 | 5874 | from __future__ import absolute_import, division, print_function
import pytest
bcolz = pytest.importorskip('bcolz')
from datashape import discover, dshape
import numpy as np
import pandas.util.testing as tm
from odo import into
from blaze import by
from blaze.expr import symbol
from blaze.compute.core import compute, pre_compute
from blaze.compute.bcolz import get_chunksize
b = bcolz.ctable(np.array([(1, 1., np.datetime64('2010-01-01')),
(2, 2., np.datetime64('NaT')),
(3, 3., np.datetime64('2010-01-03'))],
dtype=[('a', 'i8'),
('b', 'f8'),
('date', 'datetime64[D]')]))
t = symbol('t', 'var * {a: int64, b: float64, date: ?date}')
to = symbol('to', 'var * {a: int64, b: float64}')
bo = bcolz.ctable(np.array([(1, 1.), (2, 2.), (3, np.nan)],
dtype=[('a', 'i8'), ('b', 'f8')]))
def test_discover():
assert discover(b) == dshape('3 * {a: int64, b: float64, date: date}')
assert discover(b['a']) == dshape('3 * int64')
def test_reductions():
assert compute(t.a.sum(), b) == 6
assert compute(t.a.min(), b) == 1
assert compute(t.a.max(), b) == 3
assert compute(t.a.mean(), b) == 2.0
assert abs(compute(t.a.std(), b) - np.std([1, 2, 3])) < 1e-5
assert abs(compute(t.a.var(), b) - np.var([1, 2, 3])) < 1e-5
assert abs(compute(t.a.std(unbiased=True), b) - np.std([1, 2, 3],
ddof=1)) < 1e-5
assert abs(compute(t.a.var(unbiased=True), b) - np.var([1, 2, 3],
ddof=1)) < 1e-5
assert len(list(compute(t.distinct(), b))) == 3
assert len(list(compute(t.a.distinct(), b))) == 3
assert compute(t.a.nunique(), b) == 3
assert isinstance(compute(t.a.nunique(), b), np.integer)
assert compute(t.a.count(), b) == 3
assert isinstance(compute(t.date.count(), b), np.integer)
assert compute(t.date.nunique(), b) == 2
assert isinstance(compute(t.date.nunique(), b), np.integer)
assert compute(t.date.count(), b) == 2
assert isinstance(compute(t.a.count(), b), np.integer)
assert compute(t.a[0], b) == 1
assert compute(t.a[-1], b) == 3
assert compute(t[0], b) == compute(t[0], b)
assert compute(t[-1], b) == compute(t[-1], b)
def test_nunique():
assert compute(t.a.nunique(), b) == 3
assert compute(t.nunique(), b) == 3
def test_selection_head():
ds = dshape('var * {a: int32, b: int32, c: float64}')
b = into(bcolz.ctable,
[(i, i + 1, float(i) ** 2) for i in range(10)],
dshape=ds)
t = symbol('t', ds)
# numpy reductions return numpy scalars
assert compute((t.a < t.b).all(), b).item() is True
assert list(compute(t[t.a < t.b].a.head(10), b)) == list(range(10))
assert list(compute(t[t.a > t.b].a.head(10), b)) == []
assert into([], compute(t[t.a + t.b > t.c], b)) == [(0, 1, 0),
(1, 2, 1),
(2, 3, 4)]
assert len(compute(t[t.a + t.b > t.c].head(10), b)) # non-empty
assert len(compute(t[t.a + t.b < t.c].head(10), b)) # non-empty
def test_selection_isnan():
b = bcolz.ctable([[1, np.nan, 3], [1., 2., np.nan]], names=['a', 'b'])
t = symbol('t', discover(b))
lhs = compute(t[t.a.isnan()], b)
rhs = np.array([(np.nan, 2.0)], dtype=b.dtype)
for n in b.dtype.names:
assert np.isclose(lhs[n], rhs[n], equal_nan=True).all()
assert np.isclose(compute(t[~t.b.isnan()], b)[n],
np.array(
[(1, 1.0), (np.nan, 2.0)], dtype=b.dtype)[n],
equal_nan=True).all()
def test_count_isnan():
assert compute(to.a[~to.b.isnan()].count(), bo) == 2
def test_count_isnan_object():
assert compute(to.a[~to.b.isnan()].count(), bo) == 2
def test_count_isnan_struct():
assert compute(t[~t.b.isnan()].count(), b) == 3
def test_nrows():
assert compute(t.nrows, b) == len(b)
def test_nelements():
assert compute(t.nelements(axis=0), b) == len(b)
assert compute(t.nelements(), b) == len(b)
# This is no longer desired. Handled by compute_up
def dont_test_pre_compute():
b = bcolz.ctable(np.array([(1, 1., 10.), (2, 2., 20.), (3, 3., 30.)],
dtype=[('a', 'i8'), ('b', 'f8'), ('c', 'f8')]))
s = symbol('s', discover(b))
result = pre_compute(s[['a', 'b']], b)
assert result.names == ['a', 'b']
def eq(a, b):
return np.array_equal(a, b)
def test_unicode_field_names():
b = bcolz.ctable(np.array([(1, 1., 10.), (2, 2., 20.), (3, 3., 30.)],
dtype=[('a', 'i8'), ('b', 'f8'), ('c', 'f8')]))
s = symbol('s', discover(b))
assert eq(compute(s[u'a'], b)[:], compute(s['a'], b)[:])
assert eq(compute(s[[u'a', u'c']], b)[:], compute(s[['a', 'c']], b)[:])
def test_chunksize_inference():
b = bcolz.ctable(np.array([(1, 1., 10.), (2, 2., 20.), (3, 3., 30.)],
dtype=[('a', 'i8'), ('b', 'f8'), ('c', 'f8')]),
chunklen=2)
assert get_chunksize(b) == 2
def test_notnull():
with pytest.raises(AttributeError):
t.b.notnull
def test_by_with_single_row():
ct = bcolz.ctable([[1, 1, 3, 3], [1, 2, 3, 4]], names=list('ab'))
t = symbol('t', discover(ct))
subset = t[t.a == 3]
expr = by(subset.a, b_sum=subset.b.sum())
result = compute(expr, ct)
expected = compute(expr, ct, optimize=False)
tm.assert_frame_equal(result, expected)
| bsd-3-clause |
hdmetor/scikit-learn | sklearn/linear_model/ridge.py | 89 | 39360 | """
Ridge regression
"""
# Author: Mathieu Blondel <[email protected]>
# Reuben Fletcher-Costin <[email protected]>
# Fabian Pedregosa <[email protected]>
# Michael Eickenberg <[email protected]>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy import linalg
from scipy import sparse
from scipy.sparse import linalg as sp_linalg
from .base import LinearClassifierMixin, LinearModel
from ..base import RegressorMixin
from ..utils.extmath import safe_sparse_dot
from ..utils import check_X_y
from ..utils import compute_sample_weight
from ..utils import column_or_1d
from ..preprocessing import LabelBinarizer
from ..grid_search import GridSearchCV
from ..externals import six
from ..metrics.scorer import check_scoring
def _solve_sparse_cg(X, y, alpha, max_iter=None, tol=1e-3, verbose=0):
n_samples, n_features = X.shape
X1 = sp_linalg.aslinearoperator(X)
coefs = np.empty((y.shape[1], n_features))
if n_features > n_samples:
def create_mv(curr_alpha):
def _mv(x):
return X1.matvec(X1.rmatvec(x)) + curr_alpha * x
return _mv
else:
def create_mv(curr_alpha):
def _mv(x):
return X1.rmatvec(X1.matvec(x)) + curr_alpha * x
return _mv
for i in range(y.shape[1]):
y_column = y[:, i]
mv = create_mv(alpha[i])
if n_features > n_samples:
# kernel ridge
# w = X.T * inv(X X^t + alpha*Id) y
C = sp_linalg.LinearOperator(
(n_samples, n_samples), matvec=mv, dtype=X.dtype)
coef, info = sp_linalg.cg(C, y_column, tol=tol)
coefs[i] = X1.rmatvec(coef)
else:
# linear ridge
# w = inv(X^t X + alpha*Id) * X.T y
y_column = X1.rmatvec(y_column)
C = sp_linalg.LinearOperator(
(n_features, n_features), matvec=mv, dtype=X.dtype)
coefs[i], info = sp_linalg.cg(C, y_column, maxiter=max_iter,
tol=tol)
if info < 0:
raise ValueError("Failed with error code %d" % info)
if max_iter is None and info > 0 and verbose:
warnings.warn("sparse_cg did not converge after %d iterations." %
info)
return coefs
def _solve_lsqr(X, y, alpha, max_iter=None, tol=1e-3):
n_samples, n_features = X.shape
coefs = np.empty((y.shape[1], n_features))
# According to the lsqr documentation, alpha = damp^2.
sqrt_alpha = np.sqrt(alpha)
for i in range(y.shape[1]):
y_column = y[:, i]
coefs[i] = sp_linalg.lsqr(X, y_column, damp=sqrt_alpha[i],
atol=tol, btol=tol, iter_lim=max_iter)[0]
return coefs
def _solve_cholesky(X, y, alpha):
# w = inv(X^t X + alpha*Id) * X.T y
n_samples, n_features = X.shape
n_targets = y.shape[1]
A = safe_sparse_dot(X.T, X, dense_output=True)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
one_alpha = np.array_equal(alpha, len(alpha) * [alpha[0]])
if one_alpha:
A.flat[::n_features + 1] += alpha[0]
return linalg.solve(A, Xy, sym_pos=True,
overwrite_a=True).T
else:
coefs = np.empty([n_targets, n_features])
for coef, target, current_alpha in zip(coefs, Xy.T, alpha):
A.flat[::n_features + 1] += current_alpha
coef[:] = linalg.solve(A, target, sym_pos=True,
overwrite_a=False).ravel()
A.flat[::n_features + 1] -= current_alpha
return coefs
def _solve_cholesky_kernel(K, y, alpha, sample_weight=None, copy=False):
# dual_coef = inv(X X^t + alpha*Id) y
n_samples = K.shape[0]
n_targets = y.shape[1]
if copy:
K = K.copy()
alpha = np.atleast_1d(alpha)
one_alpha = (alpha == alpha[0]).all()
has_sw = isinstance(sample_weight, np.ndarray) \
or sample_weight not in [1.0, None]
if has_sw:
# Unlike other solvers, we need to support sample_weight directly
# because K might be a pre-computed kernel.
sw = np.sqrt(np.atleast_1d(sample_weight))
y = y * sw[:, np.newaxis]
K *= np.outer(sw, sw)
if one_alpha:
# Only one penalty, we can solve multi-target problems in one time.
K.flat[::n_samples + 1] += alpha[0]
try:
# Note: we must use overwrite_a=False in order to be able to
# use the fall-back solution below in case a LinAlgError
# is raised
dual_coef = linalg.solve(K, y, sym_pos=True,
overwrite_a=False)
except np.linalg.LinAlgError:
warnings.warn("Singular matrix in solving dual problem. Using "
"least-squares solution instead.")
dual_coef = linalg.lstsq(K, y)[0]
# K is expensive to compute and store in memory so change it back in
# case it was user-given.
K.flat[::n_samples + 1] -= alpha[0]
if has_sw:
dual_coef *= sw[:, np.newaxis]
return dual_coef
else:
# One penalty per target. We need to solve each target separately.
dual_coefs = np.empty([n_targets, n_samples])
for dual_coef, target, current_alpha in zip(dual_coefs, y.T, alpha):
K.flat[::n_samples + 1] += current_alpha
dual_coef[:] = linalg.solve(K, target, sym_pos=True,
overwrite_a=False).ravel()
K.flat[::n_samples + 1] -= current_alpha
if has_sw:
dual_coefs *= sw[np.newaxis, :]
return dual_coefs.T
def _solve_svd(X, y, alpha):
U, s, Vt = linalg.svd(X, full_matrices=False)
idx = s > 1e-15 # same default value as scipy.linalg.pinv
s_nnz = s[idx][:, np.newaxis]
UTy = np.dot(U.T, y)
d = np.zeros((s.size, alpha.size))
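    # Ridge filter factors d = s / (s**2 + alpha) shrink each singular direction; coef = Vt.T.dot(d * U.T.dot(y))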
d[idx] = s_nnz / (s_nnz ** 2 + alpha)
d_UT_y = d * UTy
return np.dot(Vt.T, d_UT_y).T
def _rescale_data(X, y, sample_weight):
"""Rescale data so as to support sample_weight"""
n_samples = X.shape[0]
sample_weight = sample_weight * np.ones(n_samples)
sample_weight = np.sqrt(sample_weight)
sw_matrix = sparse.dia_matrix((sample_weight, 0),
shape=(n_samples, n_samples))
X = safe_sparse_dot(sw_matrix, X)
y = safe_sparse_dot(sw_matrix, y)
return X, y
def ridge_regression(X, y, alpha, sample_weight=None, solver='auto',
max_iter=None, tol=1e-3, verbose=0):
"""Solve the ridge equation by the method of normal equations.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
X : {array-like, sparse matrix, LinearOperator},
shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
alpha : {float, array-like},
shape = [n_targets] if array-like
The l_2 penalty to be used. If an array is passed, penalties are
assumed to be specific to targets
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample. If sample_weight is set, then
the solver will automatically be set to 'cholesky'
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution via a Cholesky decomposition of
dot(X.T, X)
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
      scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
    All solvers except 'svd' support both dense and sparse data.
tol : float
Precision of the solution.
verbose : int
Verbosity level. Setting verbose > 0 will display additional information
depending on the solver used.
Returns
-------
coef : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
Notes
-----
This function won't compute the intercept.
"""
n_samples, n_features = X.shape
if y.ndim > 2:
raise ValueError("Target y has the wrong shape %s" % str(y.shape))
ravel = False
if y.ndim == 1:
y = y.reshape(-1, 1)
ravel = True
n_samples_, n_targets = y.shape
if n_samples != n_samples_:
raise ValueError("Number of samples in X and y does not correspond:"
" %d != %d" % (n_samples, n_samples_))
has_sw = sample_weight is not None
if solver == 'auto':
# cholesky if it's a dense array and cg in
# any other case
if not sparse.issparse(X) or has_sw:
solver = 'cholesky'
else:
solver = 'sparse_cg'
elif solver == 'lsqr' and not hasattr(sp_linalg, 'lsqr'):
warnings.warn("""lsqr not available on this machine, falling back
to sparse_cg.""")
solver = 'sparse_cg'
if has_sw:
if np.atleast_1d(sample_weight).ndim > 1:
raise ValueError("Sample weights must be 1D array or scalar")
# Sample weight can be implemented via a simple rescaling.
X, y = _rescale_data(X, y, sample_weight)
# There should be either 1 or n_targets penalties
alpha = np.asarray(alpha).ravel()
if alpha.size not in [1, n_targets]:
raise ValueError("Number of targets and number of penalties "
"do not correspond: %d != %d"
% (alpha.size, n_targets))
if alpha.size == 1 and n_targets > 1:
alpha = np.repeat(alpha, n_targets)
if solver not in ('sparse_cg', 'cholesky', 'svd', 'lsqr'):
raise ValueError('Solver %s not understood' % solver)
if solver == 'sparse_cg':
coef = _solve_sparse_cg(X, y, alpha, max_iter, tol, verbose)
elif solver == "lsqr":
coef = _solve_lsqr(X, y, alpha, max_iter, tol)
elif solver == 'cholesky':
if n_features > n_samples:
K = safe_sparse_dot(X, X.T, dense_output=True)
try:
dual_coef = _solve_cholesky_kernel(K, y, alpha)
coef = safe_sparse_dot(X.T, dual_coef, dense_output=True).T
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = 'svd'
else:
try:
coef = _solve_cholesky(X, y, alpha)
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = 'svd'
if solver == 'svd':
if sparse.issparse(X):
raise TypeError('SVD solver does not support sparse'
' inputs currently')
coef = _solve_svd(X, y, alpha)
if ravel:
# When y was passed as a 1d-array, we flatten the coefficients.
coef = coef.ravel()
return coef
class _BaseRidge(six.with_metaclass(ABCMeta, LinearModel)):
@abstractmethod
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, solver="auto"):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.max_iter = max_iter
self.tol = tol
self.solver = solver
def fit(self, X, y, sample_weight=None):
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float,
multi_output=True, y_numeric=True)
if ((sample_weight is not None) and
np.atleast_1d(sample_weight).ndim > 1):
raise ValueError("Sample weights must be 1D array or scalar")
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
self.coef_ = ridge_regression(X, y,
alpha=self.alpha,
sample_weight=sample_weight,
max_iter=self.max_iter,
tol=self.tol,
solver=self.solver)
self._set_intercept(X_mean, y_mean, X_std)
return self
class Ridge(_BaseRidge, RegressorMixin):
"""Linear least squares with l2 regularization.
This model solves a regression model where the loss function is
the linear least squares function and regularization is given by
the l2-norm. Also known as Ridge Regression or Tikhonov regularization.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_targets]).
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alpha : {float, array-like}
shape = [n_targets]
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``(2*C)^-1`` in other linear models such as LogisticRegression or
LinearSVC. If an array is passed, penalties are assumed to be specific
to the targets. Hence they must correspond in number.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution.
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
      scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
    All solvers except 'svd' support both dense and sparse data.
tol : float
Precision of the solution.
Attributes
----------
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
See also
--------
RidgeClassifier, RidgeCV, KernelRidge
Examples
--------
>>> from sklearn.linear_model import Ridge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = Ridge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None,
normalize=False, solver='auto', tol=0.001)
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, solver="auto"):
super(Ridge, self).__init__(alpha=alpha, fit_intercept=fit_intercept,
normalize=normalize, copy_X=copy_X,
max_iter=max_iter, tol=tol, solver=solver)
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample
Returns
-------
self : returns an instance of self.
"""
return super(Ridge, self).fit(X, y, sample_weight=sample_weight)
class RidgeClassifier(LinearClassifierMixin, _BaseRidge):
"""Classifier using Ridge regression.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alpha : float
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``(2*C)^-1`` in other linear models such as LogisticRegression or
LinearSVC.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set to false, no
intercept will be used in calculations (e.g. data is expected to be
already centered).
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg'}
Solver to use in the computational
routines. 'svd' will use a Singular value decomposition to obtain
the solution, 'cholesky' will use the standard
scipy.linalg.solve function, 'sparse_cg' will use the
conjugate gradient solver as found in
        scipy.sparse.linalg.cg while 'auto' will choose the most
appropriate depending on the matrix X. 'lsqr' uses
a direct regularized least-squares routine provided by scipy.
tol : float
Precision of the solution.
Attributes
----------
coef_ : array, shape = [n_features] or [n_classes, n_features]
Weight vector(s).
See also
--------
Ridge, RidgeClassifierCV
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, class_weight=None,
solver="auto"):
super(RidgeClassifier, self).__init__(
alpha=alpha, fit_intercept=fit_intercept, normalize=normalize,
copy_X=copy_X, max_iter=max_iter, tol=tol, solver=solver)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples,n_features]
Training data
y : array-like, shape = [n_samples]
Target values
sample_weight : float or numpy array of shape (n_samples,)
Sample weight.
Returns
-------
self : returns an instance of self.
"""
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith('multilabel'):
y = column_or_1d(y, warn=True)
if self.class_weight:
if sample_weight is None:
sample_weight = 1.
# modify the sample weights with the corresponding class weight
sample_weight = (sample_weight *
compute_sample_weight(self.class_weight, y))
super(RidgeClassifier, self).fit(X, Y, sample_weight=sample_weight)
return self
@property
def classes_(self):
return self._label_binarizer.classes_
class _RidgeGCV(LinearModel):
"""Ridge regression with built-in Generalized Cross-Validation
It allows efficient Leave-One-Out cross-validation.
This class is not intended to be used directly. Use RidgeCV instead.
Notes
-----
We want to solve (K + alpha*Id)c = y,
where K = X X^T is the kernel matrix.
Let G = (K + alpha*Id)^-1.
Dual solution: c = Gy
Primal solution: w = X^T c
Compute eigendecomposition K = Q V Q^T.
Then G = Q (V + alpha*Id)^-1 Q^T,
where (V + alpha*Id) is diagonal.
It is thus inexpensive to inverse for many alphas.
Let loov be the vector of prediction values for each example
when the model was fitted with all examples but this example.
loov = (KGY - diag(KG)Y) / diag(I-KG)
Let looe be the vector of prediction errors for each example
when the model was fitted with all examples but this example.
looe = y - loov = c / diag(G)
References
----------
http://cbcl.mit.edu/projects/cbcl/publications/ps/MIT-CSAIL-TR-2007-025.pdf
http://www.mit.edu/~9.520/spring07/Classes/rlsslides.pdf
"""
def __init__(self, alphas=(0.1, 1.0, 10.0),
fit_intercept=True, normalize=False,
scoring=None, copy_X=True,
gcv_mode=None, store_cv_values=False):
self.alphas = np.asarray(alphas)
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.copy_X = copy_X
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
def _pre_compute(self, X, y):
# even if X is very sparse, K is usually very dense
K = safe_sparse_dot(X, X.T, dense_output=True)
v, Q = linalg.eigh(K)
QT_y = np.dot(Q.T, y)
return v, Q, QT_y
def _decomp_diag(self, v_prime, Q):
# compute diagonal of the matrix: dot(Q, dot(diag(v_prime), Q^T))
return (v_prime * Q ** 2).sum(axis=-1)
def _diag_dot(self, D, B):
# compute dot(diag(D), B)
if len(B.shape) > 1:
# handle case where B is > 1-d
D = D[(slice(None), ) + (np.newaxis, ) * (len(B.shape) - 1)]
return D * B
def _errors(self, alpha, y, v, Q, QT_y):
# don't construct matrix G, instead compute action on y & diagonal
w = 1.0 / (v + alpha)
c = np.dot(Q, self._diag_dot(w, QT_y))
G_diag = self._decomp_diag(w, Q)
# handle case where y is 2-d
if len(y.shape) != 1:
G_diag = G_diag[:, np.newaxis]
return (c / G_diag) ** 2, c
def _values(self, alpha, y, v, Q, QT_y):
# don't construct matrix G, instead compute action on y & diagonal
w = 1.0 / (v + alpha)
c = np.dot(Q, self._diag_dot(w, QT_y))
G_diag = self._decomp_diag(w, Q)
# handle case where y is 2-d
if len(y.shape) != 1:
G_diag = G_diag[:, np.newaxis]
return y - (c / G_diag), c
def _pre_compute_svd(self, X, y):
if sparse.issparse(X):
raise TypeError("SVD not supported for sparse matrices")
U, s, _ = linalg.svd(X, full_matrices=0)
v = s ** 2
UT_y = np.dot(U.T, y)
return v, U, UT_y
def _errors_svd(self, alpha, y, v, U, UT_y):
w = ((v + alpha) ** -1) - (alpha ** -1)
c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
G_diag = self._decomp_diag(w, U) + (alpha ** -1)
if len(y.shape) != 1:
# handle case where y is 2-d
G_diag = G_diag[:, np.newaxis]
return (c / G_diag) ** 2, c
def _values_svd(self, alpha, y, v, U, UT_y):
w = ((v + alpha) ** -1) - (alpha ** -1)
c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
G_diag = self._decomp_diag(w, U) + (alpha ** -1)
if len(y.shape) != 1:
# handle case when y is 2-d
G_diag = G_diag[:, np.newaxis]
return y - (c / G_diag), c
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float,
multi_output=True, y_numeric=True)
n_samples, n_features = X.shape
X, y, X_mean, y_mean, X_std = LinearModel._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
gcv_mode = self.gcv_mode
with_sw = len(np.shape(sample_weight))
if gcv_mode is None or gcv_mode == 'auto':
if sparse.issparse(X) or n_features > n_samples or with_sw:
gcv_mode = 'eigen'
else:
gcv_mode = 'svd'
elif gcv_mode == "svd" and with_sw:
# FIXME non-uniform sample weights not yet supported
warnings.warn("non-uniform sample weights unsupported for svd, "
"forcing usage of eigen")
gcv_mode = 'eigen'
if gcv_mode == 'eigen':
_pre_compute = self._pre_compute
_errors = self._errors
_values = self._values
elif gcv_mode == 'svd':
# assert n_samples >= n_features
_pre_compute = self._pre_compute_svd
_errors = self._errors_svd
_values = self._values_svd
else:
raise ValueError('bad gcv_mode "%s"' % gcv_mode)
v, Q, QT_y = _pre_compute(X, y)
n_y = 1 if len(y.shape) == 1 else y.shape[1]
cv_values = np.zeros((n_samples * n_y, len(self.alphas)))
C = []
scorer = check_scoring(self, scoring=self.scoring, allow_none=True)
error = scorer is None
for i, alpha in enumerate(self.alphas):
weighted_alpha = (sample_weight * alpha
if sample_weight is not None
else alpha)
if error:
out, c = _errors(weighted_alpha, y, v, Q, QT_y)
else:
out, c = _values(weighted_alpha, y, v, Q, QT_y)
cv_values[:, i] = out.ravel()
C.append(c)
if error:
best = cv_values.mean(axis=0).argmin()
else:
# The scorer want an object that will make the predictions but
# they are already computed efficiently by _RidgeGCV. This
# identity_estimator will just return them
def identity_estimator():
pass
identity_estimator.decision_function = lambda y_predict: y_predict
identity_estimator.predict = lambda y_predict: y_predict
out = [scorer(identity_estimator, y.ravel(), cv_values[:, i])
for i in range(len(self.alphas))]
best = np.argmax(out)
self.alpha_ = self.alphas[best]
self.dual_coef_ = C[best]
self.coef_ = safe_sparse_dot(self.dual_coef_.T, X)
self._set_intercept(X_mean, y_mean, X_std)
if self.store_cv_values:
if len(y.shape) == 1:
cv_values_shape = n_samples, len(self.alphas)
else:
cv_values_shape = n_samples, n_y, len(self.alphas)
self.cv_values_ = cv_values.reshape(cv_values_shape)
return self
class _BaseRidgeCV(LinearModel):
def __init__(self, alphas=(0.1, 1.0, 10.0),
fit_intercept=True, normalize=False, scoring=None,
cv=None, gcv_mode=None,
store_cv_values=False):
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.cv = cv
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
if self.cv is None:
estimator = _RidgeGCV(self.alphas,
fit_intercept=self.fit_intercept,
normalize=self.normalize,
scoring=self.scoring,
gcv_mode=self.gcv_mode,
store_cv_values=self.store_cv_values)
estimator.fit(X, y, sample_weight=sample_weight)
self.alpha_ = estimator.alpha_
if self.store_cv_values:
self.cv_values_ = estimator.cv_values_
else:
if self.store_cv_values:
raise ValueError("cv!=None and store_cv_values=True "
" are incompatible")
parameters = {'alpha': self.alphas}
fit_params = {'sample_weight' : sample_weight}
gs = GridSearchCV(Ridge(fit_intercept=self.fit_intercept),
parameters, fit_params=fit_params, cv=self.cv)
gs.fit(X, y)
estimator = gs.best_estimator_
self.alpha_ = gs.best_estimator_.alpha
self.coef_ = estimator.coef_
self.intercept_ = estimator.intercept_
return self
class RidgeCV(_BaseRidgeCV, RegressorMixin):
"""Ridge regression with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alphas : numpy array of shape [n_alphas]
Array of alpha values to try.
Small positive values of alpha improve the conditioning of the
problem and reduce the variance of the estimates.
Alpha corresponds to ``(2*C)^-1`` in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : integer or cross-validation generator, optional
If None, Generalized Cross-Validation (efficient Leave-One-Out)
will be used.
If an integer is passed, it is the number of folds for KFold cross
validation. Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
gcv_mode : {None, 'auto', 'svd', 'eigen'}, optional
Flag indicating which strategy to use when performing
Generalized Cross-Validation. Options are::
'auto' : use 'svd' if n_samples > n_features and X is not a sparse
matrix, otherwise use 'eigen'
'svd' : force computation via singular value decomposition of X
(does not work for sparse matrices)
'eigen' : force computation via eigendecomposition of X^T X
The 'auto' mode is the default and is intended to pick the cheaper
option of the two depending upon the shape and format of the training
data.
store_cv_values : boolean, default=False
Flag indicating if the cross-validation values corresponding to
each alpha should be stored in the `cv_values_` attribute (see
below). This flag is only compatible with `cv=None` (i.e. using
Generalized Cross-Validation).
Attributes
----------
cv_values_ : array, shape = [n_samples, n_alphas] or \
shape = [n_samples, n_targets, n_alphas], optional
Cross-validation values for each alpha (if `store_cv_values=True` and \
`cv=None`). After `fit()` has been called, this attribute will \
contain the mean squared errors (by default) or the values of the \
`scoring` function (if provided in the constructor).
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
alpha_ : float
Estimated regularization parameter.
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
See also
--------
Ridge: Ridge regression
RidgeClassifier: Ridge classifier
RidgeClassifierCV: Ridge classifier with built-in cross validation
"""
pass
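# --- Editor's sketch (illustrative only) ---
# Minimal RidgeCV usage on synthetic data, wrapped in a helper so that
# importing this module has no side effects.
def _ridgecv_usage_sketch():
    rng = np.random.RandomState(0)
    X = rng.randn(50, 3)
    y = X[:, 0] - 2.0 * X[:, 1] + 0.1 * rng.randn(50)
    model = RidgeCV(alphas=(0.1, 1.0, 10.0), store_cv_values=True).fit(X, y)
    # model.alpha_ is the selected regularization strength; model.cv_values_
    # holds the per-sample leave-one-out errors for each candidate alpha.
    return model.alpha_, model.coef_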
class RidgeClassifierCV(LinearClassifierMixin, _BaseRidgeCV):
"""Ridge classifier with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation. Currently, only the n_features >
n_samples case is handled efficiently.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alphas : numpy array of shape [n_alphas]
Array of alpha values to try.
Small positive values of alpha improve the conditioning of the
problem and reduce the variance of the estimates.
Alpha corresponds to ``(2*C)^-1`` in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : cross-validation generator, optional
If None, Generalized Cross-Validation (efficient Leave-One-Out)
will be used.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Attributes
----------
cv_values_ : array, shape = [n_samples, n_alphas] or \
shape = [n_samples, n_responses, n_alphas], optional
Cross-validation values for each alpha (if `store_cv_values=True` and
`cv=None`). After `fit()` has been called, this attribute will contain \
the mean squared errors (by default) or the values of the \
`scoring` function (if provided in the constructor).
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
alpha_ : float
Estimated regularization parameter
See also
--------
Ridge: Ridge regression
RidgeClassifier: Ridge classifier
RidgeCV: Ridge regression with built-in cross validation
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
"""
def __init__(self, alphas=(0.1, 1.0, 10.0), fit_intercept=True,
normalize=False, scoring=None, cv=None, class_weight=None):
super(RidgeClassifierCV, self).__init__(
alphas=alphas, fit_intercept=fit_intercept, normalize=normalize,
scoring=scoring, cv=cv)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=None):
"""Fit the ridge classifier.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
sample_weight : float or numpy array of shape (n_samples,)
Sample weight.
Returns
-------
self : object
Returns self.
"""
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith('multilabel'):
y = column_or_1d(y, warn=True)
if self.class_weight:
if sample_weight is None:
sample_weight = 1.
# modify the sample weights with the corresponding class weight
sample_weight = (sample_weight *
compute_sample_weight(self.class_weight, y))
_BaseRidgeCV.fit(self, X, Y, sample_weight=sample_weight)
return self
@property
def classes_(self):
return self._label_binarizer.classes_
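# --- Editor's sketch (illustrative only) ---
# Minimal RidgeClassifierCV usage on a toy two-class problem; the data and the
# helper name are hypothetical and nothing here runs at import time.
def _ridgeclassifiercv_usage_sketch():
    rng = np.random.RandomState(0)
    X = np.vstack((rng.randn(20, 2) + 2.0, rng.randn(20, 2) - 2.0))
    y = np.hstack((np.zeros(20), np.ones(20)))
    clf = RidgeClassifierCV(alphas=(0.1, 1.0, 10.0)).fit(X, y)
    return clf.alpha_, clf.predict(X[:5])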
| bsd-3-clause |
nakul02/systemml | src/main/python/systemml/classloader.py | 4 | 7952 | #-------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#-------------------------------------------------------------
__all__ = ['createJavaObject', 'jvm_stdout', 'default_jvm_stdout', 'default_jvm_stdout_parallel_flush', 'set_default_jvm_stdout', 'get_spark_context' ]
import os
import numpy as np
import pandas as pd
import threading, time
try:
import py4j.java_gateway
from py4j.java_gateway import JavaObject
from pyspark import SparkContext
from pyspark.sql import SparkSession
except ImportError:
raise ImportError('Unable to import `pyspark`. Hint: Make sure you are running with PySpark.')
_loadedSystemML = False
def get_spark_context():
"""
Internal method to get the already initialized SparkContext. Developers should always use
get_spark_context() instead of SparkContext._active_spark_context to ensure SystemML is loaded.
Returns
-------
sc: SparkContext
SparkContext
"""
if SparkContext._active_spark_context is not None:
sc = SparkContext._active_spark_context
global _loadedSystemML
if not _loadedSystemML:
createJavaObject(sc, 'dummy')
_loadedSystemML = True
return sc
else:
raise Exception('Expected spark context to be created.')
_in_jvm_stdout = False
default_jvm_stdout = True
default_jvm_stdout_parallel_flush = True
def set_default_jvm_stdout(enable, parallel_flush=True):
"""
Utility method to control whether the driver JVM stdout is flushed by default (e.g. from within a Jupyter notebook)
Parameters
----------
enable: boolean
Should flush the stdout by default when mlcontext.execute is invoked
parallel_flush: boolean
Should flush the stdout in parallel
"""
global default_jvm_stdout, default_jvm_stdout_parallel_flush
default_jvm_stdout = enable
default_jvm_stdout_parallel_flush = parallel_flush
# This is a useful utility class to get the output of the driver JVM from within a Jupyter notebook
# Example usage:
# with jvm_stdout():
# ml.execute(script)
class jvm_stdout(object):
"""
This is a useful utility class to get the output of the driver JVM from within a Jupyter notebook
Parameters
----------
parallel_flush: boolean
Should flush the stdout in parallel
"""
def __init__(self, parallel_flush=False):
self.util = get_spark_context()._jvm.org.apache.sysml.api.ml.Utils()
self.parallel_flush = parallel_flush
self.t = threading.Thread(target=self.flush_stdout)
self.stop = False
def flush_stdout(self):
while not self.stop:
time.sleep(1) # flush stdout every 1 second
str = self.util.flushStdOut()
if str != '':
str = str[:-1] if str.endswith('\n') else str
print(str)
def __enter__(self):
global _in_jvm_stdout
if _in_jvm_stdout:
# Allow for nested jvm_stdout
self.donotRedirect = True
else:
self.donotRedirect = False
self.util.startRedirectStdOut()
if self.parallel_flush:
self.t.start()
_in_jvm_stdout = True
def __exit__(self, *args):
global _in_jvm_stdout
if not self.donotRedirect:
if self.parallel_flush:
self.stop = True
self.t.join()
print(self.util.stopRedirectStdOut())
_in_jvm_stdout = False
_initializedSparkSession = False
def _createJavaObject(sc, obj_type):
# -----------------------------------------------------------------------------------
# Avoids race condition between locking of metastore_db of Scala SparkSession and PySpark SparkSession.
# This is done at toDF() rather than import level to avoid creation of SparkSession in worker processes.
global _initializedSparkSession
if not _initializedSparkSession:
_initializedSparkSession = True
SparkSession.builder.getOrCreate().createDataFrame(pd.DataFrame(np.array([[1,2],[3,4]])))
# -----------------------------------------------------------------------------------
if obj_type == 'mlcontext':
return sc._jvm.org.apache.sysml.api.mlcontext.MLContext(sc._jsc)
elif obj_type == 'dummy':
return sc._jvm.org.apache.sysml.utils.SystemMLLoaderUtils()
else:
raise ValueError('Incorrect usage: supported values: mlcontext or dummy')
def _getJarFileNames(sc):
import imp, fnmatch
jar_file_name = '_ignore.jar'
java_dir = os.path.join(imp.find_module("systemml")[1], "systemml-java")
jar_file_names = []
for file in os.listdir(java_dir):
if fnmatch.fnmatch(file, 'systemml-*-SNAPSHOT.jar') or fnmatch.fnmatch(file, 'systemml-*.jar'):
jar_file_names = jar_file_names + [ os.path.join(java_dir, file) ]
return jar_file_names
def _getLoaderInstance(sc, jar_file_name, className, hint):
err_msg = 'Unable to load systemml-*.jar into current pyspark session.'
if os.path.isfile(jar_file_name):
sc._jsc.addJar(jar_file_name)
jar_file_url = sc._jvm.java.io.File(jar_file_name).toURI().toURL()
url_class = sc._jvm.java.net.URL
jar_file_url_arr = sc._gateway.new_array(url_class, 1)
jar_file_url_arr[0] = jar_file_url
url_class_loader = sc._jvm.java.net.URLClassLoader(jar_file_url_arr, sc._jsc.getClass().getClassLoader())
c1 = sc._jvm.java.lang.Class.forName(className, True, url_class_loader)
return c1.newInstance()
else:
raise ImportError(err_msg + ' Hint: Download the jar from http://systemml.apache.org/download and ' + hint )
def createJavaObject(sc, obj_type):
"""
Checks that the SystemML jar is available (loading it if necessary) and returns a handle to the requested MLContext or loader object on the JVM
Parameters
----------
sc: SparkContext
SparkContext
obj_type: Type of object to create ('mlcontext' or 'dummy')
"""
try:
return _createJavaObject(sc, obj_type)
except (py4j.protocol.Py4JError, TypeError):
ret = None
err_msg = 'Unable to load systemml-*.jar into current pyspark session.'
hint = 'Provide the following argument to pyspark: --driver-class-path '
jar_file_names = _getJarFileNames(sc)
if len(jar_file_names) != 2:
raise ImportError('Expected only systemml and systemml-extra jars, but found ' + str(jar_file_names))
for jar_file_name in jar_file_names:
if 'extra' in jar_file_name:
x = _getLoaderInstance(sc, jar_file_name, 'org.apache.sysml.api.dl.Caffe2DMLLoader', hint + 'systemml-*-extra.jar')
x.loadCaffe2DML(jar_file_name)
else:
x = _getLoaderInstance(sc, jar_file_name, 'org.apache.sysml.utils.SystemMLLoaderUtils', hint + 'systemml-*.jar')
x.loadSystemML(jar_file_name)
try:
ret = _createJavaObject(sc, obj_type)
except (py4j.protocol.Py4JError, TypeError):
raise ImportError(err_msg + ' Hint: ' + hint + jar_file_name)
return ret
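# --- Editor's sketch (illustrative only) ---
# Typical call pattern for the loader above. It assumes a working PySpark
# installation with the SystemML jars reachable; the helper name is
# hypothetical and nothing here runs at import time.
def _mlcontext_handle_sketch():
    sc = SparkContext.getOrCreate()
    # 'mlcontext' returns the JVM-side MLContext; 'dummy' only verifies loading.
    return createJavaObject(sc, 'mlcontext')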
| apache-2.0 |
xiaoxiamii/scikit-learn | examples/preprocessing/plot_function_transformer.py | 161 | 1949 | """
=========================================================
Using FunctionTransformer to select columns
=========================================================
Shows how to use a function transformer in a pipeline. If you know your
dataset's first principal component is irrelevant for a classification task,
you can use the FunctionTransformer to select all but the first column of the
PCA transformed data.
"""
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cross_validation import train_test_split
from sklearn.decomposition import PCA
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import FunctionTransformer
def _generate_vector(shift=0.5, noise=15):
return np.arange(1000) + (np.random.rand(1000) - shift) * noise
def generate_dataset():
"""
This dataset is two lines with a slope ~ 1, where one has
a y offset of ~100
"""
return np.vstack((
np.vstack((
_generate_vector(),
_generate_vector() + 100,
)).T,
np.vstack((
_generate_vector(),
_generate_vector(),
)).T,
)), np.hstack((np.zeros(1000), np.ones(1000)))
def all_but_first_column(X):
return X[:, 1:]
def drop_first_component(X, y):
"""
Create a pipeline with PCA and the column selector and use it to
transform the dataset.
"""
pipeline = make_pipeline(
PCA(), FunctionTransformer(all_but_first_column),
)
X_train, X_test, y_train, y_test = train_test_split(X, y)
pipeline.fit(X_train, y_train)
return pipeline.transform(X_test), y_test
if __name__ == '__main__':
X, y = generate_dataset()
plt.scatter(X[:, 0], X[:, 1], c=y, s=50)
plt.show()
X_transformed, y_transformed = drop_first_component(*generate_dataset())
plt.scatter(
X_transformed[:, 0],
np.zeros(len(X_transformed)),
c=y_transformed,
s=50,
)
plt.show()
| bsd-3-clause |
looooo/paraBEM | examples/plots/lifting_line.py | 1 | 1404 | from __future__ import division
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import paraBEM
from paraBEM.liftingline import LiftingLine
from paraBEM.utils import check_path
# WingGeometry
spw = 2
numpos = 50
z_fac_1 = -0.3
z_fac_2 = -0.7
y = np.sin(np.linspace(0, np.pi/2, numpos)) * spw/2
x = [0. for _ in y]
z = [i**2 * z_fac_1 + i**6 * z_fac_2 for i in y]
mirror = lambda xyz: [xyz[0], -xyz[1], xyz[2]]
wing = list(zip(x, y, z))
wing = list(map(mirror, wing))[::-1] + list(wing)[1:]
wing = [paraBEM.Vector3(*i) for i in wing]
# LiftingLine
lifting_line = LiftingLine(wing)
lifting_line.v_inf = paraBEM.Vector3(1, 0, 0)
lifting_line.solve_for_best_gamma(1)
gamma = [i.best_gamma for i in lifting_line.segments]
gamma_max = max(gamma)
# Plot
gamma_el = lambda y: gamma_max * (1 - (y / spw * 2)**2)**(1 / 2)
mids = [[i.mids.x, i.mids.y, i.mids.z] for i in lifting_line.segments]
x, y, z = zip(*mids)
fig = plt.figure()
ax1 = fig.add_subplot(3, 1, 1)
ax1.plot(y, z)
ax2 = fig.add_subplot(3, 1, 2)
ax2.plot(y, x, marker="x")
ax3 = fig.add_subplot(3, 1, 3)
y_el = np.linspace(-1, 1, 400)
ax3.plot([-spw/2] + list(y) + [spw/2], [0] + gamma + [0], marker="x")
ax3.plot(y_el, list(map(gamma_el, y_el)))
plt.savefig(check_path("results/2d/liftingline.png"))
total = 0
for i in lifting_line.segments:
total += i.lift_factor * i.best_gamma
print(total)
| gpl-3.0 |
ipashchenko/emcee-x | document/plots/oned.py | 16 | 2164 | import os
import sys
import time
import numpy as np
import matplotlib.pyplot as pl
import h5py
from multiprocessing import Pool
sys.path.append(os.path.abspath(os.path.join(__file__, "..", "..", "..")))
import emcee
# import acor
def lnprobfn(p, icov):
return -0.5 * np.dot(p, np.dot(icov, p))
def random_cov(ndim, dof=1):
v = np.random.randn(ndim * (ndim + dof)).reshape((ndim + dof, ndim))
return (sum([np.outer(v[i], v[i]) for i in range(ndim + dof)])
/ (ndim + dof))
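# --- Editor's sketch (illustrative only) ---
# random_cov sums outer products of Gaussian vectors, so the result is a
# symmetric positive semi-definite (Wishart-like) matrix; the hypothetical
# helper below just verifies that property numerically.
def _check_random_cov_psd(ndim=4):
    cov = random_cov(ndim)
    return np.all(np.linalg.eigvalsh(cov) >= -1e-12)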
_rngs = {}
def _worker(args):
i, outfn, nsteps = args
pid = os.getpid()
_random = _rngs.get(pid, np.random.RandomState(int(int(pid)
+ time.time())))
_rngs[pid] = _random
ndim = int(np.ceil(2 ** (7 * _random.rand())))
nwalkers = 2 * ndim + 2
# nwalkers += nwalkers % 2
print ndim, nwalkers
cov = random_cov(ndim)
icov = np.linalg.inv(cov)
ens_samp = emcee.EnsembleSampler(nwalkers, ndim, lnprobfn,
args=[icov])
ens_samp.random_state = _random.get_state()
pos, lnprob, state = ens_samp.run_mcmc(np.random.randn(nwalkers * ndim)
.reshape([nwalkers, ndim]), nsteps)
proposal = np.diag(cov.diagonal())
mh_samp = emcee.MHSampler(proposal, ndim, lnprobfn,
args=[icov])
mh_samp.random_state = state
mh_samp.run_mcmc(np.random.randn(ndim), nsteps)
f = h5py.File(outfn)
f["data"][i, :] = np.array([ndim, np.mean(ens_samp.acor),
np.mean(mh_samp.acor)])
f.close()
def oned():
nsteps = 10000
niter = 10
nthreads = 2
outfn = os.path.join(os.path.split(__file__)[0], "gauss_scaling.h5")
print outfn
f = h5py.File(outfn, "w")
f.create_dataset("data", (niter, 3), "f")
f.close()
pool = Pool(nthreads)
pool.map(_worker, [(i, outfn, nsteps) for i in range(niter)])
f = h5py.File(outfn)
data = f["data"][...]
f.close()
pl.clf()
pl.plot(data[:, 0], data[:, 1], "ks", alpha=0.5)
pl.plot(data[:, 0], data[:, 2], ".k", alpha=0.5)
pl.savefig(os.path.join(os.path.split(__file__)[0], "gauss_scaling.png"))
if __name__ == "__main__":
oned()
| mit |
rabrahm/ceres | utils/FastRotators/spfr.py | 1 | 18831 | from pylab import *
import pyfits
from PyAstronomy import pyasl
import scipy
from scipy import interpolate
from scipy import ndimage
from scipy import signal
import pickle
from matplotlib.backends.backend_pdf import PdfPages
import os
#from pyevolve import G1DList
#from pyevolve import GSimpleGA
from multiprocessing import Pool
import time
def download_models(webpage='http://svo2.cab.inta-csic.es/theory/models/coelho/high/data/',dest='../../data/'):
os.system('mkdir '+dest+'/COELHO2014')
cwd = os.getcwd()
os.chdir(dest+'/COELHO2014')
tf = np.arange(6000,10001,250)
gf = np.arange(2.5,4.6,0.5)
#gf = np.array([2.5])
zf = np.array([-1.,-0.5,0.0,0.2])
for t in tf:
for g in gf:
for z in zf:
modname = get_modname(t,g,z)
if z<0:
sz = 'm'
else:
sz = 'p'
sz = sz+str(float(np.absolute(z))).replace('.','')+'p00/'
os.system('wget ' + webpage+sz+modname+'.fits')
os.system('wget ' + webpage+sz+modname+'plc.fits')
os.chdir(cwd)
return True
def n_Edlen(l):
sigma = 1e4 / l
sigma2 = sigma*sigma
n = 1 + 1e-8 * (8342.13 + 2406030 / (130-sigma2) + 15997/(38.9-sigma2))
return n
def n_Morton(l):
sigma = 1e4 / l
sigma2 = sigma*sigma
n = 1 + 6.4328e-5 + 2.94981e-2 / (146.-sigma2) + 2.5540e-4/(41.-sigma2)
return n
def ToAir(l):
return (l / n_Edlen(l))
def ToVacuum(l):
cond = 1
l_prev = l.copy()
while(cond):
l_new = n_Edlen(l_prev) * l
if (max(np.absolute(l_new - l_prev)) < 1e-10): cond = 0
l_prev = l_new
return l_prev
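# --- Editor's sketch (illustrative only) ---
# ToAir/ToVacuum convert wavelengths between air and vacuum using the Edlen
# refractive index above (wavelengths in Angstrom, as the 1e4/lambda wavenumber
# in n_Edlen implies); a round trip should agree to roughly the 1e-10
# convergence tolerance of ToVacuum.
def _air_vacuum_roundtrip_sketch():
    w_air = np.array([4000.0, 5500.0, 6562.8])
    w_vac = ToVacuum(w_air)
    return np.max(np.abs(ToAir(w_vac) - w_air))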
def get_modname(t,g,z):
st = str(int(t))
if t<10000:
st = '0'+st
sg = '+'+str(np.around(g,1))
if z < 0:
sz = 'm'
else:
sz = 'p'
z=float(z)
sz = sz + str(np.around(np.absolute(z),1))
sz = sz.replace('.','')
return 't'+st+'_g'+sg+'_'+sz+'p00_hr'
def get_model(t,g,z,model_path='../../data/COELHO2014/'):
modname = model_path + get_modname(t,g,z)
try:
out = pyfits.getdata(modname+'.fits')
except:
out = pyfits.getdata(modname+'plc.fits')
return out
def get_near(x,vec):
if x == vec[0]:
mmin = vec[0]
mmax = vec[1]
elif x == vec[-1]:
mmin = vec[-2]
mmax = vec[-1]
else:
tvec = vec - x
In = np.where(tvec < 0)[0]
mmin = tvec[In].max() + x
Ix = np.where(tvec >= 0)[0]
mmax = tvec[Ix].min() + x
return mmin,mmax
def trilinear_interpolation(t,g,z,model_path='../../data/COELHO2014/'):
teffs = np.arange(6000,10001,250)
loggs = np.arange(2.5,4.6,0.5)
fehs = np.array([-1.,-0.5,0.0,0.2])
x0,x1 = get_near(t,teffs)
y0,y1 = get_near(g,loggs)
z0,z1 = get_near(z,fehs)
xd = (t-x0)/(x1-x0)
yd = (g-y0)/(y1-y0)
zd = (z-z0)/(z1-z0)
try:
hd = pyfits.getheader(model_path+get_modname(x0,y0,z0)+'.fits')
except:
hd = pyfits.getheader(model_path+get_modname(x0,y0,z0)+'plc.fits')
c000 = get_model(x0,y0,z0,model_path)
c001 = get_model(x0,y0,z1,model_path)
c010 = get_model(x0,y1,z0,model_path)
c100 = get_model(x1,y0,z0,model_path)
c110 = get_model(x1,y1,z0,model_path)
c101 = get_model(x1,y0,z1,model_path)
c011 = get_model(x0,y1,z1,model_path)
c111 = get_model(x1,y1,z1,model_path)
wav = np.arange(len(c111))*hd['CDELT1'] + hd['CRVAL1']
c00 = c000*(1-xd) + c100*xd
c01 = c001*(1-xd) + c101*xd
c10 = c010*(1-xd) + c110*xd
c11 = c011*(1-xd) + c111*xd
c0 = c00*(1-yd) + c10*yd
c1 = c01*(1-yd) + c11*yd
c = c0*(1-zd) + c1*zd
return wav,c
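# --- Editor's sketch (illustrative only) ---
# trilinear_interpolation above blends the eight surrounding grid models with
# the standard trilinear weights xd, yd, zd. The hypothetical helper below
# applies the same weighting to scalar corner values, purely to document the
# scheme.
def _trilinear_weights_sketch(xd, yd, zd, corners):
    # corners: dict keyed by (i, j, k) in {0, 1}^3, values are scalars
    c00 = corners[(0, 0, 0)] * (1 - xd) + corners[(1, 0, 0)] * xd
    c01 = corners[(0, 0, 1)] * (1 - xd) + corners[(1, 0, 1)] * xd
    c10 = corners[(0, 1, 0)] * (1 - xd) + corners[(1, 1, 0)] * xd
    c11 = corners[(0, 1, 1)] * (1 - xd) + corners[(1, 1, 1)] * xd
    c0 = c00 * (1 - yd) + c10 * yd
    c1 = c01 * (1 - yd) + c11 * yd
    return c0 * (1 - zd) + c1 * zd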
def normalize_model(w,f):
ow = w.copy()
of = f.copy()
#plot(w,f)
while True:
#medflts = scipy.signal.medfilt(f,1001)
coef = np.polyfit(w,f,6)
fited = np.polyval(coef,w)
res = f - fited
I = np.where(res > -np.sqrt(np.var(res)))[0]
w,f = w[I],f[I]
if len(w) < 0.3* len(ow):
break
#plot(ow,np.polyval(coef,ow))
#show()
return coef
def spec_ccf(sw,sf,mw,mf,vi,vf,dv):
mf = mf -1
mf = -mf
#plot(mw,mf)
tck = interpolate.splrep(mw,mf,k=1)
v = vi
retccf = []
vels = []
while v<=vf:
swt = sw * (1 + v/299792.458)
mft = interpolate.splev(swt,tck)
#if v == 0:
# plot(swt,mft)
# plot(swt,sft)
# show()
mft -= np.mean(mft)
sft = sf - np.mean(sf)
#sft = sf.copy()
#print np.sum(mft**2),np.sum(sft**2)
retccf.append(np.sum(mft*sft)/np.sqrt(np.sum(mft**2)*np.sum(sft**2)))
vels.append(v)
v+=dv
return np.array(vels),np.array(retccf)
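# --- Editor's sketch (illustrative only) ---
# spec_ccf cross-correlates an observed spectrum against a continuum-normalized
# absorption model over a grid of trial velocities; because the model is
# depth-inverted inside the routine, the best-matching velocity sits at the
# *minimum* of the returned CCF (as used by RVforFR below). This hypothetical
# self-test correlates a synthetic line with itself and should find that
# minimum near 0 km/s.
def _spec_ccf_selftest_sketch():
    w = np.linspace(5000.0, 5010.0, 2000)
    f = 1.0 - 0.5 * np.exp(-0.5 * ((w - 5005.0) / 0.05) ** 2)
    vels, ccf = spec_ccf(w, f, w, f, -100.0, 100.0, 2.0)
    return vels[np.argmin(ccf)]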
def ccf_fft(swt,sft,mwt,mft):
mf = mft -1
mf = -mf
#plot(mw,mf)
tck = interpolate.splrep(np.log(mwt),mf,k=1)
sw = np.log(swt)
tck2 = interpolate.splrep(sw,sft,k=1)
nsw = np.linspace(sw[0], sw[-1], 5000)
sf = interpolate.splev(nsw,tck2)
mf = interpolate.splev(nsw,tck)
sf -= np.mean(sf)
mf -= np.mean(mf)
plot(nsw,sf)
plot(nsw,mf)
show()
retccf = np.fft.ifft(np.conj(np.fft.fft(sf))*np.fft.fft(mf))
retccf = np.hstack((retccf[2500:],retccf[:2500]))
retvels = np.arange(len(retccf)) - 0.5*len(retccf)
retvels *= (nsw[1]-nsw[0])
retvels = 299792.458*(np.exp(retvels)-1.)
return retvels, retccf
def ccf_simple(sw,sf,mw,mf,rv):
mf = mf -1
mf = -mf
#plot(mw,mf)
tck = interpolate.splrep(mw,mf,k=1)
swt = sw * (1 + rv/299792.458)
mft = interpolate.splev(swt,tck)
mft -= np.mean(mft)
sft = sf - np.mean(sf)
return np.sum(mft*sft)/np.sqrt(np.sum(mft**2)*np.sum(sft**2))
def clean_strong_lines(mw,sc,mode=1):
if mode==1:
#""""
I = np.where((mw>6520)&(mw<6600))[0]
sc[I] = 1.
I = np.where((mw>5888)&(mw<5897))[0]
sc[I] = 1.
I = np.where((mw>4310)&(mw<4360))[0]
sc[I] = 1.
I = np.where((mw>4840)&(mw<4880))[0]
sc[I] = 1.
I = np.where((mw>4070)&(mw<4130))[0]
sc[I] = 1.
I = np.where((mw>3875)&(mw<3900))[0]
sc[I] = 1.
I = np.where((mw>3920)&(mw<3945))[0]
sc[I] = 1.
I = np.where((mw>3955)&(mw<3980))[0]
sc[I] = 1.
I = np.where(mw<3850)[0]
sc[I] = 1.
#"""
if mode==2:
#""""
I = np.where((mw>6550)&(mw<6570))[0]
sc[I] = 1.
I = np.where((mw>5888)&(mw<5897))[0]
sc[I] = 1.
I = np.where((mw>4320)&(mw<4350))[0]
sc[I] = 1.
I = np.where((mw>4850)&(mw<4870))[0]
sc[I] = 1.
I = np.where((mw>4090)&(mw<4110))[0]
sc[I] = 1.
I = np.where((mw>3875)&(mw<3900))[0]
sc[I] = 1.
I = np.where((mw>3920)&(mw<3945))[0]
sc[I] = 1.
I = np.where((mw>3955)&(mw<3980))[0]
sc[I] = 1.
I = np.where(mw<3850)[0]
sc[I] = 1.
#"""
return sc
def RVforFR(wavs,flxs,teff=6700,logg=4.0,feh=-1.0,vsini=100.,model_path='../../data/COELHO2014/',vmin=-1000.,vmax=1000.,vstep=10.):
def fitfunc(p,x):
ret = p[3] + p[0] * np.exp(-.5*((x-p[1])/p[2])**2)
return ret
errfunc = lambda p,x,y: np.ravel( (fitfunc(p,x)-y) )
#sc = get_model(teff,logg,feh)
#hd = pyfits.getheader(model_path+get_modname(7000,4.5,0.0)+'.fits')
#wav = np.arange(len(sc))*hd['CDELT1'] + hd['CRVAL1']
teff = float(teff)
try:
sc = get_model(teff,logg,feh)
hd = pyfits.getheader(model_path+get_modname(7000,4.5,0.0)+'.fits')
mw = np.arange(len(sc))*hd['CDELT1'] + hd['CRVAL1']
except:
mw,sc = trilinear_interpolation(teff,logg,feh,model_path)
for order in range(len(flxs)):
flxs[order] = clean_strong_lines(wavs[order],flxs[order])
sc = clean_strong_lines(mw,sc)
II = np.where(sc != 1)[0]
JJ = np.where(sc == 1)[0]
coef = normalize_model(mw[II],sc[II])
sc /= np.polyval(coef,mw)
sc[JJ] = 1.
mw = ToVacuum(mw)
weis1 = []
ccftot = []
for i in range(wavs.shape[0]):
#plot(wavs[i],flxs[i])
scf = flxs[i]
scw = wavs[i]
J = np.where(scf!=0)[0]
scw,scf = scw[J],scf[J]
I = np.where((mw>scw[0]-100) & (mw<scw[-1]+100))
tmf = pyasl.fastRotBroad(mw[I], sc[I], 0.5, vsini)
#plot(mw[I],tmf)
J = np.where(scf!=1)[0]
if len(J)>100:
ccv,ccf = spec_ccf(scw,scf,mw[I],tmf,vmin,vmax,vstep)
#plot(ccv,ccf)
#show()
#ccf = np.array(ccf)
wei1 = len(np.where(scf!=1)[0])**2
weis1.append(wei1)
if len(ccftot)==0:
ccftot = ccf.copy()*wei1
else:
ccftot = np.vstack((ccftot,ccf.copy()*wei1))
#show()
weis1 = np.array(weis1)
ccftot = np.sum(ccftot,axis=0)/ np.sum(weis1)
p0 = [ccftot.min(),ccv[np.argmin(ccftot)],vsini,ccftot[0]]
p1, success = scipy.optimize.leastsq(errfunc,p0, args=(ccv,ccftot))
return p1,ccv,ccftot,fitfunc(p1,ccv)
def calc_bss2(vels,xc,coef, bot_i=0.15, bot_f=0.4, top_i=0.6, top_f=0.9, dt=0.01):
try:
I1 = np.where((vels>coef[1]-3*coef[2]) & (vels<coef[1]) )[0]
I2 = np.where((vels<coef[1]+3*coef[2]) & (vels>coef[1]) )[0]
I3 = np.where(vels<coef[1]-4*coef[2])[0]
I4 = np.where(vels>coef[1]+4*coef[2])[0]
I = np.hstack((I3,I4))
base = np.median(xc[I])
xc = base - xc
xc /= xc.max()
v1,x1 = vels[I1],xc[I1]
v2,x2 = vels[I2],xc[I2]
#plot(v1,x1)
#plot(v2,x2)
#show()
dp = top_f
vect = []
while dp >= top_i:
lb = np.where(x1>dp)[0][0]
m = (v1[lb] - v1[lb-1])/(x1[lb]-x1[lb-1])
n = v1[lb] - m*x1[lb]
bs1 = m*dp+n
lb = np.where(x2>dp)[0][-1]
m = (v2[lb] - v2[lb+1])/(x2[lb]-x2[lb+1])
n = v2[lb] - m*x2[lb]
bs2 = m*dp+n
vect.append(0.5*(bs2+bs1))
dp-=dt
vect = np.array(vect)
dp = bot_f
vecb = []
while dp >= bot_i:
lb = np.where(x1>dp)[0][0]
m = (v1[lb] - v1[lb-1])/(x1[lb]-x1[lb-1])
n = v1[lb] - m*x1[lb]
bs1 = m*dp+n
lb = np.where(x2>dp)[0][-1]
m = (v2[lb] - v2[lb+1])/(x2[lb]-x2[lb+1])
n = v2[lb] - m*x2[lb]
bs2 = m*dp+n
vecb.append(0.5*(bs2+bs1))
dp-=dt
vecb = np.array(vecb)
return np.median(vecb) - np.median(vect)
except:
return -999.0
"""
def lnlike(theta, W, F, Ferr):
mw,sc = trilinear_interpolation(int(theta[0]),theta[1],theta[2])
sct = clean_strong_lines(mw,sc.copy())
#plot(mw,sc)
#show()
coef = normalize_model(mw,sct)
sc /= np.polyval(coef,mw)
#print gfd
mw = ToVacuum(mw)
mw *= 1 + theta[3]/299792.458
totD,totM,totE = np.array([]),np.array([]),np.array([])
for i in range(W.shape[0]):
scf = F[i]
scw = W[i]
scfe = Ferr[i]
J = np.where(scf!=0)[0]
scw,scf,scfe = scw[J],scf[J],scfe[J]
I = np.where((mw>scw[0]-10) & (mw<scw[-1]+10))
tmf = pyasl.fastRotBroad(mw[I], sc[I], 0.5, theta[4])
tck = interpolate.splrep(mw[I],tmf,k=1)
tmf = interpolate.splev(scw,tck)
tmf = clean_strong_lines(scw,tmf.copy())
I = np.where(tmf!=1)[0]
#plot(scw,tmf)
#plot(scw[I],tmf[I])
#plot(scw[I],scf[I])
#show()
#print gfd
tmf = tmf[I]
scf = scf[I]
scfe = scfe[I]
tmf /= np.sum(tmf)
tsf = scf/np.sum(scf)
tse = scfe*(np.sum(scf)**2)
totD = np.hstack((totD,tsf))
totM = np.hstack((totM,tmf))
totE = np.hstack((totE,tse))
#plot(scw[I],tsf)
#plot(scw[I],tmf)
#plot(scw[I],tsf + 1./np.sqrt(tse))
#show()
#print fds
#print theta
#show()
#print gvfd
#ret = -np.log(2*np.pi) + np.log(np.sum(np.exp(-0.5*((y-model)/yerr)**2)/yerr))
#ret = -0.5*(np.sum(inv_sigma2*(F-model)**2 - np.log(inv_sigma2)))
ret = -0.5*(np.sum(totE*(totD-totM)**2 - np.log(totE)))
#for i in range(len(F)):
# errorbar(Y,F[i],yerr=Ferr[i],fmt='b')
#for j in model:
# plot(Y,j,'r')
#show()
#print theta, ret
if np.isnan(ret):
return -np.inf
else:
return ret
def lnprior(theta):
if 6000 < theta[0] < 9000 and 3.0 < theta[1] < 4.5 and -1 < theta[2] < 0.2 and -500 < theta[3] < 500 and 1. < theta[4] < 500.:
return 0.0
return -np.inf
def lnprob(theta, W,F,Ferr):
lp = lnprior(theta)
if not np.isfinite(lp):
return -np.inf
return lp + lnlike(theta,W,F,Ferr)
"""
def multiccf(pars):
teff,logg,feh,vsini=pars[0],pars[1],pars[2],pars[3]
vmin=-500
vmax=500.
vstep=20.
#sc = get_model(teff,logg,feh)
#hd = pyfits.getheader(model_path+get_modname(7000,4.5,0.0)+'.fits')
#wav = np.arange(len(sc))*hd['CDELT1'] + hd['CRVAL1']
try:
sc = get_model(teff,logg,feh)
hd = pyfits.getheader(model_path+get_modname(7000,4.5,0.0)+'.fits')
mw = np.arange(len(sc))*hd['CDELT1'] + hd['CRVAL1']
except:
mw,sc = trilinear_interpolation(teff,logg,feh,model_path)
sc = clean_strong_lines(mw,sc)
II = np.where(sc != 1)[0]
JJ = np.where(sc == 1)[0]
coef = normalize_model(mw[II],sc[II])
sc /= np.polyval(coef,mw)
sc[JJ] = 1.
mw = ToVacuum(mw)
weis1 = []
ccftot = []
for i in range(wavs.shape[0]):
scf = flxs[i].copy()
scw = wavs[i].copy()
J = np.where(scf!=0)[0]
scw,scf = scw[J],scf[J]
I = np.where((mw>scw[0]-100) & (mw<scw[-1]+100))
tmf = pyasl.fastRotBroad(mw[I], sc[I], 0.5, vsini)
#plot(mw[I],tmf)
J = np.where(scf!=1)[0]
if len(J)>100:
ccv,ccf = spec_ccf(scw,scf,mw[I],tmf,vmin,vmax,vstep)
#ccv,ccf = ccf_fft(scw,scf,mw[I],tmf)
#plot(ccv,ccf)
#show()
wei1 = len(np.where(scf!=1)[0])**2
weis1.append(wei1)
if len(ccftot)==0:
ccftot = ccf.copy()*wei1
else:
ccftot = np.vstack((ccftot,ccf.copy()*wei1))
weis1 = np.array(weis1)
ccftot = np.sum(ccftot,axis=0)/ np.sum(weis1)
#print gfds
#ccftot = np.mean(ccftot,axis=0)
#print pars, ccftot.min()
return ccftot.min()
def get_pars_fr(wavst,flxst,model_patht='../../data/COELHO2014/',npools=4,fixG=1.0):
for order in range(len(flxst)):
flxst[order] = clean_strong_lines(wavst[order],flxst[order],mode=1)
t0 = time.time()
global wavs,flxs
global model_path
wavs,flxs=wavst.copy(),flxst.copy()
model_path=model_patht
gt = np.array([6000,7000,8000,9000,10000])
gg = np.array([2.5,3.0,3.5,4.0,4.5])
if fixG != -1:
gg = np.array([fixG])
gz = np.array([-1,-0.5,0.0,0.2])
gr = np.array([10.,50.,100.,150.,200.,250.,300.])
#"""
tr = np.tile(gr,len(gt)*len(gg)*len(gz))
tg = np.repeat(np.tile(gg,len(gt)),len(gr)*len(gz))
tz = np.repeat(np.tile(gz,len(gt)*len(gg)),len(gr))
tt = np.repeat(gt,len(gg)*len(gr)*len(gz))
tot = np.vstack((tt,tg,tz,tr)).T
#for pars in tot:
# pars = [8000,4.0,-0.5,40.0]
# print pars, multiccf(pars)
p = Pool(npools)
vals = np.array((p.map(multiccf, list(tot))))
p.terminate()
I = np.argmin(vals)
best_vals = tot[I]
bt,bg,bz,br = best_vals[0],best_vals[1],best_vals[2],best_vals[3]
#"""
t1 = time.time()
print bt,bg,bz,br, (t1-t0)/60.,'mins'
#bt,bg,bz,br = 7000.,4.5, 0.2, 100.0
gt = np.arange(bt-1000,bt+1001,250)
I = np.where((gt>=6000) & (gt<=10000))[0]
gt = gt[I]
gr = np.arange(br-60.,br+61.,20.)
I = np.where(gr>=10)[0]
gr = gr[I]
tr = np.tile(gr,len(gt)*len(gg)*len(gz))
tg = np.repeat(np.tile(gg,len(gt)),len(gr)*len(gz))
tz = np.repeat(np.tile(gz,len(gt)*len(gg)),len(gr))
tt = np.repeat(gt,len(gg)*len(gr)*len(gz))
tot = np.vstack((tt,tg,tz,tr)).T
p = Pool(npools)
vals = np.array((p.map(multiccf, list(tot))))
p.terminate()
I = np.argmin(vals)
best_vals = tot[I]
bt,bg,bz,br = best_vals[0],best_vals[1],best_vals[2],best_vals[3]
t2 = time.time()
print bt,bg,bz,br, (t2-t1)/60.,'mins'
#np.savetxt('temp_grid.txt',vals)
if fixG==-1:
grid = np.reshape(vals,(len(gt),len(gg),len(gz),len(gr)))
tckt = interpolate.splrep(gt,np.arange(len(gt)),k=1)
tckg = interpolate.splrep(gg,np.arange(len(gg)),k=1)
tckz = interpolate.splrep(gz,np.arange(len(gz)),k=1)
tckr = interpolate.splrep(gr,np.arange(len(gr)),k=1)
itckt = interpolate.splrep(np.arange(len(gt)),gt,k=1)
itckg = interpolate.splrep(np.arange(len(gg)),gg,k=1)
itckz = interpolate.splrep(np.arange(len(gz)),gz,k=1)
itckr = interpolate.splrep(np.arange(len(gr)),gr,k=1)
st = np.arange(gt[0],gt[-1]+1,10.)
sg = np.arange(gg[0],gg[-1]+0.01,0.1)
sz = np.arange(gz[0],gz[-1]+0.01,0.1)
sr = np.arange(gr[0],gr[-1]+1.,5.)
st = interpolate.splev(st,tckt)
sg = interpolate.splev(sg,tckg)
sz = interpolate.splev(sz,tckz)
sr = interpolate.splev(sr,tckr)
tr2 = np.tile(sr,len(st)*len(sg)*len(sz))
tg2 = np.repeat(np.tile(sg,len(st)),len(sr)*len(sz))
tz2 = np.repeat(np.tile(sz,len(st)*len(sg)),len(sr))
tt2 = np.repeat(st,len(sg)*len(sr)*len(sz))
tot2 = np.vstack((tt2,tg2,tz2,tr2))
zi = ndimage.map_coordinates(grid, tot2, order=3, mode='nearest')
I = np.argmin(zi)
minval = tot2[:,I]
mint = interpolate.splev(minval[0],itckt)
ming = interpolate.splev(minval[1],itckg)
minz = interpolate.splev(minval[2],itckz)
minr = interpolate.splev(minval[3],itckr)
else:
grid = np.reshape(vals,(len(gt),len(gz),len(gr)))
tckt = interpolate.splrep(gt,np.arange(len(gt)),k=1)
tckz = interpolate.splrep(gz,np.arange(len(gz)),k=1)
tckr = interpolate.splrep(gr,np.arange(len(gr)),k=1)
itckt = interpolate.splrep(np.arange(len(gt)),gt,k=1)
itckz = interpolate.splrep(np.arange(len(gz)),gz,k=1)
itckr = interpolate.splrep(np.arange(len(gr)),gr,k=1)
st = np.arange(gt[0],gt[-1]+1,10.)
sz = np.arange(gz[0],gz[-1]+0.01,0.1)
sr = np.arange(gr[0],gr[-1]+1.,5.)
st = interpolate.splev(st,tckt)
sz = interpolate.splev(sz,tckz)
sr = interpolate.splev(sr,tckr)
tr2 = np.tile(sr,len(st)*len(sz))
tz2 = np.repeat(np.tile(sz,len(st)),len(sr))
tt2 = np.repeat(st,len(sr)*len(sz))
tot2 = np.vstack((tt2,tz2,tr2))
zi = ndimage.map_coordinates(grid, tot2, order=3, mode='nearest')
I = np.argmin(zi)
minval = tot2[:,I]
mint = interpolate.splev(minval[0],itckt)
ming = fixG
minz = interpolate.splev(minval[1],itckz)
minr = interpolate.splev(minval[2],itckr)
#d = {'grid':grid, 'zi':zi, 'tot2':tot2, 'gt':gt, 'gg':gg, 'gz':gz, 'gr':gr}
#pickle.dump(d,open('temp_dict.pkl'))
return float(mint),float(ming),float(minz),float(minr)
def plot_CCF_FR(xc_dict,path='XC.pdf'):
vels = xc_dict['vels']
xc_av = xc_dict['xc_av']
XCmodelgau = xc_dict['XCmodelgau']
#refvel = xc_dict['refvel']
p1gau = xc_dict['p1gau']
f1 = figure()
pp = PdfPages(path)
ax1 = f1.add_subplot(111)
ax1.plot(vels, xc_av,'b.', label='CCF')
ax1.plot(vels, XCmodelgau,'r-',label='Gaussian fit')
xlabel('Velocity (km/s)')
ylabel('XC')
ax1.axvline(p1gau[1],linestyle=':',color='r')
ax1.axhline(0.0,linestyle='-')
title('Average Cross-Correlation Function + Fit')
handles, labels = ax1.get_legend_handles_labels()
ax1.legend(handles[::-1], labels[::-1],prop={'size':6})
pp.savefig()
pp.close()
clf()
pass
"""
def trans_chromosome(chromosome):
teff = chromosome[0]*100.+chromosome[1]*10.+chromosome[2]
m = (10000.- 6000.)/999.
n = 6000.
teff = teff*m + n
logg = chromosome[3] + chromosome[4]*0.1
m = (4.5 - 3.0)/9.9
n = 3.
logg = logg*m + n
feh = chromosome[5] + chromosome[6]*0.1
m = (0.2 - -1.)/9.9
n = -1.
feh = feh*m + n
vsini = chromosome[7]*10. + chromosome[8]
m = (300. - 10.)/99.
n = 10.
vsini = vsini*m + n
return teff, logg, feh, vsini
global wavs, flxs
def find_pars_GA(wavs,flxs,model_path='../../data/COELHO2014/'):
def eval_func(chromosome):
print list(chromosome)
teff, logg, feh, vsini = trans_chromosome(chromosome)
print teff, logg, feh, vsini
pt,vels,ccf,mod = RVforFR(wavs,flxs,teff=teff,logg=logg,feh=feh,vsini=vsini,model_path=model_path)
score = -ccf.min()
return score
genome = G1DList.G1DList(9)
genome.evaluator.set(eval_func)
ga = GSimpleGA.GSimpleGA(genome, interactiveMode=True)
ga.setGenerations(40)
ga.setMutationRate(0.2)
ga.setPopulationSize(20)
#ga.setCrossoverRate(1.0)
genome.setParams(rangemin=0, rangemax=9)
#ga.setMultiProcessing(True)
ga.evolve(freq_stats=10)
print ga.bestIndividual()
print trans_chromosome(ga.bestIndividual())
"""
| mit |
levelrf/level_basestation | gr-filter/examples/fir_filter_ccc.py | 13 | 3154 | #!/usr/bin/env python
from gnuradio import gr, filter
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import sys
try:
import scipy
except ImportError:
print "Error: could not import scipy (http://www.scipy.org/)"
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: could not import pylab (http://matplotlib.sourceforge.net/)"
sys.exit(1)
class example_fir_filter_ccc(gr.top_block):
def __init__(self, N, fs, bw, tw, atten, D):
gr.top_block.__init__(self)
self._nsamps = N
self._fs = fs
self._bw = bw
self._tw = tw
self._at = atten
self._decim = D
taps = filter.firdes.low_pass_2(1, self._fs, self._bw, self._tw, self._at)
print "Num. Taps: ", len(taps)
self.src = gr.noise_source_c(gr.GR_GAUSSIAN, 1)
self.head = gr.head(gr.sizeof_gr_complex, self._nsamps)
self.filt0 = filter.fir_filter_ccc(self._decim, taps)
self.vsnk_src = gr.vector_sink_c()
self.vsnk_out = gr.vector_sink_c()
self.connect(self.src, self.head, self.vsnk_src)
self.connect(self.head, self.filt0, self.vsnk_out)
def main():
parser = OptionParser(option_class=eng_option, conflict_handler="resolve")
parser.add_option("-N", "--nsamples", type="int", default=10000,
help="Number of samples to process [default=%default]")
parser.add_option("-s", "--samplerate", type="eng_float", default=8000,
help="System sample rate [default=%default]")
parser.add_option("-B", "--bandwidth", type="eng_float", default=1000,
help="Filter bandwidth [default=%default]")
parser.add_option("-T", "--transition", type="eng_float", default=100,
help="Transition band [default=%default]")
parser.add_option("-A", "--attenuation", type="eng_float", default=80,
help="Stopband attenuation [default=%default]")
parser.add_option("-D", "--decimation", type="int", default=1,
help="Decmation factor [default=%default]")
(options, args) = parser.parse_args ()
put = example_fir_filter_ccc(options.nsamples,
options.samplerate,
options.bandwidth,
options.transition,
options.attenuation,
options.decimation)
put.run()
data_src = scipy.array(put.vsnk_src.data())
data_snk = scipy.array(put.vsnk_out.data())
# Plot the signals PSDs
nfft = 1024
f1 = pylab.figure(1, figsize=(12,10))
s1 = f1.add_subplot(1,1,1)
s1.psd(data_src, NFFT=nfft, noverlap=nfft/4,
Fs=options.samplerate)
s1.psd(data_snk, NFFT=nfft, noverlap=nfft/4,
Fs=options.samplerate)
f2 = pylab.figure(2, figsize=(12,10))
s2 = f2.add_subplot(1,1,1)
s2.plot(data_src)
s2.plot(data_snk.real, 'g')
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
bibarz/bibarz.github.io | dabble/ab/auth_algorithms.py | 1 | 17145 | # Import any required libraries or modules.
import numpy as np
from sklearn import svm
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
import csv
import sys
class MetaParams:
n_lda_ensemble = 101
lda_ensemble_feature_fraction = 0.4
mode = 'lda_ensemble'
# The following is a hacky container for Statistics computed from the
# whole training set; we don't want to have to recompute them again at every call
# to build_template (it becomes slow for parameter searches with cross validation),
# so we preserve it here between calls. The proper place to
# do this would be in main.py, but we don't want to touch that.
Global = lambda: None
Global.ready = False
def pca_converter(data, feature_discriminabilities, explained_variance):
'''
PCA conversion of the data. The PCA is based on the complete dataset, but each feature
is normalized to a std dev proportional to the given discriminability.
:param data: n_samples x n_features matrix with all data to do PCA on
:param feature_discriminabilities: n_features length vector
:param explained_variance: ratio of explained variance (between 0 and 1) that will
determine how many components are kept
:return: function transforming data into pca components, and covariance matrix
of transformed data
'''
mu = np.mean(data, axis=0)
std = np.std(data, axis=0) / feature_discriminabilities
normalized_data = (data - mu) / std
u, s, vt = np.linalg.svd(normalized_data)
cut_idx = np.argmin(np.abs(np.cumsum(s * s) / np.sum(s * s) - explained_variance))
vt = vt[:cut_idx + 1]
return (lambda x, mu=mu, std=std, vt=vt: np.dot((x - mu) / std, vt.T)),\
np.diag(s[:cut_idx + 1] ** 2 / (len(data) - 1))
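# --- Editor's sketch (illustrative only) ---
# Minimal pca_converter usage: unit discriminabilities reduce it to ordinary
# standardization followed by an SVD projection keeping the requested
# explained variance. Synthetic data; the helper name is hypothetical.
def _pca_converter_usage_sketch():
    rng = np.random.RandomState(0)
    data = rng.randn(200, 6) * np.array([5.0, 2.0, 1.0, 0.5, 0.2, 0.1])
    to_pca, pca_cov = pca_converter(data, np.ones(data.shape[1]), explained_variance=0.98)
    return to_pca(data[:3]).shape, pca_cov.shape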
def preprocess_data(data):
'''
Turn raw data into an array of hand-picked features useful for classification
:param data: n_samples x n_raw_features numpy array
:return: n_samples x n_processed_features array
'''
keypress_dt = data[:, 8::10] - data[:, 3::10] # duration of each keystroke
key_to_key_dt = data[:, 13::10] - data[:, 3:-10:10] # interval between keystrokes
x_down = data[:, 4::10].astype(np.float) / data[:, 1][:, None].astype(np.float) # x relative to screen width
y_down = data[:, 5::10].astype(np.float) / data[:, 0][:, None].astype(np.float) # y relative to screen height
x_up = data[:, 9::10].astype(np.float) / data[:, 1][:, None].astype(np.float) # x relative to screen width
y_up = data[:, 10::10].astype(np.float) / data[:, 0][:, None].astype(np.float) # y relative to screen height
size_down = data[:, 6::10]
size_up = data[:, 11::10]
pressure_down = data[:, 7::10]
pressure_up = data[:, 12::10]
assert np.all((x_down >= 0) & (x_down <= 1) & (y_down >= 0) & (y_down <= 1))
assert np.all((x_up >= 0) & (x_up <= 1) & (y_up >= 0) & (y_up <= 1))
touch_d = np.hypot(x_down - x_up, y_down - y_up)
collected_data = np.hstack((keypress_dt, key_to_key_dt,
np.diff(x_down, axis=1), np.diff(y_down, axis=1),
touch_d,
size_down, size_up, pressure_down, pressure_up,
))
return collected_data
def get_random_feature_selector(n_all_features, feature_fraction, seed):
'''
Return a selector of random features from a data array
:param n_all_features: total number of features
:param feature_fraction: desired fraction of selected features
:param seed: random seed for repeatable experiments
:return: a function taking in full data and returning only the random features from it
'''
n_features = int(np.round(feature_fraction * n_all_features))
rng = np.random.RandomState(seed)
p = rng.permutation(n_all_features)[:n_features]
return lambda x, p=p: x[..., p]
def simple_gaussian(user_pca):
# template will consist of mean and std dev of each feature in pca space
mean_pca = np.mean(user_pca, axis=0)
std_pca = np.std(user_pca, axis=0)
return mean_pca, std_pca
def scikit_classifier(user, training_dataset, generator=lambda:KNeighborsClassifier(5)):
'''
Train a given classifier on user vs others
:param generator: a function creating a scikit classifier with fit and predict functions
:return: the trained classifier
'''
all_users = training_dataset.keys()
others_raw = np.vstack([training_dataset[u] for u in all_users if u != user])
others_pca = Global.pca(preprocess_data(others_raw))
user_raw = training_dataset[user]
user_pca = Global.pca(preprocess_data(user_raw))
clf = generator()
clf.fit(np.vstack((user_pca, others_pca)),
np.hstack((np.zeros(len(user_pca)), np.ones(len(others_pca)))))
return clf
def lda(user_pca, all_pca_cov, n_all):
'''
Compute the Fisher discriminant vector and threshold to classify user vs others.
:param user_pca: n_samples x n_pca_features array of user instances
:param all_pca_cov: covariance matrix of the complete dataset; it is assumed that
the user data was part of the dataset, and that the mean of the whole dataset
is 0 for every feature
:param n_all: number of samples that formed the complete dataset
:return: Fisher discriminant vector, threshold
'''
n_user = len(user_pca)
assert n_user < n_all - 1 # make sure the complete dataset has more than just the current user
# We compute mean and variance for the user data directly, and infer the mean
# and variance of the rest of the dataset from the covariance of the complete set
# (and its mean, which is assumed zero)
user_mu = np.mean(user_pca, axis=0)
others_mu = - n_user * user_mu / (n_all - n_user)
user_sigma = np.cov(user_pca.T)
def sq_(x):
return x[:, None] * x[None, :]
others_sigma = ((n_all - 1) * all_pca_cov - (n_user - 1) * user_sigma\
- n_user * sq_(user_mu) - (n_all - n_user) * sq_(others_mu)) / (n_all - n_user - 1)
ld_vector = np.dot(np.linalg.inv(user_sigma + others_sigma), user_mu - others_mu) # order determines sign of criterion
ld_vector /= np.linalg.norm(ld_vector)
# find the threshold for equal false positives and false negatives
user_proj_mu = np.dot(user_mu, ld_vector)
others_proj_mu = np.dot(others_mu, ld_vector)
user_proj_std = np.sqrt(np.dot(ld_vector, np.dot(user_sigma, ld_vector)))
others_proj_std = np.sqrt(np.dot(ld_vector, np.dot(others_sigma, ld_vector)))
ld_threshold = (others_proj_std * user_proj_mu + user_proj_std * others_proj_mu) / (user_proj_std + others_proj_std)
return ld_vector, ld_threshold
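# --- Editor's sketch (illustrative only) ---
# The vector/threshold returned by lda() define a linear score: project a
# PCA-space sample onto ld_vector and subtract ld_threshold; positive values
# mean "looks like the user", mirroring what authenticate() does further below.
def _lda_score_sketch(sample_pca, ld_vector, ld_threshold):
    return np.dot(sample_pca, ld_vector) - ld_threshold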
def compute_feature_discriminabilities(each_preprocessed):
'''
Return a vector of discriminability for each feature
:param each_preprocessed: list with one n_samples x n_features data matrix for each user
:return: vector of discriminabilities (sqrt of the square of the difference of means divided by
the sum of variances) for each feature
'''
n_users = len(each_preprocessed)
each_mu = np.array([np.mean(m, axis=0) for m in each_preprocessed]) # n_users x n_features
each_var = np.array([np.var(m, axis=0) for m in each_preprocessed]) # n_users x n_features
# compute discriminability for each feature and pair of users
pairwise_discriminability = (each_mu[:, None, :] - each_mu[None, :, :]) ** 2 / (1e-6 + each_var[:, None, :] + each_var[None, :, :])
# compute discriminability of each feature as the average over pairs of users
return np.sqrt(np.sum(pairwise_discriminability, axis=(0, 1)) / (n_users * (n_users - 1)))
def _prepare_global(training_dataset):
'''
Processing of the complete dataset, to be reused for each user
- feature preprocessing
- pca converter
- selection of features and computation of covariances for ensemble lda
:param training_dataset: the complete dataset
:return: None. The Global container is initialized with all necessary data
'''
each_preprocessed = [preprocess_data(training_dataset[u]) for u in training_dataset]
Global.feature_discriminabilities = compute_feature_discriminabilities(each_preprocessed)
all_preprocessed = np.vstack(each_preprocessed)
Global.n_all = len(all_preprocessed)
Global.pca, Global.all_pca_cov = pca_converter(all_preprocessed, Global.feature_discriminabilities, explained_variance=0.98)
if MetaParams.mode == 'lda_ensemble':
Global.lda_ensemble = []
for i in range(MetaParams.n_lda_ensemble):
seed = np.random.randint(200000)
feature_selector = get_random_feature_selector(all_preprocessed.shape[1],
feature_fraction=MetaParams.lda_ensemble_feature_fraction, seed=seed)
selected_pca, selected_pca_cov = pca_converter(feature_selector(all_preprocessed),
feature_selector(Global.feature_discriminabilities),
explained_variance=0.99)
Global.lda_ensemble.append({'selector': feature_selector, 'pca': selected_pca, 'pca_cov': selected_pca_cov})
Global.ready = True
# Implement template building here. Feel free to write any helper classes or functions required.
# Return the generated template for that user.
def build_template(user, training_dataset):
if not Global.ready:
_prepare_global(training_dataset)
user_raw = training_dataset[user]
user_preprocessed = preprocess_data(user_raw)
template = {}
if MetaParams.mode in ['lda', 'simple', 'combined']:
user_pca = Global.pca(user_preprocessed)
template['mean_pca'], template['std_pca'] = simple_gaussian(user_pca)
template['ld_vector'], template['ld_threshold'] =\
lda(user_pca, all_pca_cov=Global.all_pca_cov, n_all=Global.n_all)
if MetaParams.mode == 'lda_ensemble':
lda_ensemble = []
for lda_item in Global.lda_ensemble:
user_selected_pca = lda_item['pca'](lda_item['selector'](user_preprocessed))
ld_vector, ld_threshold = lda(user_selected_pca, n_all=Global.n_all, all_pca_cov=lda_item['pca_cov'])
lda_ensemble.append({'ld_vector': ld_vector, 'ld_threshold': ld_threshold})
template['lda_ensemble'] = lda_ensemble
if MetaParams.mode in ['nonlinear', 'combined']:
template['clf_1'] = scikit_classifier(user, training_dataset, generator=lambda: KNeighborsClassifier(5))
template['clf_2'] = scikit_classifier(user, training_dataset, generator=lambda: svm.LinearSVC(C=0.05, class_weight='balanced'))
return template
# Implement authentication method here. Feel free to write any helper classes or functions required.
# Return the authenttication score and threshold above which you consider it being a correct user.
def authenticate(instance, user, templates):
mode = MetaParams.mode
assert mode in ['lda', 'combined', 'lda_ensemble', 'nonlinear', 'simple'], ("Unrecognized mode: %s" % mode)
t = templates[user]
batch_mode = instance.ndim > 1
if not batch_mode:
instance = instance[None, :]
preprocessed_instance = preprocess_data(instance)
if mode in ['lda', 'combined']:
user_pca = Global.pca(preprocessed_instance)
user_lda_proj = np.dot(user_pca, t['ld_vector'])
lda_score, lda_thr = user_lda_proj - t['ld_threshold'], np.zeros(len(user_lda_proj))
if mode in ['nonlinear', 'combined']:
user_pca = Global.pca(preprocessed_instance)
clf_score_1, clf_thr_1 = (t['clf_1'].predict(user_pca) == 0).astype(np.float), 0.5 * np.ones(len(user_pca))
clf_score_2, clf_thr_2 = (t['clf_2'].predict(user_pca) == 0).astype(np.float), 0.5 * np.ones(len(user_pca))
if mode == 'simple':
user_pca = Global.pca(preprocessed_instance)
z = (user_pca - t['mean_pca']) / t['std_pca']
distance = np.mean(np.abs(z) ** 2, axis=1) ** 0.5
score, thr = distance, 1.2 * np.ones(len(distance))
if mode == 'lda_ensemble':
ensemble_scores = np.empty((len(preprocessed_instance), len(t['lda_ensemble'])))
for i, sub_t in enumerate(t['lda_ensemble']):
g_item = Global.lda_ensemble[i]
user_selected_pca = g_item['pca'](g_item['selector'](preprocessed_instance))
user_thinned_lda_proj = np.dot(user_selected_pca, sub_t['ld_vector'])
ensemble_scores[:, i] = user_thinned_lda_proj - sub_t['ld_threshold']
score = np.mean(ensemble_scores > 0, axis=1)
thr = 0.5 * np.ones(len(score))
if mode == 'lda':
score, thr = lda_score, lda_thr
elif mode == 'nonlinear':
score, thr = clf_score_1, clf_thr_1
elif mode == 'combined':
score = np.mean(np.vstack((lda_score > lda_thr, clf_score_1 > clf_thr_1, clf_score_2 > clf_thr_2)), axis=0)
thr = 0.5 * np.ones(len(score))
if not batch_mode:
assert score.shape == (1, )
assert thr.shape == (1, )
score, thr = score[0], thr[0]
return score, thr
def cross_validate(full_dataset, print_results=False):
'''
n-fold cross-validation of given dataset
:param full_dataset: dictionary of raw data for each user
:param print_results: if True, print progress messages and results
:return: (percentage of false rejects, percentage of false accepts)
'''
n_folds = 5 # for cross-validation
all_false_accept = 0
all_false_reject = 0
all_true_accept = 0
all_true_reject = 0
for i in range(n_folds):
# split full dataset into training and validation
training_dataset = dict()
validation_dataset = dict()
for u in full_dataset.keys():
n = len(full_dataset[u])
idx = np.round(float(n) / n_folds * np.arange(n_folds + 1)).astype(np.int)
n_validation = np.diff(idx)
rolled_set = np.roll(full_dataset[u], -idx[i], axis=0)
training_dataset[u] = rolled_set[n_validation[i]:, :]
validation_dataset[u] = rolled_set[:n_validation[i], :]
# reset global data
Global.ready = False
templates = {u: build_template(u, training_dataset) for u in training_dataset}
# For each user test authentication.
true_accept = 0
false_reject = 0
true_reject = 0
false_accept = 0
for u in training_dataset:
# Test false rejections.
(score, threshold) = authenticate(validation_dataset[u], u, templates)
true_accept += np.sum(score > threshold)
false_reject += np.sum(score <= threshold)
# Test false acceptance.
for u_attacker in validation_dataset:
if u == u_attacker:
continue
(score, threshold) = authenticate(validation_dataset[u_attacker], u, templates)
false_accept += np.sum(score > threshold)
true_reject += np.sum(score <= threshold)
if print_results:
print "fold %i: false reject rate: %.1f%%, false accept rate: %.1f%%" %\
(i, 100. * float(false_reject) / (false_reject + true_accept),
100. * float(false_accept) / (false_accept + true_reject))
all_false_accept += false_accept
all_false_reject += false_reject
all_true_accept += true_accept
all_true_reject += true_reject
false_reject_percent = 100. * float(all_false_reject) / (all_false_reject + all_true_accept)
false_accept_percent = 100. * float(all_false_accept) / (all_false_accept + all_true_reject)
if print_results:
print "Total: false reject rate: %.1f%%, false accept rate: %.1f%%" % (false_reject_percent, false_accept_percent)
return false_reject_percent, false_accept_percent
if __name__ == "__main__":
# Reading the data into the training dataset separated by user.
data_training_file = open('dataset_training.csv', 'rb')
csv_training_reader = csv.reader(data_training_file, delimiter=',', quotechar='"')
csv_training_reader.next()
full_dataset = dict()
for row in csv_training_reader:
if row[0] not in full_dataset:
full_dataset[row[0]] = np.array([]).reshape((0, len(row[1:])))
full_dataset[row[0]] = np.vstack([full_dataset[row[0]], np.array(row[1:]).astype(float)])
for feature_fraction in [0.4]:
for n_lda_ensemble in [51]:
n_trials = 10
tot_rej = 0
tot_acc = 0
for _ in range(n_trials):
MetaParams.lda_ensemble_feature_fraction = feature_fraction
MetaParams.n_lda_ensemble = n_lda_ensemble
rej, acc = cross_validate(full_dataset)
tot_rej += rej
tot_acc += acc
print "feature fraction=%.2f, ensemble size=%i, false_rej=%.2f%%, false_acc=%.2f%%" % (feature_fraction, n_lda_ensemble, tot_rej / n_trials, tot_acc / n_trials)
| mit |
planetarymike/IDL-Colorbars | IDL_py_test/027_Eos_B.py | 1 | 5942 | from matplotlib.colors import LinearSegmentedColormap
from numpy import nan, inf
cm_data = [[1., 1., 1.],
[1., 1., 1.],
[0.498039, 0.498039, 0.498039],
[0., 0., 0.513725],
[0., 0., 0.533333],
[0., 0., 0.54902],
[0., 0., 0.564706],
[0., 0., 0.580392],
[0., 0., 0.6],
[0., 0., 0.615686],
[0., 0., 0.568627],
[0., 0., 0.584314],
[0., 0., 0.666667],
[0., 0., 0.682353],
[0., 0., 0.698039],
[0., 0., 0.713725],
[0., 0., 0.733333],
[0., 0., 0.74902],
[0., 0., 0.764706],
[0., 0., 0.780392],
[0., 0., 0.717647],
[0., 0., 0.733333],
[0., 0., 0.831373],
[0., 0., 0.847059],
[0., 0., 0.866667],
[0., 0., 0.882353],
[0., 0., 0.898039],
[0., 0., 0.913725],
[0., 0., 0.933333],
[0., 0., 0.94902],
[0., 0., 0.866667],
[0., 0., 0.882353],
[0., 0., 1.],
[0., 0.027451, 0.968627],
[0., 0.0588235, 0.937255],
[0., 0.0901961, 0.905882],
[0., 0.121569, 0.87451],
[0., 0.152941, 0.843137],
[0., 0.184314, 0.811765],
[0., 0.215686, 0.780392],
[0., 0.223529, 0.67451],
[0., 0.25098, 0.643137],
[0., 0.309804, 0.686275],
[0., 0.341176, 0.654902],
[0., 0.372549, 0.623529],
[0., 0.403922, 0.592157],
[0., 0.435294, 0.560784],
[0., 0.466667, 0.529412],
[0., 0.498039, 0.498039],
[0., 0.529412, 0.466667],
[0., 0.505882, 0.392157],
[0., 0.533333, 0.364706],
[0., 0.623529, 0.372549],
[0., 0.654902, 0.341176],
[0., 0.686275, 0.309804],
[0., 0.717647, 0.278431],
[0., 0.74902, 0.247059],
[0., 0.780392, 0.215686],
[0., 0.811765, 0.184314],
[0., 0.843137, 0.152941],
[0., 0.784314, 0.109804],
[0., 0.811765, 0.0823529],
[0., 0.937255, 0.0588235],
[0., 0.968627, 0.027451],
[0., 1., 0.],
[0.0352941, 1., 0.],
[0.0705882, 1., 0.],
[0.105882, 1., 0.],
[0.141176, 1., 0.],
[0.176471, 1., 0.],
[0.192157, 0.898039, 0.],
[0.223529, 0.898039, 0.],
[0.282353, 1., 0.],
[0.317647, 1., 0.],
[0.356863, 1., 0.],
[0.392157, 1., 0.],
[0.427451, 1., 0.],
[0.462745, 1., 0.],
[0.498039, 1., 0.],
[0.533333, 1., 0.],
[0.513725, 0.898039, 0.],
[0.545098, 0.898039, 0.],
[0.639216, 1., 0.],
[0.678431, 1., 0.],
[0.713725, 1., 0.],
[0.74902, 1., 0.],
[0.784314, 1., 0.],
[0.819608, 1., 0.],
[0.854902, 1., 0.],
[0.890196, 1., 0.],
[0.835294, 0.898039, 0.],
[0.866667, 0.898039, 0.],
[1., 1., 0.],
[1., 0.980392, 0.],
[1., 0.964706, 0.],
[1., 0.94902, 0.],
[1., 0.933333, 0.],
[1., 0.913725, 0.],
[1., 0.898039, 0.],
[1., 0.882353, 0.],
[0.898039, 0.776471, 0.],
[0.898039, 0.764706, 0.],
[1., 0.831373, 0.],
[1., 0.815686, 0.],
[1., 0.8, 0.],
[1., 0.780392, 0.],
[1., 0.764706, 0.],
[1., 0.74902, 0.],
[1., 0.733333, 0.],
[1., 0.713725, 0.],
[0.898039, 0.627451, 0.],
[0.898039, 0.611765, 0.],
[1., 0.662745, 0.],
[1., 0.647059, 0.],
[1., 0.631373, 0.],
[1., 0.615686, 0.],
[1., 0.6, 0.],
[1., 0.580392, 0.],
[1., 0.564706, 0.],
[1., 0.54902, 0.],
[0.898039, 0.478431, 0.],
[0.898039, 0.462745, 0.],
[1., 0.498039, 0.],
[1., 0.490196, 0.],
[1., 0.482353, 0.],
[1., 0.47451, 0.],
[1., 0.466667, 0.],
[1., 0.454902, 0.],
[1., 0.447059, 0.],
[1., 0.439216, 0.],
[0.898039, 0.388235, 0.],
[0.898039, 0.380392, 0.],
[1., 0.415686, 0.],
[1., 0.407843, 0.],
[1., 0.4, 0.],
[1., 0.388235, 0.],
[1., 0.380392, 0.],
[1., 0.372549, 0.],
[1., 0.364706, 0.],
[1., 0.356863, 0.],
[0.898039, 0.313725, 0.],
[0.898039, 0.305882, 0.],
[1., 0.329412, 0.],
[1., 0.321569, 0.],
[1., 0.313725, 0.],
[1., 0.305882, 0.],
[1., 0.298039, 0.],
[1., 0.290196, 0.],
[1., 0.282353, 0.],
[1., 0.27451, 0.],
[0.898039, 0.239216, 0.],
[0.898039, 0.231373, 0.],
[1., 0.247059, 0.],
[1., 0.239216, 0.],
[1., 0.231373, 0.],
[1., 0.223529, 0.],
[1., 0.215686, 0.],
[1., 0.207843, 0.],
[1., 0.196078, 0.],
[1., 0.188235, 0.],
[0.898039, 0.164706, 0.],
[0.898039, 0.156863, 0.],
[1., 0.164706, 0.],
[1., 0.156863, 0.],
[1., 0.14902, 0.],
[1., 0.141176, 0.],
[1., 0.129412, 0.],
[1., 0.121569, 0.],
[1., 0.113725, 0.],
[1., 0.105882, 0.],
[0.898039, 0.0862745, 0.],
[0.898039, 0.0823529, 0.],
[1., 0.0823529, 0.],
[1., 0.0745098, 0.],
[1., 0.0627451, 0.],
[1., 0.054902, 0.],
[1., 0.0470588, 0.],
[1., 0.0509804, 0.],
[1., 0.0313725, 0.],
[1., 0.0235294, 0.],
[0.898039, 0.0117647, 0.],
[0.898039, 0.00392157, 0.],
[1., 0., 0.],
[0.992157, 0., 0.],
[0.984314, 0., 0.],
[0.976471, 0., 0.],
[0.968627, 0., 0.],
[0.960784, 0., 0.],
[0.952941, 0., 0.],
[0.945098, 0., 0.],
[0.843137, 0., 0.],
[0.839216, 0., 0.],
[0.921569, 0., 0.],
[0.917647, 0., 0.],
[0.909804, 0., 0.],
[0.901961, 0., 0.],
[0.894118, 0., 0.],
[0.886275, 0., 0.],
[0.878431, 0., 0.],
[0.870588, 0., 0.],
[0.776471, 0., 0.],
[0.768627, 0., 0.],
[0.847059, 0., 0.],
[0.843137, 0., 0.],
[0.835294, 0., 0.],
[0.827451, 0., 0.],
[0.819608, 0., 0.],
[0.811765, 0., 0.],
[0.803922, 0., 0.],
[0.796078, 0., 0.],
[0.709804, 0., 0.],
[0.701961, 0., 0.],
[0.772549, 0., 0.],
[0.768627, 0., 0.],
[0.760784, 0., 0.],
[0.752941, 0., 0.],
[0.745098, 0., 0.],
[0.737255, 0., 0.],
[0.729412, 0., 0.],
[0.721569, 0., 0.],
[0.643137, 0., 0.],
[0.635294, 0., 0.],
[0.698039, 0., 0.],
[0.690196, 0., 0.],
[0.686275, 0., 0.],
[0.678431, 0., 0.],
[0.670588, 0., 0.],
[0.662745, 0., 0.],
[0.654902, 0., 0.],
[0.647059, 0., 0.],
[0.576471, 0., 0.],
[0.568627, 0., 0.],
[0.623529, 0., 0.],
[0.615686, 0., 0.],
[0.611765, 0., 0.],
[0.603922, 0., 0.],
[0.596078, 0., 0.],
[0.588235, 0., 0.],
[0.580392, 0., 0.],
[0.572549, 0., 0.],
[0.509804, 0., 0.],
[0.501961, 0., 0.],
[0.54902, 0., 0.],
[0.541176, 0., 0.],
[0.537255, 0., 0.],
[0.529412, 0., 0.],
[0.521569, 0., 0.],
[0.513725, 0., 0.],
[0.505882, 0., 0.],
[0.498039, 0., 0.],
[0.443137, 0., 0.],
[0.435294, 0., 0.],
[0.47451, 0., 0.],
[0.466667, 0., 0.],
[0.458824, 0., 0.],
[0.458824, 0., 0.]]
test_cm = LinearSegmentedColormap.from_list(__file__, cm_data)
if __name__ == "__main__":
import matplotlib.pyplot as plt
import numpy as np
try:
from pycam02ucs.cm.viscm import viscm
viscm(test_cm)
except ImportError:
print("pycam02ucs not found, falling back on simple display")
plt.imshow(np.linspace(0, 100, 256)[None, :], aspect='auto',
cmap=test_cm)
plt.show()
| gpl-2.0 |
q1ang/scikit-learn | examples/ensemble/plot_forest_importances_faces.py | 403 | 1519 | """
=================================================
Pixel importances with a parallel forest of trees
=================================================
This example shows the use of forests of trees to evaluate the importance
of the pixels in an image classification task (faces). The hotter the pixel,
the more important.
The code below also illustrates how the construction and the computation
of the predictions can be parallelized within multiple jobs.
"""
print(__doc__)
from time import time
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.ensemble import ExtraTreesClassifier
# Number of cores to use to perform parallel fitting of the forest model
n_jobs = 1
# Load the faces dataset
data = fetch_olivetti_faces()
X = data.images.reshape((len(data.images), -1))
y = data.target
mask = y < 5 # Limit to 5 classes
X = X[mask]
y = y[mask]
# Build a forest and compute the pixel importances
print("Fitting ExtraTreesClassifier on faces data with %d cores..." % n_jobs)
t0 = time()
forest = ExtraTreesClassifier(n_estimators=1000,
max_features=128,
n_jobs=n_jobs,
random_state=0)
forest.fit(X, y)
print("done in %0.3fs" % (time() - t0))
importances = forest.feature_importances_
importances = importances.reshape(data.images[0].shape)
# Plot pixel importances
plt.matshow(importances, cmap=plt.cm.hot)
plt.title("Pixel importances with forests of trees")
plt.show()
| bsd-3-clause |
BlueBrain/NEST | testsuite/manualtests/cross_check_test_mip_corrdet.py | 13 | 2594 | # -*- coding: utf-8 -*-
#
# cross_check_test_mip_corrdet.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
# Script to check correlation_detector.
# Calculates spike cross correlation function of both spike trains in
# spike_detector-0-0-3.gdf. The file is generated after running the
# test script testsuite/unittests/test_mip_corrdet.sli
#
# Author: Helias
# Date: 08-04-07
#
from scipy import *
from matplotlib.pylab import * # for plot
# Auto- and crosscorrelation functions for spike trains.
#
# A time bin of size tbin is centered around the time difference it
# represents. If the correlation function is calculated for tau in
# [-tau_max, tau_max], the pair events contributing to the left-most
# bin are those for which tau lies in [-tau_max-tbin/2, -tau_max+tbin/2) and
# so on.
# correlate two spike trains with each other
# assumes spike times to be ordered in time
# tau > 0 means spike2 is later than spike1
#
# tau_max: maximum time lag in ms correlation function
# tbin: bin size
# spike1: first spike train [tspike...]
# spike2: second spike train [tspike...]
#
def corr_spikes_sorted(spike1, spike2, tbin, tau_max, h):
tau_max_i = int(tau_max/h)
tbin_i = int(tbin/h)
cross = zeros(int(2*tau_max_i/tbin_i+1), 'd')
j0 = 0
for spki in spike1:
j = j0
while j < len(spike2) and spike2[j] - spki < -tau_max_i - tbin_i/2.0:
j += 1
j0 = j
while j < len(spike2) and spike2[j] - spki < tau_max_i + tbin_i/2.0:
cross[int((spike2[j] - spki + tau_max_i + 0.5*tbin_i)/tbin_i)] += 1.0
j += 1
return cross
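# Illustrative sketch (not part of the original test script): how the
# correlator above could be called on two toy spike trains. Spike times are
# given in units of the resolution h (0.1 ms per unit) and must be sorted in
# time. The helper below is defined for illustration only and never called.
def _example_corr_usage():
    spike1 = [1000, 2000, 3000]  # spikes at 100, 200 and 300 ms
    spike2 = [1050, 2050, 3050]  # the same spikes, each 5 ms later
    # returns a 21-bin histogram of time differences (spike2 minus spike1)
    # covering lags in [-100 ms, +100 ms] with 10 ms bins
    return corr_spikes_sorted(spike1, spike2, 10.0, 100.0, 0.1)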
def main():
# resolution
h = 0.1
tau_max = 100.0 # ms correlation window
t_bin = 10.0 # ms bin size
# read input from spike detector
spikes = load('spike_detector-0-0-3.gdf')
sp1 = spikes[find(spikes[:,0] == 4), 1]
sp2 = spikes[find(spikes[:,0] == 5), 1]
cross = corr_spikes_sorted(sp1, sp2, t_bin, tau_max, h)
print cross
print sum(cross)
main()
| gpl-2.0 |
winklerand/pandas | pandas/tests/test_errors.py | 9 | 1147 | # -*- coding: utf-8 -*-
import pytest
from warnings import catch_warnings
import pandas # noqa
import pandas as pd
@pytest.mark.parametrize(
"exc", ['UnsupportedFunctionCall', 'UnsortedIndexError',
'OutOfBoundsDatetime',
'ParserError', 'PerformanceWarning', 'DtypeWarning',
'EmptyDataError', 'ParserWarning', 'MergeError'])
def test_exception_importable(exc):
from pandas import errors
e = getattr(errors, exc)
assert e is not None
# check that we can raise on them
with pytest.raises(e):
raise e()
def test_catch_oob():
from pandas import errors
try:
pd.Timestamp('15000101')
except errors.OutOfBoundsDatetime:
pass
def test_error_rename():
# see gh-12665
from pandas.errors import ParserError
from pandas.io.common import CParserError
try:
raise CParserError()
except ParserError:
pass
try:
raise ParserError()
except CParserError:
pass
with catch_warnings(record=True):
try:
raise ParserError()
except pd.parser.CParserError:
pass
| bsd-3-clause |
kylerbrown/scikit-learn | sklearn/covariance/tests/test_robust_covariance.py | 213 | 3359 | # Author: Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.validation import NotFittedError
from sklearn import datasets
from sklearn.covariance import empirical_covariance, MinCovDet, \
EllipticEnvelope
X = datasets.load_iris().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_mcd():
# Tests the FastMCD algorithm implementation
# Small data set
# test without outliers (random independent normal data)
launch_mcd_on_dataset(100, 5, 0, 0.01, 0.1, 80)
# test with a contaminated data set (medium contamination)
launch_mcd_on_dataset(100, 5, 20, 0.01, 0.01, 70)
# test with a contaminated data set (strong contamination)
launch_mcd_on_dataset(100, 5, 40, 0.1, 0.1, 50)
# Medium data set
launch_mcd_on_dataset(1000, 5, 450, 0.1, 0.1, 540)
# Large data set
launch_mcd_on_dataset(1700, 5, 800, 0.1, 0.1, 870)
# 1D data set
launch_mcd_on_dataset(500, 1, 100, 0.001, 0.001, 350)
def launch_mcd_on_dataset(n_samples, n_features, n_outliers, tol_loc, tol_cov,
tol_support):
rand_gen = np.random.RandomState(0)
data = rand_gen.randn(n_samples, n_features)
# add some outliers
outliers_index = rand_gen.permutation(n_samples)[:n_outliers]
outliers_offset = 10. * \
(rand_gen.randint(2, size=(n_outliers, n_features)) - 0.5)
data[outliers_index] += outliers_offset
inliers_mask = np.ones(n_samples).astype(bool)
inliers_mask[outliers_index] = False
pure_data = data[inliers_mask]
# compute MCD by fitting an object
mcd_fit = MinCovDet(random_state=rand_gen).fit(data)
T = mcd_fit.location_
S = mcd_fit.covariance_
H = mcd_fit.support_
# compare with the estimates learnt from the inliers
error_location = np.mean((pure_data.mean(0) - T) ** 2)
assert(error_location < tol_loc)
error_cov = np.mean((empirical_covariance(pure_data) - S) ** 2)
assert(error_cov < tol_cov)
assert(np.sum(H) >= tol_support)
assert_array_almost_equal(mcd_fit.mahalanobis(data), mcd_fit.dist_)
def test_mcd_issue1127():
# Check that the code does not break with X.shape = (3, 1)
# (i.e. n_support = n_samples)
rnd = np.random.RandomState(0)
X = rnd.normal(size=(3, 1))
mcd = MinCovDet()
mcd.fit(X)
def test_outlier_detection():
rnd = np.random.RandomState(0)
X = rnd.randn(100, 10)
clf = EllipticEnvelope(contamination=0.1)
assert_raises(NotFittedError, clf.predict, X)
assert_raises(NotFittedError, clf.decision_function, X)
clf.fit(X)
y_pred = clf.predict(X)
decision = clf.decision_function(X, raw_values=True)
decision_transformed = clf.decision_function(X, raw_values=False)
assert_array_almost_equal(
decision, clf.mahalanobis(X))
assert_array_almost_equal(clf.mahalanobis(X), clf.dist_)
assert_almost_equal(clf.score(X, np.ones(100)),
(100 - y_pred[y_pred == -1].size) / 100.)
assert(sum(y_pred == -1) == sum(decision_transformed < 0))
| bsd-3-clause |
SamStudio8/scikit-bio | skbio/io/format/ordination.py | 8 | 14555 | r"""
Ordination results format (:mod:`skbio.io.format.ordination`)
=============================================================
.. currentmodule:: skbio.io.format.ordination
The ordination results file format (``ordination``) stores the results of an
ordination method in a human-readable, text-based format. The format supports
storing the results of various ordination methods available in scikit-bio,
including (but not necessarily limited to) PCoA, CA, RDA, and CCA.
Format Support
--------------
**Has Sniffer: Yes**
+------+------+---------------------------------------------------------------+
|Reader|Writer| Object Class |
+======+======+===============================================================+
|Yes |Yes |:mod:`skbio.stats.ordination.OrdinationResults` |
+------+------+---------------------------------------------------------------+
Format Specification
--------------------
The format is text-based, consisting of six attributes that describe the
ordination results:
- ``Eigvals``: 1-D
- ``Proportion explained``: 1-D
- ``Species``: 2-D
- ``Site``: 2-D
- ``Biplot``: 2-D
- ``Site constraints``: 2-D
The attributes in the file *must* be in this order.
Each attribute is defined in its own section of the file, where sections are
separated by a blank (or whitespace-only) line. Each attribute begins with a
header line, which contains the attribute's name (as listed above), followed by
a tab character, followed by one or more tab-separated dimensions (integers)
that describe the shape of the attribute's data.
The attribute's data follows its header line, and is stored in tab-separated
format. ``Species``, ``Site``, and ``Site constraints`` store species or site
IDs as the first column, followed by the 2-D data array.
An example of this file format might look like::
Eigvals<tab>4
0.36<tab>0.18<tab>0.07<tab>0.08
Proportion explained<tab>4
0.46<tab>0.23<tab>0.10<tab>0.10
Species<tab>9<tab>4
Species0<tab>0.11<tab>0.28<tab>-0.20<tab>-0.00
Species1<tab>0.14<tab>0.30<tab>0.39<tab>-0.14
Species2<tab>-1.01<tab>0.09<tab>-0.19<tab>-0.10
Species3<tab>-1.03<tab>0.10<tab>0.22<tab>0.22
Species4<tab>1.05<tab>0.53<tab>-0.43<tab>0.22
Species5<tab>0.99<tab>0.57<tab>0.67<tab>-0.38
Species6<tab>0.25<tab>-0.17<tab>-0.20<tab>0.43
Species7<tab>0.14<tab>-0.85<tab>-0.01<tab>0.05
Species8<tab>0.41<tab>-0.70<tab>0.21<tab>-0.69
Site<tab>10<tab>4
Site0<tab>0.71<tab>-3.08<tab>0.21<tab>-1.24
Site1<tab>0.58<tab>-3.00<tab>-0.94<tab>2.69
Site2<tab>0.76<tab>-3.15<tab>2.13<tab>-3.11
Site3<tab>1.11<tab>1.07<tab>-1.87<tab>0.66
Site4<tab>-0.97<tab>-0.06<tab>-0.69<tab>-0.61
Site5<tab>1.04<tab>0.45<tab>-0.63<tab>0.28
Site6<tab>-0.95<tab>-0.08<tab>0.13<tab>-0.42
Site7<tab>0.94<tab>-0.10<tab>0.52<tab>-0.00
Site8<tab>-1.14<tab>0.49<tab>0.47<tab>1.17
Site9<tab>1.03<tab>1.03<tab>2.74<tab>-1.28
Biplot<tab>3<tab>3
-0.16<tab>0.63<tab>0.76
-0.99<tab>0.06<tab>-0.04
0.18<tab>-0.97<tab>0.03
Site constraints<tab>10<tab>4
Site0<tab>0.69<tab>-3.08<tab>-0.32<tab>-1.24
Site1<tab>0.66<tab>-3.06<tab>0.23<tab>2.69
Site2<tab>0.63<tab>-3.04<tab>0.78<tab>-3.11
Site3<tab>1.10<tab>0.50<tab>-1.55<tab>0.66
Site4<tab>-0.97<tab>0.06<tab>-1.12<tab>-0.61
Site5<tab>1.05<tab>0.53<tab>-0.43<tab>0.28
Site6<tab>-1.02<tab>0.10<tab>-0.00<tab>-0.42
Site7<tab>0.99<tab>0.57<tab>0.67<tab>-0.00
Site8<tab>-1.08<tab>0.13<tab>1.11<tab>1.17
Site9<tab>0.94<tab>0.61<tab>1.79<tab>-1.28
If a given result attribute is not present (e.g. ``Biplot``), it should still
be defined and declare its dimensions as 0. For example::
Biplot<tab>0<tab>0
All attributes are optional except for ``Eigvals``.
Examples
--------
Assume we have the following tab-delimited text file storing the
ordination results in ``ordination`` format::
Eigvals<tab>4
0.36<tab>0.18<tab>0.07<tab>0.08
Proportion explained<tab>4
0.46<tab>0.23<tab>0.10<tab>0.10
Species<tab>9<tab>4
Species0<tab>0.11<tab>0.28<tab>-0.20<tab>-0.00
Species1<tab>0.14<tab>0.30<tab>0.39<tab>-0.14
Species2<tab>-1.01<tab>0.09<tab>-0.19<tab>-0.10
Species3<tab>-1.03<tab>0.10<tab>0.22<tab>0.22
Species4<tab>1.05<tab>0.53<tab>-0.43<tab>0.22
Species5<tab>0.99<tab>0.57<tab>0.67<tab>-0.38
Species6<tab>0.25<tab>-0.17<tab>-0.20<tab>0.43
Species7<tab>0.14<tab>-0.85<tab>-0.01<tab>0.05
Species8<tab>0.41<tab>-0.70<tab>0.21<tab>-0.69
Site<tab>10<tab>4
Site0<tab>0.71<tab>-3.08<tab>0.21<tab>-1.24
Site1<tab>0.58<tab>-3.00<tab>-0.94<tab>2.69
Site2<tab>0.76<tab>-3.15<tab>2.13<tab>-3.11
Site3<tab>1.11<tab>1.07<tab>-1.87<tab>0.66
Site4<tab>-0.97<tab>-0.06<tab>-0.69<tab>-0.61
Site5<tab>1.04<tab>0.45<tab>-0.63<tab>0.28
Site6<tab>-0.95<tab>-0.08<tab>0.13<tab>-0.42
Site7<tab>0.94<tab>-0.10<tab>0.52<tab>-0.00
Site8<tab>-1.14<tab>0.49<tab>0.47<tab>1.17
Site9<tab>1.03<tab>1.03<tab>2.74<tab>-1.28
Biplot<tab>0<tab>0
Site constraints<tab>0<tab>0
Load the ordination results from the file:
>>> from io import StringIO
>>> from skbio import OrdinationResults
>>> or_f = StringIO(
... "Eigvals\t4\n"
... "0.36\t0.18\t0.07\t0.08\n"
... "\n"
... "Proportion explained\t4\n"
... "0.46\t0.23\t0.10\t0.10\n"
... "\n"
... "Species\t9\t4\n"
... "Species0\t0.11\t0.28\t-0.20\t-0.00\n"
... "Species1\t0.14\t0.30\t0.39\t-0.14\n"
... "Species2\t-1.01\t0.09\t-0.19\t-0.10\n"
... "Species3\t-1.03\t0.10\t0.22\t0.22\n"
... "Species4\t1.05\t0.53\t-0.43\t0.22\n"
... "Species5\t0.99\t0.57\t0.67\t-0.38\n"
... "Species6\t0.25\t-0.17\t-0.20\t0.43\n"
... "Species7\t0.14\t-0.85\t-0.01\t0.05\n"
... "Species8\t0.41\t-0.70\t0.21\t-0.69\n"
... "\n"
... "Site\t10\t4\n"
... "Site0\t0.71\t-3.08\t0.21\t-1.24\n"
... "Site1\t0.58\t-3.00\t-0.94\t2.69\n"
... "Site2\t0.76\t-3.15\t2.13\t-3.11\n"
... "Site3\t1.11\t1.07\t-1.87\t0.66\n"
... "Site4\t-0.97\t-0.06\t-0.69\t-0.61\n"
... "Site5\t1.04\t0.45\t-0.63\t0.28\n"
... "Site6\t-0.95\t-0.08\t0.13\t-0.42\n"
... "Site7\t0.94\t-0.10\t0.52\t-0.00\n"
... "Site8\t-1.14\t0.49\t0.47\t1.17\n"
... "Site9\t1.03\t1.03\t2.74\t-1.28\n"
... "\n"
... "Biplot\t0\t0\n"
... "\n"
... "Site constraints\t0\t0\n")
>>> ord_res = OrdinationResults.read(or_f)
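The loaded results can then be written back out in the same format. This is a
minimal sketch, assuming the ``ordination`` writer registered below
(``_ordination_results_to_ordination``) is reachable through
``OrdinationResults.write``:
>>> out_f = StringIO()
>>> ord_res.write(out_f, format='ordination')  # doctest: +SKIP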
"""
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from future.builtins import zip
import numpy as np
import pandas as pd
from skbio._base import OrdinationResults
from skbio.io import create_format, OrdinationFormatError
ordination = create_format('ordination')
@ordination.sniffer()
def _ordination_sniffer(fh):
# Smells an ordination file if *all* of the following lines are present
# *from the beginning* of the file:
# - eigvals header (minimally parsed)
# - another line (contents ignored)
# - a whitespace-only line
# - proportion explained header (minimally parsed)
try:
_parse_header(fh, 'Eigvals', 1)
next_line = next(fh, None)
if next_line is not None:
_check_empty_line(fh)
_parse_header(fh, 'Proportion explained', 1)
return True, {}
except OrdinationFormatError:
pass
return False, {}
@ordination.reader(OrdinationResults)
def _ordination_to_ordination_results(fh):
eigvals = _parse_vector_section(fh, 'Eigvals')
if eigvals is None:
raise OrdinationFormatError("At least one eigval must be present.")
_check_empty_line(fh)
prop_expl = _parse_vector_section(fh, 'Proportion explained')
_check_length_against_eigvals(prop_expl, eigvals,
'proportion explained values')
_check_empty_line(fh)
species = _parse_array_section(fh, 'Species')
_check_length_against_eigvals(species, eigvals,
'coordinates per species')
_check_empty_line(fh)
site = _parse_array_section(fh, 'Site')
_check_length_against_eigvals(site, eigvals,
'coordinates per site')
_check_empty_line(fh)
# biplot does not have ids to parse (the other arrays do)
biplot = _parse_array_section(fh, 'Biplot', has_ids=False)
_check_empty_line(fh)
cons = _parse_array_section(fh, 'Site constraints')
if cons is not None and site is not None:
if not np.array_equal(cons.index, site.index):
raise OrdinationFormatError(
"Site constraints ids and site ids must be equal: %s != %s" %
(cons.index, site.index))
return OrdinationResults(
short_method_name='', long_method_name='', eigvals=eigvals,
features=species, samples=site, biplot_scores=biplot,
sample_constraints=cons, proportion_explained=prop_expl)
def _parse_header(fh, header_id, num_dimensions):
line = next(fh, None)
if line is None:
raise OrdinationFormatError(
"Reached end of file while looking for %s header." % header_id)
header = line.strip().split('\t')
# +1 for the header ID
if len(header) != num_dimensions + 1 or header[0] != header_id:
raise OrdinationFormatError("%s header not found." % header_id)
return header
def _check_empty_line(fh):
"""Check that the next line in `fh` is empty or whitespace-only."""
line = next(fh, None)
if line is None:
raise OrdinationFormatError(
"Reached end of file while looking for blank line separating "
"sections.")
if line.strip():
raise OrdinationFormatError("Expected an empty line.")
def _check_length_against_eigvals(data, eigvals, label):
if data is not None:
num_vals = data.shape[-1]
num_eigvals = eigvals.shape[-1]
if num_vals != num_eigvals:
raise OrdinationFormatError(
"There should be as many %s as eigvals: %d != %d" %
(label, num_vals, num_eigvals))
def _parse_vector_section(fh, header_id):
header = _parse_header(fh, header_id, 1)
# Parse how many values we are waiting for
num_vals = int(header[1])
if num_vals == 0:
# The ordination method didn't generate the vector, so set it to None
vals = None
else:
# Parse the line with the vector values
line = next(fh, None)
if line is None:
raise OrdinationFormatError(
"Reached end of file while looking for line containing values "
"for %s section." % header_id)
vals = pd.Series(np.asarray(line.strip().split('\t'),
dtype=np.float64))
if len(vals) != num_vals:
raise OrdinationFormatError(
"Expected %d values in %s section, but found %d." %
(num_vals, header_id, len(vals)))
return vals
def _parse_array_section(fh, header_id, has_ids=True):
"""Parse an array section of `fh` identified by `header_id`."""
# Parse the array header
header = _parse_header(fh, header_id, 2)
# Parse the dimensions of the array
rows = int(header[1])
cols = int(header[2])
ids = None
if rows == 0 and cols == 0:
# The ordination method didn't generate the array data for 'header', so
# set it to None
data = None
elif rows == 0 or cols == 0:
# Both dimensions should be 0 or none of them are zero
raise OrdinationFormatError("One dimension of %s is 0: %d x %d" %
(header_id, rows, cols))
else:
# Parse the data
data = np.empty((rows, cols), dtype=np.float64)
if has_ids:
ids = []
for i in range(rows):
# Parse the next row of data
line = next(fh, None)
if line is None:
raise OrdinationFormatError(
"Reached end of file while looking for row %d in %s "
"section." % (i + 1, header_id))
vals = line.strip().split('\t')
if has_ids:
ids.append(vals[0])
vals = vals[1:]
if len(vals) != cols:
raise OrdinationFormatError(
"Expected %d values, but found %d in row %d." %
(cols, len(vals), i + 1))
data[i, :] = np.asarray(vals, dtype=np.float64)
data = pd.DataFrame(data, index=ids)
return data
@ordination.writer(OrdinationResults)
def _ordination_results_to_ordination(obj, fh):
_write_vector_section(fh, 'Eigvals', obj.eigvals)
_write_vector_section(fh, 'Proportion explained', obj.proportion_explained)
_write_array_section(fh, 'Species', obj.features)
_write_array_section(fh, 'Site', obj.samples)
_write_array_section(fh, 'Biplot', obj.biplot_scores, has_ids=False)
_write_array_section(fh, 'Site constraints', obj.sample_constraints,
include_section_separator=False)
def _write_vector_section(fh, header_id, vector):
if vector is None:
shape = 0
else:
shape = vector.shape[0]
fh.write("%s\t%d\n" % (header_id, shape))
if vector is not None:
fh.write(_format_vector(vector.values))
fh.write("\n")
def _write_array_section(fh, header_id, data, has_ids=True,
include_section_separator=True):
# write section header
if data is None:
shape = (0, 0)
else:
shape = data.shape
fh.write("%s\t%d\t%d\n" % (header_id, shape[0], shape[1]))
# write section data
if data is not None:
if not has_ids:
for vals in data.values:
fh.write(_format_vector(vals))
else:
for id_, vals in zip(data.index, data.values):
fh.write(_format_vector(vals, id_))
if include_section_separator:
fh.write("\n")
def _format_vector(vector, id_=None):
formatted_vector = '\t'.join(np.asarray(vector, dtype=np.str))
if id_ is None:
return "%s\n" % formatted_vector
else:
return "%s\t%s\n" % (id_, formatted_vector)
| bsd-3-clause |
latticelabs/Mitty | mitty/benchmarking/misalignment_plot.py | 1 | 9184 | """Prepare a binned matrix of misalignments and plot it in different ways"""
import click
import pysam
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.path import Path
import matplotlib.patches as patches
from matplotlib.colors import LogNorm
import numpy as np
def we_have_too_many_bins(bins):
return sum([len(bb) for bb in bins]) > 5000 # This is our threshold for too many bins to compute
def autoscale_bin_size(chrom_lens, bin_cnt=100.0):
return int(sum(chrom_lens) / bin_cnt)
def compute_misalignment_matrix_from_bam(bam_fp, bin_size=None, i_know_what_i_am_doing=False):
"""Create a matrix of binned mis-alignments
:param bam_fp: input BAM
:param bin_size: size of bin in mega bases
:param i_know_what_i_am_doing: Set this to override the runtime warning of too many bins
"""
def binnify(_pos, _bins):
for n in range(1, len(_bins)):
if _pos < _bins[n]:
return n - 1
return len(_bins) - 1 # Should not get here
chrom_lens = [hdr['LN'] for hdr in bam_fp.header['SQ']]
    # Cast to int so the bin size can be used as a range() step below
    bin_size = int(bin_size * 1e6) if bin_size is not None else autoscale_bin_size(chrom_lens)
bins = [np.array(range(0, hdr['LN'], bin_size) + [hdr['LN']], dtype=int) for hdr in bam_fp.header['SQ']]
if not i_know_what_i_am_doing and we_have_too_many_bins(bins):
raise RuntimeWarning('The number of bins will be very large. '
'If you are sure you want to do this, '
'use the --i-know-what-i-am-doing flag.')
bin_centers = [(bb[:-1] + bb[1:]) / 2.0 for bb in bins]
# Rows = source (correct pos) Cols = destination (aligned pos)
matrices = [[np.zeros(shape=(len(bins[j]) - 1, len(bins[i]) - 1), dtype='uint32') for i in range(len(bins))] for j in range(len(bins))]
# TAG TYPE VALUE
# XR i Aligned chromosome
# XP i Aligned pos
for r in bam_fp:
c_chrom, c_pos, a_chrom, a_pos = r.reference_id, r.pos, r.get_tag('XR'), r.get_tag('XP')
c_pos_binned, a_pos_binned = binnify(c_pos, bins[c_chrom]), binnify(a_pos, bins[a_chrom])
matrices[c_chrom][a_chrom][c_pos_binned, a_pos_binned] += 1
return chrom_lens, bins, bin_centers, matrices
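# Minimal usage sketch (hypothetical BAM file name, not part of the original
# module): the function above expects reads carrying the custom XR (correct
# chromosome) and XP (correct position) tags described in the comment above.
# Defined for illustration only, never called.
def _example_binned_misalignments(bam_path='misaligned_reads.bam'):
    return compute_misalignment_matrix_from_bam(
        pysam.AlignmentFile(bam_path, 'rb'), bin_size=1.0)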
def plot_genome_as_a_circle(ax, chrom_lens, chrom_gap=np.pi / 50, chrom_radius=1.0, chrom_thick=5, r_max=1.05):
"""Plot the chromosomes on a circle."""
total_len = sum(chrom_lens)
radians_per_base = (2.0 * np.pi - len(chrom_lens) * chrom_gap) / total_len # With allowance for chrom gaps
theta_stops, x_ticks, x_tick_labels = [], [], []
delta_radian = 0.01
start_radian = 0
for ch_no, l in enumerate(chrom_lens):
end_radian = start_radian + l * radians_per_base
theta = np.arange(start_radian, end_radian, delta_radian)
theta_stops.append((start_radian, end_radian))
ax.plot(theta, [chrom_radius * 1.01] * theta.size, lw=chrom_thick, zorder=-1) # , color=[.3, .3, .3])
x_ticks.append((start_radian + end_radian)/2)
x_tick_labels.append(str(ch_no + 1))
start_radian = end_radian + chrom_gap
plt.setp(ax.get_yticklabels(), visible=False)
ax.grid(False)
plt.setp(ax, xticks=x_ticks, xticklabels=x_tick_labels)
ax.set_rmax(r_max)
return theta_stops
def plot_read_mis_alignments_on_a_circle(ax, chrom_lens, bins, bin_centers, matrices, theta_stops,
chrom_radius=1.0, scaling_factor=0.01):
scaling_factor *= 0.01
# http://matplotlib.org/users/path_tutorial.html
codes = [
Path.MOVETO,
Path.CURVE4,
Path.CURVE4,
Path.CURVE4,
]
for i in range(len(bins)):
for j in range(len(bins)):
mat = matrices[i][j]
range_bp_origin, range_bp_dest = float(chrom_lens[i]), float(chrom_lens[j])
offset_origin, offset_dest = theta_stops[i][0], theta_stops[j][0]
range_origin, range_dest = theta_stops[i][1] - theta_stops[i][0], theta_stops[j][1] - theta_stops[j][0]
scale_origin, scale_dest = range_origin / range_bp_origin, range_dest / range_bp_dest
c_origin, c_dest = offset_origin + bin_centers[i] * scale_origin, offset_dest + bin_centers[j] * scale_dest
this_origin, this_dest = np.tile(c_origin, c_dest.shape[0]), np.repeat(c_dest, c_origin.shape[0])
mat_flat = mat.ravel()
idx, = mat_flat.nonzero()
for ii in idx:
t0, t1 = this_origin[ii], this_dest[ii]
this_radius = max(min(1.0, abs(t1 - t0) / np.pi), 0.05) * chrom_radius
vertices = [
(t0, chrom_radius), # P0
(t0, chrom_radius - this_radius), # P1
(t1, chrom_radius - this_radius), # P2
(t1, chrom_radius), # P3
]
path = Path(vertices, codes)
patch = patches.PathPatch(path, facecolor='none', lw=scaling_factor * mat_flat[ii])
ax.add_patch(patch)
def circle_plot(chrom_lens, bins, bin_centers, matrices, scaling_factor):
"""Plot the confusion matrix as a circle plot."""
fig = plt.figure()
ax = fig.add_subplot(111, polar=True)
theta_stops = plot_genome_as_a_circle(ax, chrom_lens)
plot_read_mis_alignments_on_a_circle(ax, chrom_lens, bins, bin_centers, matrices, theta_stops, chrom_radius=1.0, scaling_factor=scaling_factor)
def plot_genome_as_a_square(ax, bins, chrom_gap=1000, chrom_thick=5):
"""Plot the chromosomes on a matrix."""
start_pos, linear_stops, x_ticks, x_tick_labels = chrom_gap, [], [], []
for ch_no, b in enumerate(bins):
linear_stops.append([start_pos, start_pos + b[-1]])
ax.plot([x + start_pos for x in b], [0 for _ in b], color='k' if ch_no % 2 else 'gray', lw=chrom_thick, zorder=-1)
ax.plot([0 for _ in b], [x + start_pos for x in b], color='k' if ch_no % 2 else 'gray', lw=chrom_thick, zorder=-1)
x_ticks.append((start_pos + start_pos + b[-1]) / 2)
x_tick_labels.append(str(ch_no + 1))
start_pos += b[-1] + chrom_gap
#plt.setp(ax.get_yticklabels(), visible=False)
ax.grid(False)
plt.setp(ax, xticks=x_ticks, xticklabels=x_tick_labels, yticks=x_ticks, yticklabels=x_tick_labels)
return linear_stops
def plot_read_mis_alignments_as_a_matrix(ax, chrom_lens, bins, bin_centers, matrices, linear_stops,
scaling_factor=1.0, plot_grid=True):
for i in range(len(bins)):
for j in range(len(bins)):
mat = matrices[i][j]
range_bp_x, range_bp_y = float(chrom_lens[i]), float(chrom_lens[j])
offset_x, offset_y = linear_stops[i][0], linear_stops[j][0]
range_x, range_y = linear_stops[i][1] - linear_stops[i][0], linear_stops[j][1] - linear_stops[j][0]
scale_x, scale_y = range_x / range_bp_x, range_y / range_bp_y
cx, cy = offset_x + bin_centers[i] * scale_x, offset_y + bin_centers[j] * scale_y
this_x, this_y = np.tile(cx, cy.shape[0]), np.repeat(cy, cx.shape[0])
if plot_grid: ax.plot(this_x, this_y, '.', color=(0.8, 0.8, 0.8), ms=2, zorder=-1)
mat_flat = mat.ravel()
idx, = mat_flat.nonzero()
if idx.size > 0:
ax.scatter(this_x[idx], this_y[idx], mat_flat[idx] * scaling_factor, facecolors='none')
def matrix_plot(chrom_lens, bins, bin_centers, matrices, scaling_factor, plot_grid=True):
"""Plot the confusion matrix as a ... matrix."""
fig = plt.figure()
ax = fig.add_subplot(111)
linear_stops = plot_genome_as_a_square(ax, bins, chrom_gap=max(chrom_lens) * 0.1)
plot_read_mis_alignments_as_a_matrix(ax, chrom_lens, bins, bin_centers, matrices, linear_stops,
scaling_factor=scaling_factor, plot_grid=plot_grid)
plt.setp(ax, aspect=1, xlabel='Correct', ylabel='Aligned')
def is_grid_too_dense(bins):
return sum([len(bb) for bb in bins]) > 100 # This is our threshold for too dense a grid to show
def auto_scale_scaling_factor(matrices, scale=1000.0):
return scale / max([matrices[i][j].max() for i in range(len(matrices)) for j in range(len(matrices[i]))])
@click.command()
@click.argument('badbam', type=click.Path(exists=True))
@click.option('--circle', type=click.Path(), help='Name of figure file for circle plot')
@click.option('--matrix', type=click.Path(), help='Name of figure file for matrix plot')
@click.option('--bin-size', type=float, default=None, help='Bin size in Mb. Omit to auto-scale')
@click.option('--scaling-factor', type=float, default=None, help='Scale size of disks/lines in plot. Omit to auto-scale')
@click.option('--i-know-what-i-am-doing', is_flag=True, help='Override bin density safety')
def cli(badbam, circle, matrix, bin_size, scaling_factor, i_know_what_i_am_doing):
"""Prepare a binned matrix of mis-alignments and plot it in different ways"""
chrom_lens, bins, bin_centers, matrices = \
compute_misalignment_matrix_from_bam(pysam.AlignmentFile(badbam, 'rb'),
bin_size=bin_size, i_know_what_i_am_doing=i_know_what_i_am_doing)
scaling_factor = scaling_factor or auto_scale_scaling_factor(matrices)
if circle is not None:
circle_plot(chrom_lens, bins, bin_centers, matrices, scaling_factor)
plt.savefig(circle)
if matrix is not None:
matrix_plot(chrom_lens, bins, bin_centers, matrices, scaling_factor,
plot_grid=not is_grid_too_dense(bins))
plt.savefig(matrix)
if __name__ == '__main__':
    cli()
| gpl-2.0 |
petebachant/CFT-vectors | cft_vectors.py | 1 | 18584 | #!/usr/bin/env python
"""
This script generates a force and velocity vector diagram for a cross-flow
turbine.
"""
from __future__ import division, print_function
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
from scipy.interpolate import interp1d
import seaborn as sns
from pxl.styleplot import set_sns
import os
# Define some colors (some from the Seaborn deep palette)
blue = sns.color_palette()[0]
green = sns.color_palette()[1]
dark_gray = (0.3, 0.3, 0.3)
red = sns.color_palette()[2]
purple = sns.color_palette()[3]
tan = sns.color_palette()[4]
light_blue = sns.color_palette()[5]
def load_foildata():
"""Loads NACA 0020 airfoil data at Re = 2.1 x 10^5."""
Re = 2.1e5
foil = "0020"
fname = "NACA {}_T1_Re{:.3f}_M0.00_N9.0.dat".format(foil, Re/1e6)
fpath = "data/{}".format(fname)
alpha, cl, cd = np.loadtxt(fpath, skiprows=14, unpack=True)
if alpha[0] != 0.0:
alpha = np.append([0.0], alpha[:-1])
cl = np.append([1e-12], cl[:-1])
cd = np.append(cd[0], cd[:-1])
# Mirror data about 0 degrees AoA since it's a symmetrical foil
alpha = np.append(-np.flipud(alpha), alpha)
cl = np.append(-np.flipud(cl), cl)
cd = np.append(np.flipud(cd), cd)
df = pd.DataFrame()
df["alpha_deg"] = alpha
df["cl"] = cl
df["cd"] = cd
return df
def lookup_foildata(alpha_deg):
"""Lookup foil characteristics at given angle of attack."""
alpha_deg = np.asarray(alpha_deg)
df = load_foildata()
df["alpha_rad"] = np.deg2rad(df.alpha_deg)
f_cl = interp1d(df.alpha_deg, df.cl, bounds_error=False)
f_cd = interp1d(df.alpha_deg, df.cd, bounds_error=False)
f_ct = interp1d(df.alpha_deg, df.cl*np.sin(df.alpha_rad) \
- df.cd*np.cos(df.alpha_rad), bounds_error=False)
cl, cd, ct = f_cl(alpha_deg), f_cd(alpha_deg), f_ct(alpha_deg)
return {"cl": cl, "cd": cd, "ct": ct}
def calc_cft_ctorque(tsr=2.0, chord=0.14, R=0.5):
"""Calculate the geometric torque coefficient for a CFT."""
U_infty = 1.0
omega = tsr*U_infty/R
theta_blade_deg = np.arange(0, 721)
theta_blade_rad = np.deg2rad(theta_blade_deg)
blade_vel_mag = omega*R
blade_vel_x = blade_vel_mag*np.cos(theta_blade_rad)
blade_vel_y = blade_vel_mag*np.sin(theta_blade_rad)
u = U_infty # No induction
rel_vel_mag = np.sqrt((blade_vel_x + u)**2 + blade_vel_y**2)
rel_vel_x = u + blade_vel_x
rel_vel_y = blade_vel_y
relvel_dot_bladevel = (blade_vel_x*rel_vel_x + blade_vel_y*rel_vel_y)
alpha_rad = np.arccos(relvel_dot_bladevel/(rel_vel_mag*blade_vel_mag))
alpha_rad[theta_blade_deg > 180] *= -1
alpha_deg = np.rad2deg(alpha_rad)
foil_coeffs = lookup_foildata(alpha_deg)
ctorque = foil_coeffs["ct"]*chord/(2*R)*rel_vel_mag**2/U_infty**2
cdx = -foil_coeffs["cd"]*np.sin(np.pi/2 - alpha_rad + theta_blade_rad)
clx = foil_coeffs["cl"]*np.cos(np.pi/2 - alpha_rad - theta_blade_rad)
df = pd.DataFrame()
df["theta"] = theta_blade_deg
df["alpha_deg"] = alpha_deg
df["rel_vel_mag"] = rel_vel_mag
df["ctorque"] = ctorque
df["cdrag"] = clx + cdx
return df
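# Quick illustrative use of the geometric model above (which assumes no
# induction): find the azimuthal angle where the torque coefficient peaks for
# an assumed tip speed ratio. Defined for illustration only, never called.
def _peak_ctorque_theta(tsr=3.0):
    df = calc_cft_ctorque(tsr=tsr)
    return df.theta[df.ctorque.idxmax()]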
def mag(v):
"""
Return magnitude of 2-D vector (input as a tuple, list, or NumPy array).
"""
return np.sqrt(v[0]**2 + v[1]**2)
def rotate(v, rad):
"""Rotate a 2-D vector by rad radians."""
dc, ds = np.cos(rad), np.sin(rad)
x, y = v[0], v[1]
x, y = dc*x - ds*y, ds*x + dc*y
return np.array((x, y))
def gen_naca_points(naca="0020", c=100, npoints=100, tuples=True):
"""Generate points for a NACA foil."""
x = np.linspace(0, 1, npoints)*c
t = float(naca[2:])/100.0
y = 5.0*t*c*(0.2969*np.sqrt(x/c) - 0.1260*(x/c) - 0.3516*(x/c)**2 \
+ 0.2843*(x/c)**3 - 0.1015*(x/c)**4)
y = np.append(y, -y[::-1])
x = np.append(x, x[::-1])
if tuples:
return np.array([(x0, y0) for x0, y0 in zip(x, y)])
else:
return x, y
def test_gen_naca_points():
points = gen_naca_points()
x = []
y = []
for p in points:
x.append(p[0])
y.append(p[1])
fig, ax = plt.subplots()
ax.plot(x, y, "o")
ax.set_aspect(1)
plt.show()
def plot_radius(ax, theta_deg=0):
"""Plot radius at given azimuthal angle."""
r = 0.495
theta_rad = np.deg2rad(theta_deg)
x2, y2 = r*np.cos(theta_rad), r*np.sin(theta_rad)
ax.plot((0, x2), (0, y2), "gray", linewidth=2)
def plot_center(ax, length=0.07, linewidth=1.2):
"""Plot centermark at origin."""
ax.plot((0, 0), (-length/2, length/2), lw=linewidth, color="black")
ax.plot((-length/2, length/2), (0, 0), lw=linewidth, color="black")
def make_naca_path(c=0.3, theta_deg=0.0):
verts = gen_naca_points(c=c)
verts = np.array([rotate(v, -np.pi/2) for v in verts])
verts += (0.5, c/4)
theta_rad = np.deg2rad(theta_deg)
verts = np.array([rotate(v, theta_rad) for v in verts])
p = matplotlib.path.Path(verts, closed=True)
return p
def plot_foil(ax, c=0.3, theta_deg=0.0):
"""Plot the foil shape using a matplotlib patch."""
p = matplotlib.patches.PathPatch(make_naca_path(c, theta_deg),
facecolor="gray", linewidth=1,
edgecolor="gray")
ax.add_patch(p)
def plot_blade_path(ax, R=0.5):
"""Plot blade path as a dashed line."""
p = plt.Circle((0, 0), R, linestyle="dashed", edgecolor="black",
facecolor="none", linewidth=1)
ax.add_patch(p)
def plot_vectors(fig, ax, theta_deg=0.0, tsr=2.0, c=0.3, label=False):
"""Plot blade velocity, free stream velocity, relative velocity, lift, and
drag vectors.
"""
r = 0.5
u_infty = 0.26
theta_deg %= 360
theta_rad = np.deg2rad(theta_deg)
blade_xy = r*np.cos(theta_rad), r*np.sin(theta_rad)
head_width = 0.04
head_length = 0.11
linewidth = 1.5
# Function for plotting vector labels
def plot_label(text, x, y, dx, dy, text_width=0.09, text_height=0.03,
sign=-1, dist=1.0/3.0):
text_width *= plt.rcParams["font.size"]/12*6/fig.get_size_inches()[1]
text_height *= plt.rcParams["font.size"]/12*6/fig.get_size_inches()[1]
dvec = np.array((dx, dy))
perp_vec = rotate(dvec, np.pi/2)
perp_vec /= mag(perp_vec)
if theta_deg > 270:
diag = text_height
else:
diag = np.array((text_width, text_height))
# Projection of text diagonal vector onto normal vector
proj = np.dot(diag, perp_vec)
if sign != -1:
proj = 0 # Text is on right side of vector
if theta_deg > 180:
sign *= -1
dxlab, dylab = perp_vec*(np.abs(proj) + .01)*sign
xlab, ylab = x + dx*dist + dxlab, y + dy*dist + dylab
ax.text(xlab, ylab, text)
# Make blade velocity vector
x1, y1 = rotate((0.5, tsr*u_infty), np.deg2rad(theta_deg))
dx, dy = np.array(blade_xy) - np.array((x1, y1))
blade_vel = np.array((dx, dy))
ax.arrow(x1, y1, dx, dy, head_width=head_width, head_length=head_length,
length_includes_head=True, color=dark_gray, linewidth=linewidth)
if label:
plot_label(r"$-\omega r$", x1, y1, dx*0.25, dy*0.5)
# Make chord line vector
x1c, y1c = np.array((x1, y1)) - np.array((dx, dy))*0.5
x2c, y2c = np.array((x1, y1)) + np.array((dx, dy))*2
ax.plot([x1c, x2c], [y1c, y2c], marker=None, color="k", linestyle="-.",
zorder=1)
# Make free stream velocity vector
y1 += u_infty
ax.arrow(x1, y1, 0, -u_infty, head_width=head_width,
head_length=head_length, length_includes_head=True,
color=blue, linewidth=linewidth)
u_infty = np.array((0, -u_infty))
if label:
dy = -mag(u_infty)
plot_label(r"$U_\mathrm{in}$", x1, y1, 0, dy, text_width=0.1)
# Make relative velocity vector
dx, dy = np.array(blade_xy) - np.array((x1, y1))
rel_vel = u_infty + blade_vel
ax.plot((x1, x1 + dx), (y1, y1 + dy), lw=0)
ax.arrow(x1, y1, dx, dy, head_width=head_width, head_length=head_length,
length_includes_head=True, color=tan, linewidth=linewidth)
if label:
plot_label(r"$U_\mathrm{rel}$", x1, y1, dx, dy, sign=1,
text_width=0.11)
# Calculate angle between blade vel and rel vel
alpha_deg = np.rad2deg(np.arccos(np.dot(blade_vel/mag(blade_vel),
rel_vel/mag(rel_vel))))
if theta_deg > 180:
alpha_deg *= -1
# Make drag vector
drag_amplify = 3.0
data = lookup_foildata(alpha_deg)
drag = data["cd"]*mag(rel_vel)**2*drag_amplify
if drag < 0.4/drag_amplify:
hs = 0.5
else:
hs = 1
dx, dy = drag*np.array((dx, dy))/mag((dx, dy))
ax.arrow(blade_xy[0], blade_xy[1], dx, dy, head_width=head_width*hs,
head_length=head_length*hs, length_includes_head=True, color=red,
linewidth=linewidth)
if label:
plot_label(r"$F_d$", blade_xy[0], blade_xy[1], dx, dy, sign=-1,
dist=0.66)
# Make lift vector
lift_amplify = 1.5
lift = data["cl"]*mag(rel_vel)**2*lift_amplify
dx, dy = rotate((dx, dy), -np.pi/2)/mag((dx, dy))*lift
if np.abs(lift) < 0.4/lift_amplify:
hs = 0.5
else:
hs = 1
ax.plot((blade_xy[0], blade_xy[0] + dx), (blade_xy[1], blade_xy[1] + dy),
linewidth=0)
ax.arrow(blade_xy[0], blade_xy[1], dx, dy, head_width=head_width*hs,
head_length=head_length*hs, length_includes_head=True,
color=green, linewidth=linewidth)
if label:
plot_label(r"$F_l$", blade_xy[0], blade_xy[1], dx, dy, sign=-1,
text_width=0.12, text_height=0.02, dist=0.66)
# Label radius
if label:
plot_label("$r$", 0, 0, blade_xy[0], blade_xy[1], text_width=0.04,
text_height=0.04)
# Label angle of attack
if label:
ast = "simple,head_width={},tail_width={},head_length={}".format(
head_width*8, linewidth/16, head_length*8)
xy = blade_xy - rel_vel/mag(rel_vel)*0.2
ax.annotate(r"$\alpha$", xy=xy, xycoords="data",
xytext=(37.5, 22.5), textcoords="offset points",
arrowprops=dict(arrowstyle=ast,
ec="none",
connectionstyle="arc3,rad=0.1",
color="k"))
xy = blade_xy - blade_vel/mag(blade_vel)*0.2
ax.annotate("", xy=xy, xycoords="data",
xytext=(-15, -30), textcoords="offset points",
arrowprops=dict(arrowstyle=ast,
ec="none",
connectionstyle="arc3,rad=-0.1",
color="k"))
# Label azimuthal angle
if label:
xy = np.array(blade_xy)*0.6
ast = "simple,head_width={},tail_width={},head_length={}".format(
head_width*5.5, linewidth/22, head_length*5.5)
ax.annotate(r"$\theta$", xy=xy, xycoords="data",
xytext=(0.28, 0.12), textcoords="data",
arrowprops=dict(arrowstyle=ast,
ec="none",
connectionstyle="arc3,rad=0.1",
color="k"))
ax.annotate("", xy=(0.41, 0), xycoords="data",
xytext=(0.333, 0.12), textcoords="data",
arrowprops=dict(arrowstyle=ast,
ec="none",
connectionstyle="arc3,rad=-0.1",
color="k"))
# Label pitching moment
if label:
xy = np.array(blade_xy)*1.1 - blade_vel/mag(blade_vel) * c/4
ast = "simple,head_width={},tail_width={},head_length={}".format(
head_width*8, linewidth/16, head_length*8)
ax.annotate(r"", xy=xy, xycoords="data",
xytext=(25, -15), textcoords="offset points",
arrowprops=dict(arrowstyle=ast,
ec="none",
connectionstyle="arc3,rad=0.6",
color="k"))
plot_label(r"$M$", xy[0], xy[1], 0.1, 0.1, sign=-1, dist=0.66)
return {"u_infty": u_infty, "blade_vel": blade_vel, "rel_vel": rel_vel}
def plot_alpha(ax=None, tsr=2.0, theta=None, alpha_ss=None, **kwargs):
"""Plot angle of attack versus azimuthal angle."""
if theta is not None:
theta %= 360
if ax is None:
fig, ax = plt.subplots()
df = calc_cft_ctorque(tsr=tsr)
ax.plot(df.theta, df.alpha_deg, **kwargs)
ax.set_ylabel(r"$\alpha$ (degrees)")
ax.set_xlabel(r"$\theta$ (degrees)")
ax.set_xlim((0, 360))
ylim = np.round(df.alpha_deg.max() + 5)
ax.set_ylim((-ylim, ylim))
if theta is not None:
f = interp1d(df.theta, df.alpha_deg)
ax.plot(theta, f(theta), "ok")
if alpha_ss is not None:
ax.hlines((alpha_ss, -alpha_ss), 0, 360, linestyles="dashed")
def plot_rel_vel_mag(ax=None, tsr=2.0, theta=None, **kwargs):
"""Plot relative velocity magnitude versus azimuthal angle."""
if theta is not None:
theta %= 360
if ax is None:
fig, ax = plt.subplots()
df = calc_cft_ctorque(tsr=tsr)
ax.plot(df.theta, df.rel_vel_mag, **kwargs)
ax.set_ylabel(r"$|\vec{U}_\mathrm{rel}|$")
ax.set_xlabel(r"$\theta$ (degrees)")
ax.set_xlim((0, 360))
if theta is not None:
f = interp1d(df.theta, df.rel_vel_mag)
ax.plot(theta, f(theta), "ok")
def plot_alpha_relvel_all(tsrs=np.arange(1.5, 6.1, 1.0), save=False):
"""Plot angle of attack and relative velocity magnitude for a list of TSRs.
Figure will have two subplots in a single row.
"""
fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(7.5, 3.0))
cm = plt.cm.get_cmap("Reds")
for tsr in tsrs:
color = cm(tsr/np.max(tsrs))
plot_alpha(ax=ax1, tsr=tsr, label=r"$\lambda = {}$".format(tsr),
color=color)
plot_rel_vel_mag(ax=ax2, tsr=tsr, color=color)
[a.set_xticks(np.arange(0, 361, 60)) for a in (ax1, ax2)]
ax1.legend(loc=(0.17, 1.1), ncol=len(tsrs))
ax1.set_ylim((-45, 45))
ax1.set_yticks(np.arange(-45, 46, 15))
ax2.set_ylabel(r"$|\vec{U}_\mathrm{rel}|/U_\infty$")
fig.tight_layout()
if save:
fig.savefig("figures/alpha_deg_urel_geom.pdf", bbox_inches="tight")
def plot_ctorque(ax=None, tsr=2.0, theta=None, **kwargs):
"""Plot torque coefficient versus azimuthal angle."""
theta %= 360
if ax is None:
fig, ax = plt.subplots()
df = calc_cft_ctorque(tsr=tsr)
ax.plot(df.theta, df.ctorque, **kwargs)
ax.set_ylabel("Torque coeff.")
ax.set_xlabel(r"$\theta$ (degrees)")
ax.set_xlim((0, 360))
if theta is not None:
f = interp1d(df.theta, df.ctorque)
ax.plot(theta, f(theta), "ok")
def plot_diagram(fig=None, ax=None, theta_deg=0.0, tsr=2.0, label=False,
save=False, axis="on", full_view=True):
"""Plot full vector diagram."""
if ax is None:
fig, ax = plt.subplots(figsize=(6, 6))
plot_blade_path(ax)
if label:
# Create dashed line for x-axis
ax.plot((-0.5, 0.5), (0, 0), linestyle="dashed", color="k",
zorder=1)
plot_foil(ax, c=0.3, theta_deg=theta_deg)
plot_radius(ax, theta_deg)
plot_center(ax)
plot_vectors(fig, ax, theta_deg, tsr, label=label)
# Figure formatting
if full_view:
ax.set_xlim((-1, 1))
ax.set_ylim((-1, 1))
ax.set_aspect(1)
ax.set_xticks([])
ax.set_yticks([])
ax.axis(axis)
if save:
fig.savefig("figures/cft-vectors.pdf")
def plot_all(theta_deg=0.0, tsr=2.0, scale=1.0, full_view=True):
"""Create diagram and plots of kinematics in a single figure."""
fig = plt.figure(figsize=(7.5*scale, 4.75*scale))
# Draw vector diagram
ax1 = plt.subplot2grid((3, 3), (0, 0), colspan=2, rowspan=3)
plot_diagram(fig, ax1, theta_deg, tsr, axis="on", full_view=full_view)
# Plot angle of attack
ax2 = plt.subplot2grid((3, 3), (0, 2))
plot_alpha(ax2, tsr=tsr, theta=theta_deg, alpha_ss=18, color=light_blue)
# Plot relative velocity magnitude
ax3 = plt.subplot2grid((3, 3), (1, 2))
plot_rel_vel_mag(ax3, tsr=tsr, theta=theta_deg, color=tan)
# Plot torque coefficient
ax4 = plt.subplot2grid((3, 3), (2, 2))
plot_ctorque(ax4, tsr=tsr, theta=theta_deg, color=purple)
fig.tight_layout()
return fig
def make_frame(t):
"""Make a frame for a movie."""
sec_per_rev = 5.0
deg = t/sec_per_rev*360
return mplfig_to_npimage(plot_all(deg, scale=2.0))
def make_animation(filetype="mp4", fps=30):
"""Make animation video."""
if not os.path.isdir("videos"):
os.mkdir("videos")
animation = VideoClip(make_frame, duration=5.0)
if "mp4" in filetype.lower():
animation.write_videofile("videos/cft-animation.mp4", fps=fps)
elif "gif" in filetype.lower():
animation.write_gif("videos/cft-animation.gif", fps=fps)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Create cross-flow turbine \
vector diagrams.")
parser.add_argument("create", choices=["figure", "diagram", "animation"],
help="Either create a static figure or animation")
parser.add_argument("--angle", type=float, default=60.0,
help="Angle (degrees) to create figure")
parser.add_argument("--show", action="store_true", default=False)
parser.add_argument("--save", "-s", action="store_true", default=False,
help="Save figure")
args = parser.parse_args()
if args.save:
if not os.path.isdir("figures"):
os.mkdir("figures")
if args.create == "diagram":
set_sns(font_scale=2)
plot_diagram(theta_deg=args.angle, label=True, axis="off",
save=args.save)
elif args.create == "figure":
set_sns()
plot_alpha_relvel_all(save=args.save)
elif args.create == "animation":
set_sns(font_scale=2)
from moviepy.editor import VideoClip
from moviepy.video.io.bindings import mplfig_to_npimage
make_animation()
if args.show:
plt.show()
| mit |
gdementen/PyTables | c-blosc/bench/plot-speeds.py | 11 | 6852 | """Script for plotting the results of the 'suite' benchmark.
Invoke without parameters for usage hints.
:Author: Francesc Alted
:Date: 2010-06-01
"""
import matplotlib as mpl
from pylab import *
KB_ = 1024
MB_ = 1024*KB_
GB_ = 1024*MB_
NCHUNKS = 128 # keep in sync with bench.c
linewidth=2
#markers= ['+', ',', 'o', '.', 's', 'v', 'x', '>', '<', '^']
#markers= [ 'x', '+', 'o', 's', 'v', '^', '>', '<', ]
markers= [ 's', 'o', 'v', '^', '+', 'x', '>', '<', '.', ',' ]
markersize = 8
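# The parser below expects the text output of the c-blosc 'bench' suite. As
# inferred from the parsing logic (the exact layout may differ between bench
# versions), the relevant lines look roughly like:
#   --> 4, 8388608, 8, 19, blosclz
#   memcpy(write):   1231.8 us, 811.8 MB/s
#   comp(write):     1092.5 us, 915.3 MB/s   Final bytes: ...  Ratio: 3.14
#   decomp(read):     441.9 us, 2262.4 MB/s  OK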
def get_values(filename):
f = open(filename)
values = {"memcpyw": [], "memcpyr": []}
for line in f:
if line.startswith('-->'):
tmp = line.split('-->')[1]
nthreads, size, elsize, sbits, codec = [i for i in tmp.split(', ')]
nthreads, size, elsize, sbits = map(int, (nthreads, size, elsize, sbits))
values["size"] = size * NCHUNKS / MB_;
values["elsize"] = elsize;
values["sbits"] = sbits;
values["codec"] = codec
# New run for nthreads
(ratios, speedsw, speedsr) = ([], [], [])
# Add a new entry for (ratios, speedw, speedr)
values[nthreads] = (ratios, speedsw, speedsr)
#print "-->", nthreads, size, elsize, sbits
elif line.startswith('memcpy(write):'):
tmp = line.split(',')[1]
memcpyw = float(tmp.split(' ')[1])
values["memcpyw"].append(memcpyw)
elif line.startswith('memcpy(read):'):
tmp = line.split(',')[1]
memcpyr = float(tmp.split(' ')[1])
values["memcpyr"].append(memcpyr)
elif line.startswith('comp(write):'):
tmp = line.split(',')[1]
speedw = float(tmp.split(' ')[1])
ratio = float(line.split(':')[-1])
speedsw.append(speedw)
ratios.append(ratio)
elif line.startswith('decomp(read):'):
tmp = line.split(',')[1]
speedr = float(tmp.split(' ')[1])
speedsr.append(speedr)
if "OK" not in line:
print "WARNING! OK not found in decomp line!"
f.close()
return nthreads, values
def show_plot(plots, yaxis, legends, gtitle, xmax=None):
    xlabel('Compression ratio')
ylabel('Speed (MB/s)')
title(gtitle)
xlim(0, xmax)
#ylim(0, 10000)
ylim(0, None)
grid(True)
# legends = [f[f.find('-'):f.index('.out')] for f in filenames]
# legends = [l.replace('-', ' ') for l in legends]
#legend([p[0] for p in plots], legends, loc = "upper left")
legend([p[0] for p in plots
if not isinstance(p, mpl.lines.Line2D)],
legends, loc = "best")
#subplots_adjust(bottom=0.2, top=None, wspace=0.2, hspace=0.2)
if outfile:
print "Saving plot to:", outfile
savefig(outfile, dpi=64)
else:
show()
if __name__ == '__main__':
from optparse import OptionParser
usage = "usage: %prog [-r] [-o outfile] [-t title ] [-d|-c] filename"
compress_title = 'Compression speed'
decompress_title = 'Decompression speed'
yaxis = 'No axis name'
parser = OptionParser(usage=usage)
parser.add_option('-o',
'--outfile',
dest='outfile',
help=('filename for output (many extensions '
'supported, e.g. .png, .jpg, .pdf)'))
parser.add_option('-t',
'--title',
dest='title',
help='title of the plot',)
parser.add_option('-l',
'--limit',
dest='limit',
help='expression to limit number of threads shown',)
parser.add_option('-x',
'--xmax',
dest='xmax',
help='limit the x-axis',
default=None)
parser.add_option('-r', '--report', action='store_true',
dest='report',
help='generate file for reporting ',
default=False)
parser.add_option('-d', '--decompress', action='store_true',
dest='dspeed',
help='plot decompression data',
default=False)
parser.add_option('-c', '--compress', action='store_true',
dest='cspeed',
help='plot compression data',
default=False)
(options, args) = parser.parse_args()
if len(args) == 0:
parser.error("No input arguments")
elif len(args) > 1:
parser.error("Too many input arguments")
else:
pass
if options.report and options.outfile:
parser.error("Can only select one of [-r, -o]")
if options.dspeed and options.cspeed:
parser.error("Can only select one of [-d, -c]")
elif options.cspeed:
options.dspeed = False
plot_title = compress_title
else: # either neither or dspeed
options.dspeed = True
plot_title = decompress_title
filename = args[0]
cspeed = options.cspeed
dspeed = options.dspeed
if options.outfile:
outfile = options.outfile
elif options.report:
if cspeed:
outfile = filename[:filename.rindex('.')] + '-compr.png'
else:
outfile = filename[:filename.rindex('.')] + '-decompr.png'
else:
outfile = None
plots = []
legends = []
nthreads, values = get_values(filename)
#print "Values:", values
if options.limit:
thread_range = eval(options.limit)
else:
thread_range = range(1, nthreads+1)
if options.title:
plot_title = options.title
else:
plot_title += " (%(size).1f MB, %(elsize)d bytes, %(sbits)d bits), %(codec)s" % values
gtitle = plot_title
for nt in thread_range:
#print "Values for %s threads --> %s" % (nt, values[nt])
(ratios, speedw, speedr) = values[nt]
if cspeed:
speed = speedw
else:
speed = speedr
#plot_ = semilogx(ratios, speed, linewidth=2)
plot_ = plot(ratios, speed, linewidth=2)
plots.append(plot_)
nmarker = nt
if nt >= len(markers):
nmarker = nt%len(markers)
setp(plot_, marker=markers[nmarker], markersize=markersize,
linewidth=linewidth)
legends.append("%d threads" % nt)
# Add memcpy lines
if cspeed:
mean = np.mean(values["memcpyw"])
message = "memcpy (write to memory)"
else:
mean = np.mean(values["memcpyr"])
message = "memcpy (read from memory)"
plot_ = axhline(mean, linewidth=3, linestyle='-.', color='black')
text(1.0, mean+50, message)
plots.append(plot_)
show_plot(plots, yaxis, legends, gtitle, xmax=int(options.xmax) if
options.xmax else None)
| bsd-3-clause |
arabenjamin/scikit-learn | sklearn/ensemble/tests/test_base.py | 284 | 1328 | """
Testing for the base module (sklearn.ensemble.base).
"""
# Authors: Gilles Louppe
# License: BSD 3 clause
from numpy.testing import assert_equal
from nose.tools import assert_true
from sklearn.utils.testing import assert_raise_message
from sklearn.datasets import load_iris
from sklearn.ensemble import BaggingClassifier
from sklearn.linear_model import Perceptron
def test_base():
# Check BaseEnsemble methods.
ensemble = BaggingClassifier(base_estimator=Perceptron(), n_estimators=3)
iris = load_iris()
ensemble.fit(iris.data, iris.target)
ensemble.estimators_ = [] # empty the list and create estimators manually
ensemble._make_estimator()
ensemble._make_estimator()
ensemble._make_estimator()
ensemble._make_estimator(append=False)
assert_equal(3, len(ensemble))
assert_equal(3, len(ensemble.estimators_))
assert_true(isinstance(ensemble[0], Perceptron))
def test_base_zero_n_estimators():
# Check that instantiating a BaseEnsemble with n_estimators<=0 raises
# a ValueError.
ensemble = BaggingClassifier(base_estimator=Perceptron(), n_estimators=0)
iris = load_iris()
assert_raise_message(ValueError,
"n_estimators must be greater than zero, got 0.",
ensemble.fit, iris.data, iris.target)
| bsd-3-clause |
GuessWhoSamFoo/pandas | pandas/tests/tslibs/test_parsing.py | 2 | 5799 | # -*- coding: utf-8 -*-
"""
Tests for Timestamp parsing, aimed at pandas/_libs/tslibs/parsing.pyx
"""
from datetime import datetime
from dateutil.parser import parse
import numpy as np
import pytest
from pandas._libs.tslibs import parsing
from pandas._libs.tslibs.parsing import parse_time_string
import pandas.util._test_decorators as td
from pandas.util import testing as tm
def test_parse_time_string():
(date, parsed, reso) = parse_time_string("4Q1984")
(date_lower, parsed_lower, reso_lower) = parse_time_string("4q1984")
assert date == date_lower
assert reso == reso_lower
assert parsed == parsed_lower
@pytest.mark.parametrize("dashed,normal", [
("1988-Q2", "1988Q2"),
("2Q-1988", "2Q1988")
])
def test_parse_time_quarter_with_dash(dashed, normal):
# see gh-9688
(date_dash, parsed_dash, reso_dash) = parse_time_string(dashed)
(date, parsed, reso) = parse_time_string(normal)
assert date_dash == date
assert parsed_dash == parsed
assert reso_dash == reso
@pytest.mark.parametrize("dashed", [
"-2Q1992", "2-Q1992", "4-4Q1992"
])
def test_parse_time_quarter_with_dash_error(dashed):
msg = ("Unknown datetime string format, "
"unable to parse: {dashed}".format(dashed=dashed))
with pytest.raises(parsing.DateParseError, match=msg):
parse_time_string(dashed)
@pytest.mark.parametrize("date_string,expected", [
("123.1234", False),
("-50000", False),
("999", False),
("m", False),
("T", False),
("Mon Sep 16, 2013", True),
("2012-01-01", True),
("01/01/2012", True),
("01012012", True),
("0101", True),
("1-1", True)
])
def test_does_not_convert_mixed_integer(date_string, expected):
assert parsing._does_string_look_like_datetime(date_string) is expected
@pytest.mark.parametrize("date_str,kwargs,msg", [
("2013Q5", dict(),
("Incorrect quarterly string is given, "
"quarter must be between 1 and 4: 2013Q5")),
# see gh-5418
("2013Q1", dict(freq="INVLD-L-DEC-SAT"),
("Unable to retrieve month information "
"from given freq: INVLD-L-DEC-SAT"))
])
def test_parsers_quarterly_with_freq_error(date_str, kwargs, msg):
with pytest.raises(parsing.DateParseError, match=msg):
parsing.parse_time_string(date_str, **kwargs)
@pytest.mark.parametrize("date_str,freq,expected", [
("2013Q2", None, datetime(2013, 4, 1)),
("2013Q2", "A-APR", datetime(2012, 8, 1)),
("2013-Q2", "A-DEC", datetime(2013, 4, 1))
])
def test_parsers_quarterly_with_freq(date_str, freq, expected):
result, _, _ = parsing.parse_time_string(date_str, freq=freq)
assert result == expected
@pytest.mark.parametrize("date_str", [
"2Q 2005", "2Q-200A", "2Q-200",
"22Q2005", "2Q200.", "6Q-20"
])
def test_parsers_quarter_invalid(date_str):
if date_str == "6Q-20":
msg = ("Incorrect quarterly string is given, quarter "
"must be between 1 and 4: {date_str}".format(date_str=date_str))
else:
msg = ("Unknown datetime string format, unable "
"to parse: {date_str}".format(date_str=date_str))
with pytest.raises(ValueError, match=msg):
parsing.parse_time_string(date_str)
@pytest.mark.parametrize("date_str,expected", [
("201101", datetime(2011, 1, 1, 0, 0)),
("200005", datetime(2000, 5, 1, 0, 0))
])
def test_parsers_month_freq(date_str, expected):
result, _, _ = parsing.parse_time_string(date_str, freq="M")
assert result == expected
@td.skip_if_not_us_locale
@pytest.mark.parametrize("string,fmt", [
("20111230", "%Y%m%d"),
("2011-12-30", "%Y-%m-%d"),
("30-12-2011", "%d-%m-%Y"),
("2011-12-30 00:00:00", "%Y-%m-%d %H:%M:%S"),
("2011-12-30T00:00:00", "%Y-%m-%dT%H:%M:%S"),
("2011-12-30 00:00:00.000000", "%Y-%m-%d %H:%M:%S.%f")
])
def test_guess_datetime_format_with_parseable_formats(string, fmt):
result = parsing._guess_datetime_format(string)
assert result == fmt
@pytest.mark.parametrize("dayfirst,expected", [
(True, "%d/%m/%Y"),
(False, "%m/%d/%Y")
])
def test_guess_datetime_format_with_dayfirst(dayfirst, expected):
ambiguous_string = "01/01/2011"
result = parsing._guess_datetime_format(ambiguous_string,
dayfirst=dayfirst)
assert result == expected
@td.skip_if_has_locale
@pytest.mark.parametrize("string,fmt", [
("30/Dec/2011", "%d/%b/%Y"),
("30/December/2011", "%d/%B/%Y"),
("30/Dec/2011 00:00:00", "%d/%b/%Y %H:%M:%S")
])
def test_guess_datetime_format_with_locale_specific_formats(string, fmt):
result = parsing._guess_datetime_format(string)
assert result == fmt
@pytest.mark.parametrize("invalid_dt", [
"2013", "01/2013", "12:00:00", "1/1/1/1",
"this_is_not_a_datetime", "51a", 9,
datetime(2011, 1, 1)
])
def test_guess_datetime_format_invalid_inputs(invalid_dt):
# A datetime string must include a year, month and a day for it to be
# guessable, in addition to being a string that looks like a datetime.
assert parsing._guess_datetime_format(invalid_dt) is None
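# A minimal sketch (not part of the upstream test suite; the helper name
# `_guessability_demo` is introduced only here) restating the rule documented
# above: a string must contain a year, a month and a day before a format can
# be guessed.
def _guessability_demo():
    assert parsing._guess_datetime_format("2011-12-30") == "%Y-%m-%d"
    assert parsing._guess_datetime_format("12:00:00") is None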
@pytest.mark.parametrize("string,fmt", [
("2011-1-1", "%Y-%m-%d"),
("1/1/2011", "%m/%d/%Y"),
("30-1-2011", "%d-%m-%Y"),
("2011-1-1 0:0:0", "%Y-%m-%d %H:%M:%S"),
("2011-1-3T00:00:0", "%Y-%m-%dT%H:%M:%S"),
("2011-1-1 00:00:00", "%Y-%m-%d %H:%M:%S")
])
def test_guess_datetime_format_no_padding(string, fmt):
# see gh-11142
result = parsing._guess_datetime_format(string)
assert result == fmt
def test_try_parse_dates():
arr = np.array(["5/1/2000", "6/1/2000", "7/1/2000"], dtype=object)
result = parsing.try_parse_dates(arr, dayfirst=True)
expected = np.array([parse(d, dayfirst=True) for d in arr])
tm.assert_numpy_array_equal(result, expected)
| bsd-3-clause |
lenovor/scikit-learn | examples/mixture/plot_gmm_selection.py | 248 | 3223 | """
=================================
Gaussian Mixture Model Selection
=================================
This example shows that model selection can be performed with
Gaussian Mixture Models using information-theoretic criteria (BIC).
Model selection concerns both the covariance type
and the number of components in the model.
In that case, AIC also provides the right result (not shown to save time),
but BIC is better suited if the problem is to identify the right model.
Unlike Bayesian procedures, such inferences are prior-free.
Here, the model with 2 components and full covariance
(which corresponds to the true generative model) is selected.
"""
print(__doc__)
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
lowest_bic = np.infty
bic = []
n_components_range = range(1, 7)
cv_types = ['spherical', 'tied', 'diag', 'full']
for cv_type in cv_types:
for n_components in n_components_range:
# Fit a mixture of Gaussians with EM
gmm = mixture.GMM(n_components=n_components, covariance_type=cv_type)
gmm.fit(X)
bic.append(gmm.bic(X))
if bic[-1] < lowest_bic:
lowest_bic = bic[-1]
best_gmm = gmm
bic = np.array(bic)
color_iter = itertools.cycle(['k', 'r', 'g', 'b', 'c', 'm', 'y'])
clf = best_gmm
bars = []
# Plot the BIC scores
spl = plt.subplot(2, 1, 1)
for i, (cv_type, color) in enumerate(zip(cv_types, color_iter)):
xpos = np.array(n_components_range) + .2 * (i - 2)
bars.append(plt.bar(xpos, bic[i * len(n_components_range):
(i + 1) * len(n_components_range)],
width=.2, color=color))
plt.xticks(n_components_range)
plt.ylim([bic.min() * 1.01 - .01 * bic.max(), bic.max()])
plt.title('BIC score per model')
xpos = np.mod(bic.argmin(), len(n_components_range)) + .65 +\
.2 * np.floor(bic.argmin() / len(n_components_range))
plt.text(xpos, bic.min() * 0.97 + .03 * bic.max(), '*', fontsize=14)
spl.set_xlabel('Number of components')
spl.legend([b[0] for b in bars], cv_types)
# Plot the winner
splot = plt.subplot(2, 1, 2)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(clf.means_, clf.covars_,
color_iter)):
v, w = linalg.eigh(covar)
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan2(w[0][1], w[0][0])
angle = 180 * angle / np.pi # convert to degrees
v *= 4
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(.5)
splot.add_artist(ell)
plt.xlim(-10, 10)
plt.ylim(-3, 6)
plt.xticks(())
plt.yticks(())
plt.title('Selected GMM: full model, 2 components')
plt.subplots_adjust(hspace=.35, bottom=.02)
plt.show()
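# A small follow-up sketch (not in the original example): the flattened BIC
# array can also be used to report the selected configuration directly,
# mirroring how `xpos` locates the winning bar above.
best_idx = bic.argmin()
print("Selected covariance type: %s" % cv_types[best_idx // len(n_components_range)])
print("Selected number of components: %d" % n_components_range[best_idx % len(n_components_range)])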
| bsd-3-clause |
paultcochrane/bokeh | examples/charts/file/stocks_timeseries.py | 33 | 1230 | from collections import OrderedDict
import pandas as pd
from bokeh.charts import TimeSeries, show, output_file
# read in some stock data from the Yahoo Finance API
AAPL = pd.read_csv(
"http://ichart.yahoo.com/table.csv?s=AAPL&a=0&b=1&c=2000&d=0&e=1&f=2010",
parse_dates=['Date'])
MSFT = pd.read_csv(
"http://ichart.yahoo.com/table.csv?s=MSFT&a=0&b=1&c=2000&d=0&e=1&f=2010",
parse_dates=['Date'])
IBM = pd.read_csv(
"http://ichart.yahoo.com/table.csv?s=IBM&a=0&b=1&c=2000&d=0&e=1&f=2010",
parse_dates=['Date'])
xyvalues = OrderedDict(
AAPL=AAPL['Adj Close'],
Date=AAPL['Date'],
MSFT=MSFT['Adj Close'],
IBM=IBM['Adj Close'],
)
# any of the following commented lines are valid TimeSeries inputs
#xyvalues = pd.DataFrame(xyvalues)
#lindex = xyvalues.pop('Date')
#lxyvalues = list(xyvalues.values())
#lxyvalues = np.array(xyvalues.values())
TOOLS="resize,pan,wheel_zoom,box_zoom,reset,previewsave"
output_file("stocks_timeseries.html")
ts = TimeSeries(
xyvalues, index='Date', legend=True,
title="Timeseries", tools=TOOLS, ylabel='Stock Prices')
# usage with iterable index
#ts = TimeSeries(
# lxyvalues, index=lindex,
# title="timeseries, pd_input", ylabel='Stock Prices')
show(ts)
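# sketch of the DataFrame-based variant hinted at in the comments above
# (kept commented out, like the other alternatives, to avoid a second plot)
#df = pd.DataFrame(xyvalues)
#lindex = df.pop('Date')
#ts_df = TimeSeries(df, index=lindex, title="Timeseries, DataFrame input",
#                   ylabel='Stock Prices')
#show(ts_df)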
| bsd-3-clause |
jeffzheng1/tensorflow | tensorflow/contrib/learn/python/learn/experiment.py | 4 | 15233 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experiment class collecting information needed for a single training run."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import time
from tensorflow.contrib.framework import deprecated
from tensorflow.contrib.framework import deprecated_arg_values
from tensorflow.contrib.learn.python.learn import evaluable
from tensorflow.contrib.learn.python.learn import monitors
from tensorflow.contrib.learn.python.learn import trainable
from tensorflow.contrib.learn.python.learn.estimators._sklearn import NotFittedError
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import server_lib
__all__ = ["Experiment"]
class Experiment(object):
"""Experiment is a class containing all information needed to train a model.
After an experiment is created (by passing an Estimator and inputs for
training and evaluation), an Experiment instance knows how to invoke training
and eval loops in a sensible fashion for distributed training.
"""
@deprecated_arg_values(
"2016-10-23",
"local_eval_frequency is deprecated as local_run will be renamed to "
"train_and_evaluate. Use min_eval_frequency and call train_and_evaluate "
"instead. Note, however, that the default for min_eval_frequency is 1, "
"meaning models will be evaluated every time a new checkpoint is "
"available. In contrast, the default for local_eval_frequency is None, "
"resulting in evaluation occurring only after training has completed. "
"min_eval_frequency is ignored when calling the deprecated local_run.",
local_eval_frequency=None)
def __init__(self,
estimator,
train_input_fn,
eval_input_fn,
eval_metrics=None,
train_steps=None,
eval_steps=100,
train_monitors=None,
local_eval_frequency=None,
eval_delay_secs=120,
continuous_eval_throttle_secs=60,
min_eval_frequency=1):
"""Constructor for `Experiment`.
Creates an Experiment instance. None of the functions passed to this
constructor are executed at construction time. They are stored and used
when a method is executed which requires it.
Args:
estimator: Object implementing `Trainable` and `Evaluable`.
train_input_fn: function, returns features and labels for training.
eval_input_fn: function, returns features and labels for evaluation. If
        `eval_steps` is `None`, this should be configured to produce only a
finite number of batches (generally, 1 epoch over the evaluation data).
eval_metrics: `dict` of string, metric function. If `None`, default set
is used.
train_steps: Perform this many steps of training. `None`, the default,
means train forever.
eval_steps: `evaluate` runs until input is exhausted (or another exception
is raised), or for `eval_steps` steps, if specified.
train_monitors: A list of monitors to pass to the `Estimator`'s `fit`
function.
local_eval_frequency: Frequency of running eval in steps,
when running locally. If `None`, runs evaluation only at the end of
training.
eval_delay_secs: Start evaluating after waiting for this many seconds.
continuous_eval_throttle_secs: Do not re-evaluate unless the last
evaluation was started at least this many seconds ago for
continuous_eval().
min_eval_frequency: (applies only to train_and_evaluate). the minimum
number of steps between evaluations. Of course, evaluation does not
occur if no new snapshot is available, hence, this is the minimum.
Raises:
ValueError: if `estimator` does not implement `Evaluable` and `Trainable`.
"""
if not isinstance(estimator, evaluable.Evaluable):
raise ValueError("`estimator` must implement `Evaluable`.")
if not isinstance(estimator, trainable.Trainable):
raise ValueError("`estimator` must implement `Trainable`.")
super(Experiment, self).__init__()
self._estimator = estimator
self._train_input_fn = train_input_fn
self._eval_input_fn = eval_input_fn
self._eval_metrics = eval_metrics
self._train_steps = train_steps
self._eval_steps = eval_steps
self._train_monitors = train_monitors
self._local_eval_frequency = local_eval_frequency
self._eval_delay_secs = eval_delay_secs
self._continuous_eval_throttle_secs = continuous_eval_throttle_secs
self._min_eval_frequency = min_eval_frequency
@property
def estimator(self):
return self._estimator
def train(self, delay_secs=None):
"""Fit the estimator using the training data.
Train the estimator for `self._train_steps` steps, after waiting for
`delay_secs` seconds. If `self._train_steps` is `None`, train forever.
Args:
delay_secs: Start training after this many seconds.
Returns:
The trained estimator.
"""
start = time.time()
# Start the server, if needed. It's important to start the server before
# we (optionally) sleep for the case where no device_filters are set.
# Otherwise, the servers will wait to connect to each other before starting
# to train. We might as well start as soon as we can.
if self._estimator.config.cluster_spec and self._estimator.config.master:
self._start_server()
if delay_secs is None:
task_id = self._estimator.config.task or 0
delay_secs = min(60, task_id * 5)
if delay_secs:
elapsed_secs = time.time() - start
remaining = delay_secs - elapsed_secs
logging.info("Waiting %d secs before starting training.", remaining)
time.sleep(delay_secs)
return self._estimator.fit(input_fn=self._train_input_fn,
max_steps=self._train_steps,
monitors=self._train_monitors)
def evaluate(self, delay_secs=None):
"""Evaluate on the evaluation data.
Runs evaluation on the evaluation data and returns the result. Runs for
`self._eval_steps` steps, or if it's `None`, then run until input is
exhausted or another exception is raised. Start the evaluation after
`delay_secs` seconds, or if it's `None`, defaults to using
`self._eval_delay_secs` seconds.
Args:
delay_secs: Start evaluating after this many seconds. If `None`, defaults
to using `self._eval_delays_secs`.
Returns:
The result of the `evaluate` call to the `Estimator`.
"""
if delay_secs is None:
delay_secs = self._eval_delay_secs
if delay_secs:
logging.info("Waiting %d secs before starting eval.", delay_secs)
time.sleep(delay_secs)
return self._estimator.evaluate(input_fn=self._eval_input_fn,
steps=self._eval_steps,
metrics=self._eval_metrics,
name="one_pass")
@deprecated(
"2016-10-23",
"local_run will be renamed to train_and_evaluate and the new default "
"behavior will be to run evaluation every time there is a new "
"checkpoint.")
def local_run(self):
with _new_attr_context(self, "_min_eval_frequency"):
self._min_eval_frequency = self._local_eval_frequency
return self.train_and_evaluate()
def _continuous_eval(self,
input_fn,
name,
delay_secs,
throttle_delay_secs):
"""Run continuous eval.
Runs infinite eval on the evaluation data set. This function starts
evaluating after `delay_secs` seconds and then runs no more than one
evaluation (with `self._eval_steps` steps each time) per
`throttle_delay_secs`. It never returns.
Args:
input_fn: The input to use for this eval.
name: A string appended to the folder name of evaluation results.
delay_secs: Start evaluating after this many seconds. If None, defaults to
self._eval_delay_secs.
throttle_delay_secs: Do not re-evaluate unless the last evaluation was
started at least this many seconds ago. If None, defaults to
self._continuous_eval_throttle_secs.
"""
if delay_secs is None:
delay_secs = self._eval_delay_secs
if throttle_delay_secs is None:
throttle_delay_secs = self._continuous_eval_throttle_secs
if delay_secs:
logging.info("Waiting %f secs before starting eval.", delay_secs)
time.sleep(delay_secs)
last_fitted_error_time = 0
while True:
start = time.time()
try:
self._estimator.evaluate(input_fn=input_fn,
steps=self._eval_steps,
metrics=self._eval_metrics,
name=name)
except NotFittedError:
# Print warning message every 10 mins.
if time.time() - last_fitted_error_time > 600:
logging.warning(
"Estimator is not fitted yet. "
"Will start an evaluation when a checkpoint will be ready.")
last_fitted_error_time = time.time()
duration = time.time() - start
if duration < throttle_delay_secs:
difference = throttle_delay_secs - duration
logging.info("Waiting %f secs before starting next eval run.",
difference)
time.sleep(difference)
def continuous_eval(self, delay_secs=None, throttle_delay_secs=None):
self._continuous_eval(self._eval_input_fn,
name="continuous",
delay_secs=delay_secs,
throttle_delay_secs=throttle_delay_secs)
def continuous_eval_on_train_data(self,
delay_secs=None,
throttle_delay_secs=None):
self._continuous_eval(self._train_input_fn,
name="continuous_on_train_data",
delay_secs=delay_secs,
throttle_delay_secs=throttle_delay_secs)
def train_and_evaluate(self):
"""Interleaves training and evaluation.
    The frequency of evaluation is controlled by the constructor arg
`min_eval_frequency`. When this parameter is None or 0, evaluation happens
only after training has completed. Note that evaluation cannot happen
more frequently than checkpoints are taken. If no new snapshots are
available when evaluation is supposed to occur, then evaluation doesn't
happen for another `min_eval_frequency` steps (assuming a checkpoint is
    available at that point). Thus, setting `min_eval_frequency` to 1 means
    that the model will be evaluated every time there is a new checkpoint.
    This is particularly useful for a "Master" task in the cloud, whose
responsibility it is to take checkpoints, evaluate those checkpoints,
and write out summaries. Participating in training as the supervisor
allows such a task to accomplish the first and last items, while
performing evaluation allows for the second.
Returns:
The result of the `evaluate` call to the `Estimator`.
"""
# The directory to which evaluation summaries are written are determined
# by adding a suffix to 'eval'; that suffix is the 'name' parameter to
# the various evaluate(...) methods. By setting it to None, we force
# the directory name to simply be 'eval'.
eval_dir_suffix = None
# We set every_n_steps to 1, but evaluation only occurs when a new
# snapshot is available. If, by the time we finish evaluation
# there is a new snapshot, then we just evaluate again. Otherwise,
# we keep training until one becomes available.
with _new_attr_context(self, "_train_monitors"):
self._train_monitors = self._train_monitors or []
if self._min_eval_frequency:
self._train_monitors += [monitors.ValidationMonitor(
input_fn=self._eval_input_fn, eval_steps=self._eval_steps,
metrics=self._eval_metrics, every_n_steps=self._min_eval_frequency,
name=eval_dir_suffix,
)]
self.train(delay_secs=0)
return self._estimator.evaluate(input_fn=self._eval_input_fn,
steps=self._eval_steps,
metrics=self._eval_metrics,
name=eval_dir_suffix)
def run_std_server(self):
"""Starts a TensorFlow server and joins the serving thread.
Typically used for parameter servers.
Raises:
ValueError: if not enough information is available in the estimator's
config to create a server.
"""
self._start_server().join()
def test(self):
"""Tests training and evaluating the estimator both for a single step.
Returns:
The result of the `evaluate` call to the `Estimator`.
"""
self._estimator.fit(input_fn=self._train_input_fn,
steps=1,
monitors=self._train_monitors)
return self._estimator.evaluate(input_fn=self._eval_input_fn,
steps=1,
metrics=self._eval_metrics,
name="one_pass")
def _start_server(self):
"""Creates, starts, and returns a server_lib.Server."""
config = self._estimator.config
if (not config.cluster_spec or not config.job_name or not config.master or
config.task is None):
raise ValueError("Could not start server; be sure to specify "
"cluster_spec, job_name, master, and task in "
"RunConfig or set the TF_CONFIG environment variable.")
server = server_lib.Server(
config.cluster_spec,
job_name=config.job_name,
task_index=config.task,
config=config.tf_config,
start=False)
server.start()
return server
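# A minimal usage sketch for the class above (illustrative only; `my_estimator`,
# `my_train_input_fn` and `my_eval_input_fn` stand for user-supplied objects and
# are not defined in this module):
#
#   experiment = Experiment(estimator=my_estimator,
#                           train_input_fn=my_train_input_fn,
#                           eval_input_fn=my_eval_input_fn,
#                           train_steps=10000,
#                           min_eval_frequency=1)
#   experiment.train_and_evaluate()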
@contextlib.contextmanager
def _new_attr_context(obj, attr):
"""Creates a new context in which an object's attribute can be changed.
This creates a context in which an object's attribute can be changed.
Once the context is exited, the attribute reverts to its original value.
Example usage:
my_obj.x = 1
with _new_attr_context(my_obj, "x"):
my_obj.x = 2
print(my_obj.x)
print(my_obj.x)
"""
saved = getattr(obj, attr)
try:
yield
finally:
setattr(obj, attr, saved)
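# Self-contained sketch of how _new_attr_context behaves (the `_Demo` class is
# defined here only for illustration and is not part of the module's API).
if __name__ == "__main__":
  class _Demo(object):
    x = 1

  _demo = _Demo()
  with _new_attr_context(_demo, "x"):
    _demo.x = 2
    print(_demo.x)  # prints 2 inside the context
  print(_demo.x)    # prints 1 again; the saved value is restored on exit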
| apache-2.0 |
matthew-tucker/mne-python | examples/time_frequency/plot_source_power_spectrum.py | 19 | 1929 | """
=========================================================
Compute power spectrum densities of the sources with dSPM
=========================================================
Returns an STC file containing the PSD (in dB) of each of the sources.
"""
# Authors: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
from mne.minimum_norm import read_inverse_operator, compute_source_psd
print(__doc__)
###############################################################################
# Set parameters
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
fname_label = data_path + '/MEG/sample/labels/Aud-lh.label'
# Setup for reading the raw data
raw = io.Raw(raw_fname, verbose=False)
events = mne.find_events(raw, stim_channel='STI 014')
inverse_operator = read_inverse_operator(fname_inv)
raw.info['bads'] = ['MEG 2443', 'EEG 053']
# picks MEG gradiometers
picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=True,
stim=False, exclude='bads')
tmin, tmax = 0, 120 # use the first 120s of data
fmin, fmax = 4, 100 # look at frequencies between 4 and 100Hz
n_fft = 2048 # the FFT size (n_fft). Ideally a power of 2
label = mne.read_label(fname_label)
stc = compute_source_psd(raw, inverse_operator, lambda2=1. / 9., method="dSPM",
tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax,
pick_ori="normal", n_fft=n_fft, label=label)
stc.save('psd_dSPM')
###############################################################################
# View PSD of sources in label
plt.plot(1e3 * stc.times, stc.data.T)
plt.xlabel('Frequency (Hz)')
plt.ylabel('PSD (dB)')
plt.title('Source Power Spectrum (PSD)')
plt.show()
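# Follow-up sketch (not part of the original example): the saved source
# estimate can be loaded back later for further inspection.
# stc_psd = mne.read_source_estimate('psd_dSPM')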
| bsd-3-clause |
bradleyhd/netsim | nodes_vs_routing_speed.py | 1 | 2878 | import matplotlib.pyplot as plt
import numpy as np
import math
from scipy.optimize import curve_fit
def linear(x, a, b):
return a * x + b
def quadratic(x, a, b, c):
return a * x**2 + b * x + c
def exponential(x, a, b, c):
return a * x**b + c
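# Sanity-check sketch for the power-law model above (commented out so the
# script's output is unchanged): fitting synthetic data generated from known
# parameters should recover roughly those parameters.
#_x = np.linspace(1, 100, 50)
#_popt, _ = curve_fit(exponential, _x, exponential(_x, 2.0, 0.5, 1.0))
#print(_popt)  # ~[2.0, 0.5, 1.0]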
fig = plt.figure(num=None, figsize=(12, 8), dpi=300, facecolor='k', edgecolor='k')
xs = [[1014, 4383, 11821, 37698, 108043, 286563, 672292], [1014, 4383, 11821, 37698, 108043, 286563, 672292], [1014, 4383, 11821, 37698, 108043, 286563, 672292], [1014, 4383, 11821, 37698, 108043, 286563, 672292]]
ys = [[0.00013309850001519408, 0.00059208550001699223, 0.002604027000003839, 0.004665461000030291, 0.014662985999962075, 0.023410306499954459, 0.041176939000251878], [0.00014861549998101964, 0.00055641999999522795, 0.002577900000005684, 0.0054275369999459144, 0.021226498000032734, 0.029786237500047719, 0.059782716000881919], [0.00012334000000180367, 0.00043368899999052246, 0.0020054734999632728, 0.005848614000001362, 0.014609930999995413, 0.019599954500336025, 0.028973604500606598], [0.00012613299999486571, 0.00044437049999146438, 0.0021501399999692694, 0.0055929929999933847, 0.019908546500118973, 0.039582631500252319, 0.054390303499531001]]
ys = np.array(ys) * 1000
def graph(i, label, color, marker, l_marker):
y = np.array(ys[i])
x = np.array(xs[i])
xl = np.linspace(np.min(x), np.max(x), 500)
popt, pcov = curve_fit(exponential, x, y)
plt.scatter(x, y, label=label, color=color, marker=marker)
plt.plot(xl, exponential(xl, *popt), color=color, linestyle=l_marker)
blue = '#5738FF'
purple = '#E747E7'
orange = '#E7A725'
green = '#A1FF47'
red = '#FF1E43'
gray = '#333333'
white = 'w'
graph(0, 'EDS5 - original graph', red, 'o', '--')
graph(1, 'N5 - original graph', purple, 's', '--')
graph(2, 'EDS5 - decision graph', blue, '^', '--')
graph(3, 'N5 - decision graph', white, 'D', '--')
ax = fig.gca()
plt.title('Effects of Node Ordering on Routing Speed', color=white)
plt.xlabel('Effective $\\vert V\/\\vert$')
plt.ylabel('Routing Time (ms)')
plt.axes().set_axis_bgcolor('black')
ax.xaxis.label.set_color(white)
ax.yaxis.label.set_color(white)
ax.tick_params(axis='x', colors=white)
ax.tick_params(axis='y', colors=white)
ax.spines['bottom'].set_color(white)
ax.spines['top'].set_color(white)
ax.spines['left'].set_color(white)
ax.spines['right'].set_color(white)
legend = plt.legend(loc=0, numpoints=1, framealpha=0.0)
legend.get_frame().set_facecolor('k')
max_x = np.max(np.array(xs))
max_y = np.max(np.array(ys))
min_x = np.min(np.array(xs))
min_y = 0 - (max_y * 0.01)
min_x = 0 - (max_x * 0.01)
max_x *= 1.01
max_y *= 1.01
plt.axes().set_xlim([min_x, max_x])
plt.axes().set_ylim([min_y, max_y])
for text in legend.get_texts():
text.set_color(white)
# plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
plt.savefig('nodes_vs_routing_speed.png', transparent=True)
#plt.show() | gpl-3.0 |
phev8/ward-metrics | wardmetrics/visualisations.py | 1 | 16641 | import matplotlib.pyplot as plt
def plot_events_with_segment_scores(segment_results, ground_truth_events, detected_events, use_datetime_x=False, show=True):
"""
    Plot ground truth events, detected events and the per-segment scores on a shared timeline.
:param segment_results:
:param ground_truth_events:
:param detected_events:
:param use_datetime_x:
:param show:
:return:
"""
fig = plt.figure(figsize=(10, 3))
a = 3
# TODO: convert times to datetime if flag is set
# write y axis labels for ground truth and detections
plt.yticks([0.2, 0.5, 0.8], ["detections", "segment score", "actual events"])
plt.ylim([0, 1])
for d in detected_events:
plt.axvspan(d[0], d[1], 0, 0.5)
for gt in ground_truth_events:
plt.axvspan(gt[0], gt[1], 0.5, 1)
for s in segment_results:
color = "black"
index_of_cat = 4
if s[index_of_cat] == "TP":
color = "green"
elif s[index_of_cat] == "FP":
color = "red"
elif s[index_of_cat] == "FN":
color = "yellow"
elif s[index_of_cat] == "TN":
color = "blue"
# TODO: format text nicely
plt.text((s[1]+s[0])/2, 0.8, s[2], horizontalalignment='center', verticalalignment='center')
plt.text((s[1]+s[0])/2, 0.2, s[3], horizontalalignment='center', verticalalignment='center')
plt.text((s[1]+s[0])/2, 0.5, s[5], horizontalalignment='center', verticalalignment='center')
plt.axvspan(s[0], s[1], 0.4, 0.6, color=color)
plt.axvline(s[0], color="black")
plt.axvline(s[1], color="black")
plt.tight_layout()
if show:
plt.show()
else:
plt.draw()
def plot_events_with_event_scores(gt_event_scores, detected_event_scores, ground_truth_events, detected_events, show=True):
fig = plt.figure(figsize=(10, 3))
for i in range(len(detected_events)):
d = detected_events[i]
plt.axvspan(d[0], d[1], 0, 0.5)
plt.text((d[1] + d[0]) / 2, 0.2, detected_event_scores[i], horizontalalignment='center', verticalalignment='center')
for i in range(len(ground_truth_events)):
gt = ground_truth_events[i]
plt.axvspan(gt[0], gt[1], 0.5, 1)
plt.text((gt[1] + gt[0]) / 2, 0.8, gt_event_scores[i], horizontalalignment='center', verticalalignment='center')
plt.tight_layout()
if show:
plt.show()
else:
plt.draw()
def plot_twoset_metrics(results, startangle=120):
fig1, axarr = plt.subplots(1, 2)
# plot positive rates:
labels_1 = ["tpr", "us", "ue", "fr", "dr"]
values_1 = [
results["tpr"],
results["us"],
results["ue"],
results["fr"],
results["dr"]
]
axarr[0].pie(values_1, labels=labels_1, autopct='%1.0f%%', startangle=startangle)
axarr[0].axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.
# TODO: add title
# plot negative rates:
labels_2 = ["1-fpr", "os", "oe", "mr", "ir"]
values_2 = [
1-results["fpr"],
results["os"],
results["oe"],
results["mr"],
results["ir"]
]
axarr[1].pie(values_2, labels=labels_2, autopct='%1.0f%%', startangle=startangle)
axarr[1].axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.
# TODO: add title
plt.show()
def plot_segment_counts(results):
# TODO: add title
labels = results.keys()
values = []
for label in labels:
values.append(results[label])
#explode = (0, 0.1, 0, 0) # only "explode" the 2nd slice (i.e. 'Hogs')
total = sum(values)
fig1, ax1 = plt.subplots()
ax1.pie(values, labels=labels, autopct=lambda p: '{:.0f}'.format(p * total / 100), startangle=90)
ax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.
plt.show()
def plot_event_analysis_diagram(event_results, **kwargs):
""" Plot the event analysis diagram (EAD) for the given results
Visualisation of the distribution of specific error types either with the actual event count or
showing the percentage of the total events. Elements of the plot can be adjusted (like color, fontsize etc.)
Args:
event_results (dictionary): Dictionary containing event counts for "total_gt", "total_det", "D", "F", "FM", "M",
"C", "M'", "FM'", "F'", "I'" as returned by core_methods.event_metrics' third value
Keyword Arguments:
        fontsize (int): Size of the text inside the bar plot (reduce the value if the labels do not fit into narrow segments)
use_percentage (bool): whether percentage values or to show actual event counts on the chart (default: False)
show (bool): whether to call plt.show (blocking) or plt.draw() for later displaying (default: True)
color_deletion: any matplotlib color for deletion events
color_fragmented: any matplotlib color for fragmented ground truth events
color_fragmented_merged: any matplotlib color for merged and fragmented ground truth events
color_merged: any matplotlib color for merged ground truth events
color_correct: any matplotlib color for correct events
color_merging: any matplotlib color for merging detection events
color_merging_fragmenting: any matplotlib color for merging and fragmenting detection events
color_fragmenting: any matplotlib color for merging detection events
color_insertion: any matplotlib color for insertion events
Returns:
matplotlib Figure: matplotlib figure reference
"""
fig = plt.figure(figsize=(10, 2))
total = event_results["total_gt"] + event_results["total_det"] - event_results["C"]
# Layout settings:
y_min = 0.3
y_max = 0.7
width = 0.02
text_x_offset = 0
text_y_pos_1 = 0.55
text_y_pos_2 = 0.4
fontsize = kwargs.pop('fontsize', 10)
fontsize_extern = 12
use_percentage = kwargs.pop('use_percentage', False)
# Color settings:
cmap = plt.get_cmap("Paired")
color_deletion = kwargs.pop('color_deletion', cmap(4))
color_fragmented = kwargs.pop('color_fragmented', cmap(6))
color_fragmented_merged = kwargs.pop('color_fragmented_merged', cmap(0))
color_merged = kwargs.pop('color_merged', cmap(8))
color_correct = kwargs.pop('color_correct', cmap(3))
color_merging = kwargs.pop('color_merging', cmap(9))
color_merging_fragmenting = kwargs.pop('color_merging_fragmenting', cmap(1))
color_fragmenting = kwargs.pop('color_fragmenting', cmap(7))
color_insertion = kwargs.pop('color_insertion', cmap(5))
# Show deletions:
current_score = "D"
current_x_start = 0
current_x_end = event_results[current_score]
plt.axvspan(current_x_start, current_x_end, y_min, y_max, color=color_deletion)
if event_results[current_score] > 0:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_1, current_score, fontsize=fontsize,
horizontalalignment='center', verticalalignment='center')
if use_percentage:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2, "{:.0f}".format(event_results[current_score]*100/event_results["total_gt"]) + "%",
fontsize=fontsize, horizontalalignment='center', verticalalignment='center')
else:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2, str(event_results[current_score]),
fontsize=fontsize, horizontalalignment='center', verticalalignment='center')
# Show fragmented events:
current_score = "F"
current_x_start = current_x_end
current_x_end += event_results[current_score]
plt.axvspan(current_x_start, current_x_end, y_min, y_max, color=color_fragmented)
if event_results[current_score] > 0:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_1, current_score, fontsize=fontsize,
horizontalalignment='center', verticalalignment='center')
if use_percentage:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2,
"{:.0f}".format(event_results[current_score] * 100 / event_results["total_gt"]) + "%",
fontsize=fontsize, horizontalalignment='center', verticalalignment='center')
else:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2, str(event_results[current_score]),
fontsize=fontsize, horizontalalignment='center', verticalalignment='center')
# Show fragmented and merged events:
current_score = "FM"
current_x_start = current_x_end
current_x_end += event_results[current_score]
plt.axvspan(current_x_start, current_x_end, y_min, y_max, color=color_fragmented_merged)
if event_results[current_score] > 0:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_1, current_score, fontsize=fontsize,
horizontalalignment='center', verticalalignment='center')
if use_percentage:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2,
"{:.0f}".format(event_results[current_score] * 100 / event_results["total_gt"]) + "%",
fontsize=fontsize, horizontalalignment='center', verticalalignment='center')
else:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2, str(event_results[current_score]),
fontsize=fontsize, horizontalalignment='center', verticalalignment='center')
# Show merged events:
current_score = "M"
current_x_start = current_x_end
current_x_end += event_results[current_score]
plt.axvspan(current_x_start, current_x_end, y_min, y_max, color=color_merged)
if event_results[current_score] > 0:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_1, current_score, fontsize=fontsize,
horizontalalignment='center', verticalalignment='center')
if use_percentage:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2,
"{:.0f}".format(event_results[current_score] * 100 / event_results["total_gt"]) + "%",
fontsize=fontsize, horizontalalignment='center', verticalalignment='center')
else:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2, str(event_results[current_score]),
fontsize=fontsize, horizontalalignment='center', verticalalignment='center')
# Show correct events:
current_score = "C"
current_x_start = current_x_end
current_x_end += event_results[current_score]
plt.axvspan(current_x_start, current_x_end, y_min, y_max, color=color_correct)
if event_results[current_score] > 0:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_1, current_score, fontsize=fontsize,
horizontalalignment='center', verticalalignment='center')
if use_percentage:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2,
"{:.0f}".format(event_results[current_score] * 100 / event_results["total_gt"]) + "%/" + "{:.0f}".format(event_results[current_score] * 100 / event_results["total_det"]) + "%",
fontsize=fontsize, horizontalalignment='center', verticalalignment='center')
else:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2, str(event_results[current_score]),
fontsize=fontsize, horizontalalignment='center', verticalalignment='center')
# Show merging detections:
current_score = "M'"
current_x_start = current_x_end
current_x_end += event_results[current_score]
plt.axvspan(current_x_start, current_x_end, y_min, y_max, color=color_merging)
if event_results[current_score] > 0:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_1, current_score, fontsize=fontsize,
horizontalalignment='center', verticalalignment='center')
if use_percentage:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2,
"{:.0f}".format(event_results[current_score] * 100 / event_results["total_det"]) + "%",
fontsize=fontsize, horizontalalignment='center', verticalalignment='center')
else:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2, str(event_results[current_score]),
fontsize=fontsize, horizontalalignment='center', verticalalignment='center')
# Show fragmenting and merging detections:
current_score = "FM'"
current_x_start = current_x_end
current_x_end += event_results[current_score]
plt.axvspan(current_x_start, current_x_end, y_min, y_max, color=color_merging_fragmenting)
if event_results[current_score] > 0:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_1, current_score, fontsize=fontsize,
horizontalalignment='center', verticalalignment='center')
if use_percentage:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2,
"{:.0f}".format(event_results[current_score] * 100 / event_results["total_det"]) + "%",
fontsize=fontsize, horizontalalignment='center', verticalalignment='center')
else:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2, str(event_results[current_score]),
fontsize=fontsize, horizontalalignment='center', verticalalignment='center')
# Show fragmenting detections:
current_score = "F'"
current_x_start = current_x_end
current_x_end += event_results[current_score]
plt.axvspan(current_x_start, current_x_end, y_min, y_max, color=color_fragmenting)
if event_results[current_score] > 0:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_1, current_score, fontsize=fontsize,
horizontalalignment='center', verticalalignment='center')
if use_percentage:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2,
"{:.0f}".format(event_results[current_score] * 100 / event_results["total_det"]) + "%",
fontsize=fontsize, horizontalalignment='center', verticalalignment='center')
else:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2, str(event_results[current_score]),
fontsize=fontsize, horizontalalignment='center', verticalalignment='center')
# Show insertions:
current_score = "I'"
current_x_start = current_x_end
current_x_end += event_results[current_score]
plt.axvspan(current_x_start, current_x_end, y_min, y_max, color=color_insertion)
if event_results[current_score] > 0:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_1, current_score, fontsize=fontsize,
horizontalalignment='center', verticalalignment='center')
if use_percentage:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2,
"{:.0f}".format(event_results[current_score] * 100 / event_results["total_det"]) + "%",
fontsize=fontsize, horizontalalignment='center', verticalalignment='center')
else:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2, str(event_results[current_score]),
fontsize=fontsize, horizontalalignment='center', verticalalignment='center')
# Draw line for total events:
plt.axvspan(0, event_results["total_gt"], y_max, y_max + width, color="black")
plt.axvspan( total - event_results["total_det"], total, y_min, y_min - width, color="black")
plt.text((0 + event_results["total_gt"]) / 2, 0.8, "Actual events (total=" + str(event_results["total_gt"]) + ")",
fontsize=fontsize_extern, horizontalalignment='center', verticalalignment='center')
plt.text((2*total - event_results["total_det"]) / 2, 0.18, "Detected events (total=" + str(event_results["total_det"]) + ")",
horizontalalignment='center', fontsize=fontsize_extern, verticalalignment='center')
plt.tight_layout()
if kwargs.pop('show', True):
plt.show()
else:
plt.draw()
return fig
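# Usage sketch for plot_event_analysis_diagram (the counts below are invented
# for illustration; real dictionaries come from the event metrics computation):
if __name__ == "__main__":
    _example_results = {
        "total_gt": 10, "total_det": 11,
        "D": 1, "F": 2, "FM": 1, "M": 1, "C": 5,
        "M'": 1, "FM'": 1, "F'": 3, "I'": 1,
    }
    plot_event_analysis_diagram(_example_results, use_percentage=True)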
| mit |
duthchao/kaggle-galaxies | predict_augmented_npy_maxout2048_pysex.py | 7 | 9584 | """
Load an analysis file and redo the predictions on the validation set / test set,
this time with augmented data and averaging. Store them as numpy files.
"""
import numpy as np
# import pandas as pd
import theano
import theano.tensor as T
import layers
import cc_layers
import custom
import load_data
import realtime_augmentation as ra
import time
import csv
import os
import cPickle as pickle
BATCH_SIZE = 32 # 16
NUM_INPUT_FEATURES = 3
CHUNK_SIZE = 8000 # 10000 # this should be a multiple of the batch size
# ANALYSIS_PATH = "analysis/try_convnet_cc_multirot_3x69r45_untied_bias.pkl"
ANALYSIS_PATH = "analysis/final/try_convnet_cc_multirotflip_3x69r45_maxout2048_pysex.pkl"
DO_VALID = True # disable this to not bother with the validation set evaluation
DO_TEST = True # disable this to not generate predictions on the testset
target_filename = os.path.basename(ANALYSIS_PATH).replace(".pkl", ".npy.gz")
target_path_valid = os.path.join("predictions/final/augmented/valid", target_filename)
target_path_test = os.path.join("predictions/final/augmented/test", target_filename)
print "Loading model data etc."
analysis = np.load(ANALYSIS_PATH)
input_sizes = [(69, 69), (69, 69)]
ds_transforms = [
ra.build_ds_transform(3.0, target_size=input_sizes[0]),
ra.build_ds_transform(3.0, target_size=input_sizes[1]) + ra.build_augmentation_transform(rotation=45)]
num_input_representations = len(ds_transforms)
# split training data into training + a small validation set
num_train = load_data.num_train
num_valid = num_train // 10 # integer division
num_train -= num_valid
num_test = load_data.num_test
valid_ids = load_data.train_ids[num_train:]
train_ids = load_data.train_ids[:num_train]
test_ids = load_data.test_ids
train_indices = np.arange(num_train)
valid_indices = np.arange(num_train, num_train+num_valid)
test_indices = np.arange(num_test)
y_valid = np.load("data/solutions_train.npy")[num_train:]
print "Build model"
l0 = layers.Input2DLayer(BATCH_SIZE, NUM_INPUT_FEATURES, input_sizes[0][0], input_sizes[0][1])
l0_45 = layers.Input2DLayer(BATCH_SIZE, NUM_INPUT_FEATURES, input_sizes[1][0], input_sizes[1][1])
l0r = layers.MultiRotSliceLayer([l0, l0_45], part_size=45, include_flip=True)
l0s = cc_layers.ShuffleBC01ToC01BLayer(l0r)
l1a = cc_layers.CudaConvnetConv2DLayer(l0s, n_filters=32, filter_size=6, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l1 = cc_layers.CudaConvnetPooling2DLayer(l1a, pool_size=2)
l2a = cc_layers.CudaConvnetConv2DLayer(l1, n_filters=64, filter_size=5, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l2 = cc_layers.CudaConvnetPooling2DLayer(l2a, pool_size=2)
l3a = cc_layers.CudaConvnetConv2DLayer(l2, n_filters=128, filter_size=3, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l3b = cc_layers.CudaConvnetConv2DLayer(l3a, n_filters=128, filter_size=3, pad=0, weights_std=0.1, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l3 = cc_layers.CudaConvnetPooling2DLayer(l3b, pool_size=2)
l3s = cc_layers.ShuffleC01BToBC01Layer(l3)
j3 = layers.MultiRotMergeLayer(l3s, num_views=4) # 2) # merge convolutional parts
# l4 = layers.DenseLayer(j3, n_outputs=4096, weights_std=0.001, init_bias_value=0.01, dropout=0.5)
l4a = layers.DenseLayer(j3, n_outputs=4096, weights_std=0.001, init_bias_value=0.01, dropout=0.5, nonlinearity=layers.identity)
l4 = layers.FeatureMaxPoolingLayer(l4a, pool_size=2, feature_dim=1, implementation='reshape')
# l5 = layers.DenseLayer(l4, n_outputs=37, weights_std=0.01, init_bias_value=0.0, dropout=0.5, nonlinearity=custom.clip_01) # nonlinearity=layers.identity)
l5 = layers.DenseLayer(l4, n_outputs=37, weights_std=0.01, init_bias_value=0.1, dropout=0.5, nonlinearity=layers.identity)
# l6 = layers.OutputLayer(l5, error_measure='mse')
l6 = custom.OptimisedDivGalaxyOutputLayer(l5) # this incorporates the constraints on the output (probabilities sum to one, weighting, etc.)
xs_shared = [theano.shared(np.zeros((1,1,1,1), dtype=theano.config.floatX)) for _ in xrange(num_input_representations)]
idx = T.lscalar('idx')
givens = {
l0.input_var: xs_shared[0][idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
l0_45.input_var: xs_shared[1][idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
}
compute_output = theano.function([idx], l6.predictions(dropout_active=False), givens=givens)
print "Load model parameters"
layers.set_param_values(l6, analysis['param_values'])
print "Create generators"
# set here which transforms to use to make predictions
augmentation_transforms = []
for zoom in [1 / 1.2, 1.0, 1.2]:
for angle in np.linspace(0, 360, 10, endpoint=False):
augmentation_transforms.append(ra.build_augmentation_transform(rotation=angle, zoom=zoom))
augmentation_transforms.append(ra.build_augmentation_transform(rotation=(angle + 180), zoom=zoom, shear=180)) # flipped
print " %d augmentation transforms." % len(augmentation_transforms)
augmented_data_gen_valid = ra.realtime_fixed_augmented_data_gen(valid_indices, 'train', augmentation_transforms=augmentation_transforms, chunk_size=CHUNK_SIZE, target_sizes=input_sizes, ds_transforms=ds_transforms, processor_class=ra.LoadAndProcessFixedPysexCenteringRescaling)
valid_gen = load_data.buffered_gen_mp(augmented_data_gen_valid, buffer_size=1)
augmented_data_gen_test = ra.realtime_fixed_augmented_data_gen(test_indices, 'test', augmentation_transforms=augmentation_transforms, chunk_size=CHUNK_SIZE, target_sizes=input_sizes, ds_transforms=ds_transforms, processor_class=ra.LoadAndProcessFixedPysexCenteringRescaling)
test_gen = load_data.buffered_gen_mp(augmented_data_gen_test, buffer_size=1)
approx_num_chunks_valid = int(np.ceil(num_valid * len(augmentation_transforms) / float(CHUNK_SIZE)))
approx_num_chunks_test = int(np.ceil(num_test * len(augmentation_transforms) / float(CHUNK_SIZE)))
print "Approximately %d chunks for the validation set" % approx_num_chunks_valid
print "Approximately %d chunks for the test set" % approx_num_chunks_test
if DO_VALID:
print
print "VALIDATION SET"
print "Compute predictions"
predictions_list = []
start_time = time.time()
for e, (chunk_data, chunk_length) in enumerate(valid_gen):
print "Chunk %d" % (e + 1)
xs_chunk = chunk_data
# need to transpose the chunks to move the 'channels' dimension up
xs_chunk = [x_chunk.transpose(0, 3, 1, 2) for x_chunk in xs_chunk]
print " load data onto GPU"
for x_shared, x_chunk in zip(xs_shared, xs_chunk):
x_shared.set_value(x_chunk)
num_batches_chunk = int(np.ceil(chunk_length / float(BATCH_SIZE)))
        # make predictions, don't forget to cut off the zeros at the end
predictions_chunk_list = []
for b in xrange(num_batches_chunk):
if b % 1000 == 0:
print " batch %d/%d" % (b + 1, num_batches_chunk)
predictions = compute_output(b)
predictions_chunk_list.append(predictions)
predictions_chunk = np.vstack(predictions_chunk_list)
predictions_chunk = predictions_chunk[:chunk_length] # cut off zeros / padding
print " compute average over transforms"
predictions_chunk_avg = predictions_chunk.reshape(-1, len(augmentation_transforms), 37).mean(1)
predictions_list.append(predictions_chunk_avg)
time_since_start = time.time() - start_time
print " %s since start" % load_data.hms(time_since_start)
all_predictions = np.vstack(predictions_list)
print "Write predictions to %s" % target_path_valid
load_data.save_gz(target_path_valid, all_predictions)
print "Evaluate"
rmse_valid = analysis['losses_valid'][-1]
rmse_augmented = np.sqrt(np.mean((y_valid - all_predictions)**2))
print " MSE (last iteration):\t%.6f" % rmse_valid
print " MSE (augmented):\t%.6f" % rmse_augmented
if DO_TEST:
print
print "TEST SET"
print "Compute predictions"
predictions_list = []
start_time = time.time()
for e, (chunk_data, chunk_length) in enumerate(test_gen):
print "Chunk %d" % (e + 1)
xs_chunk = chunk_data
# need to transpose the chunks to move the 'channels' dimension up
xs_chunk = [x_chunk.transpose(0, 3, 1, 2) for x_chunk in xs_chunk]
print " load data onto GPU"
for x_shared, x_chunk in zip(xs_shared, xs_chunk):
x_shared.set_value(x_chunk)
num_batches_chunk = int(np.ceil(chunk_length / float(BATCH_SIZE)))
        # make predictions, don't forget to cut off the zeros at the end
predictions_chunk_list = []
for b in xrange(num_batches_chunk):
if b % 1000 == 0:
print " batch %d/%d" % (b + 1, num_batches_chunk)
predictions = compute_output(b)
predictions_chunk_list.append(predictions)
predictions_chunk = np.vstack(predictions_chunk_list)
predictions_chunk = predictions_chunk[:chunk_length] # cut off zeros / padding
print " compute average over transforms"
predictions_chunk_avg = predictions_chunk.reshape(-1, len(augmentation_transforms), 37).mean(1)
predictions_list.append(predictions_chunk_avg)
time_since_start = time.time() - start_time
print " %s since start" % load_data.hms(time_since_start)
all_predictions = np.vstack(predictions_list)
print "Write predictions to %s" % target_path_test
load_data.save_gz(target_path_test, all_predictions)
print "Done!"
| bsd-3-clause |
thypad/brew | skensemble/generation/bagging.py | 3 | 2140 | import numpy as np
from sklearn.ensemble import BaggingClassifier
from brew.base import Ensemble
from brew.combination.combiner import Combiner
import sklearn
from .base import PoolGenerator
class Bagging(PoolGenerator):
def __init__(self,
base_classifier=None,
n_classifiers=100,
combination_rule='majority_vote'):
self.base_classifier = base_classifier
self.n_classifiers = n_classifiers
self.ensemble = None
self.combiner = Combiner(rule=combination_rule)
def fit(self, X, y):
self.ensemble = Ensemble()
for _ in range(self.n_classifiers):
# bootstrap
idx = np.random.choice(X.shape[0], X.shape[0], replace=True)
data, target = X[idx, :], y[idx]
classifier = sklearn.base.clone(self.base_classifier)
classifier.fit(data, target)
self.ensemble.add(classifier)
return
def predict(self, X):
out = self.ensemble.output(X)
return self.combiner.combine(out)
class BaggingSK(PoolGenerator):
""""
This class should not be used, use brew.generation.bagging.Bagging instead.
"""
def __init__(self,
base_classifier=None,
n_classifiers=100,
combination_rule='majority_vote'):
self.base_classifier = base_classifier
self.n_classifiers = n_classifiers
# using the sklearn implementation of bagging for now
self.sk_bagging = BaggingClassifier(base_estimator=base_classifier,
n_estimators=n_classifiers,
max_samples=1.0,
max_features=1.0)
self.ensemble = Ensemble()
self.combiner = Combiner(rule=combination_rule)
def fit(self, X, y):
self.sk_bagging.fit(X, y)
self.ensemble.add_classifiers(self.sk_bagging.estimators_)
# self.classes_ = set(y)
def predict(self, X):
out = self.ensemble.output(X)
return self.combiner.combine(out)
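# Usage sketch (illustrative only; assumes scikit-learn is available and that
# the surrounding brew package provides the Ensemble/Combiner machinery used
# above):
if __name__ == "__main__":
    from sklearn.tree import DecisionTreeClassifier
    rng = np.random.RandomState(0)
    X_demo = rng.randn(100, 4)
    y_demo = (X_demo[:, 0] > 0).astype(int)
    pool = Bagging(base_classifier=DecisionTreeClassifier(), n_classifiers=10)
    pool.fit(X_demo, y_demo)
    print(pool.predict(X_demo[:5]))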
| mit |
kaiserroll14/301finalproject | main/pandas/sparse/panel.py | 9 | 18717 | """
Data structures for sparse float data. Life is made simpler by dealing only
with float64 data
"""
# pylint: disable=E1101,E1103,W0231
import warnings
from pandas.compat import range, lrange, zip
from pandas import compat
import numpy as np
from pandas.core.index import Index, MultiIndex, _ensure_index
from pandas.core.frame import DataFrame
from pandas.core.panel import Panel
from pandas.sparse.frame import SparseDataFrame
from pandas.util.decorators import deprecate
import pandas.core.common as com
import pandas.core.ops as ops
class SparsePanelAxis(object):
def __init__(self, cache_field, frame_attr):
self.cache_field = cache_field
self.frame_attr = frame_attr
def __get__(self, obj, type=None):
return getattr(obj, self.cache_field, None)
def __set__(self, obj, value):
value = _ensure_index(value)
if isinstance(value, MultiIndex):
raise NotImplementedError("value cannot be a MultiIndex")
for v in compat.itervalues(obj._frames):
setattr(v, self.frame_attr, value)
setattr(obj, self.cache_field, value)
class SparsePanel(Panel):
"""
Sparse version of Panel
Parameters
----------
frames : dict of DataFrame objects
items : array-like
major_axis : array-like
minor_axis : array-like
default_kind : {'block', 'integer'}, default 'block'
Default sparse kind for converting Series to SparseSeries. Will not
override SparseSeries passed into constructor
default_fill_value : float
Default fill_value for converting Series to SparseSeries. Will not
override SparseSeries passed in
Notes
-----
"""
ndim = 3
_typ = 'panel'
_subtyp = 'sparse_panel'
def __init__(self, frames=None, items=None, major_axis=None, minor_axis=None,
default_fill_value=np.nan, default_kind='block',
copy=False):
# deprecation #11157
warnings.warn("SparsePanel is deprecated and will be removed in a future version",
FutureWarning, stacklevel=2)
if frames is None:
frames = {}
if isinstance(frames, np.ndarray):
new_frames = {}
for item, vals in zip(items, frames):
new_frames[item] = \
SparseDataFrame(vals, index=major_axis,
columns=minor_axis,
default_fill_value=default_fill_value,
default_kind=default_kind)
frames = new_frames
if not isinstance(frames, dict):
raise TypeError('input must be a dict, a %r was passed' %
type(frames).__name__)
self.default_fill_value = fill_value = default_fill_value
self.default_kind = kind = default_kind
# pre-filter, if necessary
if items is None:
items = Index(sorted(frames.keys()))
items = _ensure_index(items)
(clean_frames,
major_axis,
minor_axis) = _convert_frames(frames, major_axis,
minor_axis, kind=kind,
fill_value=fill_value)
self._frames = clean_frames
# do we want to fill missing ones?
for item in items:
if item not in clean_frames:
raise ValueError('column %r not found in data' % item)
self._items = items
self.major_axis = major_axis
self.minor_axis = minor_axis
def _consolidate_inplace(self): # pragma: no cover
# do nothing when DataFrame calls this method
pass
def __array_wrap__(self, result):
return SparsePanel(result, items=self.items,
major_axis=self.major_axis,
minor_axis=self.minor_axis,
default_kind=self.default_kind,
default_fill_value=self.default_fill_value)
@classmethod
def from_dict(cls, data):
"""
Analogous to Panel.from_dict
"""
return SparsePanel(data)
def to_dense(self):
"""
Convert SparsePanel to (dense) Panel
Returns
-------
dense : Panel
"""
return Panel(self.values, self.items, self.major_axis,
self.minor_axis)
def as_matrix(self):
return self.values
@property
def values(self):
# return dense values
return np.array([self._frames[item].values
for item in self.items])
# need a special property for items to make the field assignable
_items = None
def _get_items(self):
return self._items
def _set_items(self, new_items):
new_items = _ensure_index(new_items)
if isinstance(new_items, MultiIndex):
raise NotImplementedError("itemps cannot be a MultiIndex")
# need to create new frames dict
old_frame_dict = self._frames
old_items = self._items
self._frames = dict((new_k, old_frame_dict[old_k])
for new_k, old_k in zip(new_items, old_items))
self._items = new_items
items = property(fget=_get_items, fset=_set_items)
# DataFrame's index
major_axis = SparsePanelAxis('_major_axis', 'index')
# DataFrame's columns / "items"
minor_axis = SparsePanelAxis('_minor_axis', 'columns')
def _ixs(self, i, axis=0):
"""
for compat as we don't support Block Manager here
i : int, slice, or sequence of integers
axis : int
"""
key = self._get_axis(axis)[i]
# xs cannot handle a non-scalar key, so just reindex here
if com.is_list_like(key):
return self.reindex(**{self._get_axis_name(axis): key})
return self.xs(key, axis=axis)
def _slice(self, slobj, axis=0, kind=None):
"""
for compat as we don't support Block Manager here
"""
axis = self._get_axis_name(axis)
index = self._get_axis(axis)
return self.reindex(**{axis: index[slobj]})
def _get_item_cache(self, key):
return self._frames[key]
def __setitem__(self, key, value):
if isinstance(value, DataFrame):
value = value.reindex(index=self.major_axis,
columns=self.minor_axis)
if not isinstance(value, SparseDataFrame):
value = value.to_sparse(fill_value=self.default_fill_value,
kind=self.default_kind)
else:
raise ValueError('only DataFrame objects can be set currently')
self._frames[key] = value
if key not in self.items:
self._items = Index(list(self.items) + [key])
def set_value(self, item, major, minor, value):
"""
Quickly set single value at (item, major, minor) location
Parameters
----------
item : item label (panel item)
major : major axis label (panel item row)
minor : minor axis label (panel item column)
value : scalar
Notes
-----
This method *always* returns a new object. It is not particularly
efficient but is provided for API compatibility with Panel
Returns
-------
panel : SparsePanel
"""
dense = self.to_dense().set_value(item, major, minor, value)
return dense.to_sparse(kind=self.default_kind,
fill_value=self.default_fill_value)
def __delitem__(self, key):
loc = self.items.get_loc(key)
indices = lrange(loc) + lrange(loc + 1, len(self.items))
del self._frames[key]
self._items = self._items.take(indices)
def __getstate__(self):
# pickling
return (self._frames, com._pickle_array(self.items),
com._pickle_array(self.major_axis),
com._pickle_array(self.minor_axis),
self.default_fill_value, self.default_kind)
def __setstate__(self, state):
frames, items, major, minor, fv, kind = state
self.default_fill_value = fv
self.default_kind = kind
self._items = _ensure_index(com._unpickle_array(items))
self._major_axis = _ensure_index(com._unpickle_array(major))
self._minor_axis = _ensure_index(com._unpickle_array(minor))
self._frames = frames
def copy(self, deep=True):
"""
Make a copy of the sparse panel
Returns
-------
copy : SparsePanel
"""
d = self._construct_axes_dict()
if deep:
new_data = dict((k, v.copy(deep=True)) for k, v in compat.iteritems(self._frames))
d = dict((k, v.copy(deep=True)) for k, v in compat.iteritems(d))
else:
new_data = self._frames.copy()
        d['default_fill_value'] = self.default_fill_value
        d['default_kind'] = self.default_kind
return SparsePanel(new_data, **d)
def to_frame(self, filter_observations=True):
"""
Convert SparsePanel to (dense) DataFrame
Returns
-------
frame : DataFrame
"""
if not filter_observations:
raise TypeError('filter_observations=False not supported for '
                            'SparsePanel.to_frame')
I, N, K = self.shape
counts = np.zeros(N * K, dtype=int)
d_values = {}
d_indexer = {}
for item in self.items:
frame = self[item]
values, major, minor = _stack_sparse_info(frame)
# values are stacked column-major
indexer = minor * N + major
counts.put(indexer, counts.take(indexer) + 1) # cuteness
d_values[item] = values
d_indexer[item] = indexer
# have full set of observations for each item
mask = counts == I
# for each item, take mask values at index locations for those sparse
# values, and use that to select values
values = np.column_stack([d_values[item][mask.take(d_indexer[item])]
for item in self.items])
inds, = mask.nonzero()
# still column major
major_labels = inds % N
minor_labels = inds // N
index = MultiIndex(levels=[self.major_axis, self.minor_axis],
labels=[major_labels, minor_labels],
verify_integrity=False)
df = DataFrame(values, index=index, columns=self.items)
return df.sortlevel(level=0)
to_long = deprecate('to_long', to_frame)
toLong = deprecate('toLong', to_frame)
def reindex(self, major=None, items=None, minor=None, major_axis=None,
minor_axis=None, copy=False):
"""
Conform / reshape panel axis labels to new input labels
Parameters
----------
major : array-like, default None
items : array-like, default None
minor : array-like, default None
copy : boolean, default False
Copy underlying SparseDataFrame objects
Returns
-------
reindexed : SparsePanel
"""
major = com._mut_exclusive(major=major, major_axis=major_axis)
minor = com._mut_exclusive(minor=minor, minor_axis=minor_axis)
if com._all_none(items, major, minor):
raise ValueError('Must specify at least one axis')
major = self.major_axis if major is None else major
minor = self.minor_axis if minor is None else minor
if items is not None:
new_frames = {}
for item in items:
if item in self._frames:
new_frames[item] = self._frames[item]
else:
raise NotImplementedError('Reindexing with new items not yet '
'supported')
else:
new_frames = self._frames
if copy:
new_frames = dict((k, v.copy()) for k, v in compat.iteritems(new_frames))
return SparsePanel(new_frames, items=items,
major_axis=major,
minor_axis=minor,
default_fill_value=self.default_fill_value,
default_kind=self.default_kind)
def _combine(self, other, func, axis=0):
if isinstance(other, DataFrame):
return self._combineFrame(other, func, axis=axis)
elif isinstance(other, Panel):
return self._combinePanel(other, func)
elif np.isscalar(other):
new_frames = dict((k, func(v, other))
for k, v in compat.iteritems(self))
return self._new_like(new_frames)
def _combineFrame(self, other, func, axis=0):
index, columns = self._get_plane_axes(axis)
axis = self._get_axis_number(axis)
other = other.reindex(index=index, columns=columns)
if axis == 0:
new_values = func(self.values, other.values)
elif axis == 1:
new_values = func(self.values.swapaxes(0, 1), other.values.T)
new_values = new_values.swapaxes(0, 1)
elif axis == 2:
new_values = func(self.values.swapaxes(0, 2), other.values)
new_values = new_values.swapaxes(0, 2)
# TODO: make faster!
new_frames = {}
for item, item_slice in zip(self.items, new_values):
old_frame = self[item]
ofv = old_frame.default_fill_value
ok = old_frame.default_kind
new_frames[item] = SparseDataFrame(item_slice,
index=self.major_axis,
columns=self.minor_axis,
default_fill_value=ofv,
default_kind=ok)
return self._new_like(new_frames)
def _new_like(self, new_frames):
return SparsePanel(new_frames, self.items, self.major_axis,
self.minor_axis,
default_fill_value=self.default_fill_value,
default_kind=self.default_kind)
def _combinePanel(self, other, func):
items = self.items.union(other.items)
major = self.major_axis.union(other.major_axis)
minor = self.minor_axis.union(other.minor_axis)
# could check that everything's the same size, but forget it
this = self.reindex(items=items, major=major, minor=minor)
other = other.reindex(items=items, major=major, minor=minor)
new_frames = {}
for item in items:
new_frames[item] = func(this[item], other[item])
if not isinstance(other, SparsePanel):
new_default_fill = self.default_fill_value
else:
# maybe unnecessary
new_default_fill = func(self.default_fill_value,
other.default_fill_value)
return SparsePanel(new_frames, items, major, minor,
default_fill_value=new_default_fill,
default_kind=self.default_kind)
def major_xs(self, key):
"""
Return slice of panel along major axis
Parameters
----------
key : object
Major axis label
Returns
-------
y : DataFrame
index -> minor axis, columns -> items
"""
slices = dict((k, v.xs(key)) for k, v in compat.iteritems(self))
return DataFrame(slices, index=self.minor_axis, columns=self.items)
def minor_xs(self, key):
"""
Return slice of panel along minor axis
Parameters
----------
key : object
Minor axis label
Returns
-------
y : SparseDataFrame
index -> major axis, columns -> items
"""
slices = dict((k, v[key]) for k, v in compat.iteritems(self))
return SparseDataFrame(slices, index=self.major_axis,
columns=self.items,
default_fill_value=self.default_fill_value,
default_kind=self.default_kind)
# TODO: allow SparsePanel to work with flex arithmetic.
# pow and mod only work for scalars for now
def pow(self, val, *args, **kwargs):
"""wrapper around `__pow__` (only works for scalar values)"""
return self.__pow__(val)
def mod(self, val, *args, **kwargs):
"""wrapper around `__mod__` (only works for scalar values"""
return self.__mod__(val)
# Sparse objects opt out of numexpr
SparsePanel._add_aggregate_operations(use_numexpr=False)
ops.add_special_arithmetic_methods(SparsePanel, use_numexpr=False, **ops.panel_special_funcs)
SparseWidePanel = SparsePanel
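# A minimal usage sketch of the class above (an illustration, not part of the
# original module, and never executed at import; the item/axis labels are
# hypothetical and the sketch assumes the module-level DataFrame/np imports).
def _sparse_panel_usage_example():  # pragma: no cover
    dense = {'ItemA': DataFrame(np.random.randn(4, 3),
                                index=list('abcd'), columns=list('xyz')),
             'ItemB': DataFrame(np.random.randn(4, 3),
                                index=list('abcd'), columns=list('xyz'))}
    sp = SparsePanel(dense)      # frames are converted to SparseDataFrame
    long_df = sp.to_frame()      # MultiIndex (major, minor) rows, item columns
    dense_panel = sp.to_dense()  # back to a regular (dense) Panel
    return long_df, dense_panel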
def _convert_frames(frames, index, columns, fill_value=np.nan, kind='block'):
from pandas.core.panel import _get_combined_index
output = {}
for item, df in compat.iteritems(frames):
if not isinstance(df, SparseDataFrame):
df = SparseDataFrame(df, default_kind=kind,
default_fill_value=fill_value)
output[item] = df
if index is None:
all_indexes = [df.index for df in output.values()]
index = _get_combined_index(all_indexes)
if columns is None:
all_columns = [df.columns for df in output.values()]
columns = _get_combined_index(all_columns)
index = _ensure_index(index)
columns = _ensure_index(columns)
for item, df in compat.iteritems(output):
if not (df.index.equals(index) and df.columns.equals(columns)):
output[item] = df.reindex(index=index, columns=columns)
return output, index, columns
def _stack_sparse_info(frame):
lengths = [s.sp_index.npoints for _, s in compat.iteritems(frame)]
# this is pretty fast
minor_labels = np.repeat(np.arange(len(frame.columns)), lengths)
inds_to_concat = []
vals_to_concat = []
for col in frame.columns:
series = frame[col]
if not np.isnan(series.fill_value):
raise TypeError('This routine assumes NaN fill value')
int_index = series.sp_index.to_int_index()
inds_to_concat.append(int_index.indices)
vals_to_concat.append(series.sp_values)
major_labels = np.concatenate(inds_to_concat)
sparse_values = np.concatenate(vals_to_concat)
return sparse_values, major_labels, minor_labels
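# Hedged illustration of the helper above (hypothetical, defined but never
# called): _stack_sparse_info flattens a single SparseDataFrame into its stored
# values plus their row/column label positions, and it assumes NaN is the fill
# value, as enforced by the TypeError check.
def _stack_sparse_info_example():  # pragma: no cover
    frame = SparseDataFrame({'x': [1.0, np.nan, 3.0],
                             'y': [np.nan, 2.0, np.nan]})
    values, major_labels, minor_labels = _stack_sparse_info(frame)
    # values holds only the stored (non-NaN) entries in column-major order;
    # major_labels/minor_labels give each entry's row and column position.
    return values, major_labels, minor_labels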
| gpl-3.0 |
sinhrks/scikit-learn | sklearn/tree/tests/test_tree.py | 32 | 52369 | """
Testing for the tree module (sklearn.tree).
"""
import pickle
from functools import partial
from itertools import product
import platform
import numpy as np
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from sklearn.random_projection import sparse_random_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_less_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.validation import check_random_state
from sklearn.exceptions import NotFittedError
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import ExtraTreeClassifier
from sklearn.tree import ExtraTreeRegressor
from sklearn import tree
from sklearn.tree.tree import SPARSE_SPLITTERS
from sklearn.tree._tree import TREE_LEAF
from sklearn import datasets
from sklearn.utils import compute_sample_weight
CLF_CRITERIONS = ("gini", "entropy")
REG_CRITERIONS = ("mse", )
CLF_TREES = {
"DecisionTreeClassifier": DecisionTreeClassifier,
"Presort-DecisionTreeClassifier": partial(DecisionTreeClassifier,
presort=True),
"ExtraTreeClassifier": ExtraTreeClassifier,
}
REG_TREES = {
"DecisionTreeRegressor": DecisionTreeRegressor,
"Presort-DecisionTreeRegressor": partial(DecisionTreeRegressor,
presort=True),
"ExtraTreeRegressor": ExtraTreeRegressor,
}
ALL_TREES = dict()
ALL_TREES.update(CLF_TREES)
ALL_TREES.update(REG_TREES)
SPARSE_TREES = ["DecisionTreeClassifier", "DecisionTreeRegressor",
"ExtraTreeClassifier", "ExtraTreeRegressor"]
X_small = np.array([
[0, 0, 4, 0, 0, 0, 1, -14, 0, -4, 0, 0, 0, 0, ],
[0, 0, 5, 3, 0, -4, 0, 0, 1, -5, 0.2, 0, 4, 1, ],
[-1, -1, 0, 0, -4.5, 0, 0, 2.1, 1, 0, 0, -4.5, 0, 1, ],
[-1, -1, 0, -1.2, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 1, ],
[-1, -1, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 1, ],
[-1, -2, 0, 4, -3, 10, 4, 0, -3.2, 0, 4, 3, -4, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -1, 0, ],
[2, 8, 5, 1, 0.5, -4, 10, 0, 1, -5, 3, 0, 2, 0, ],
[2, 0, 1, 1, 1, -1, 1, 0, 0, -2, 3, 0, 1, 0, ],
[2, 0, 1, 2, 3, -1, 10, 2, 0, -1, 1, 2, 2, 0, ],
[1, 1, 0, 2, 2, -1, 1, 2, 0, -5, 1, 2, 3, 0, ],
[3, 1, 0, 3, 0, -4, 10, 0, 1, -5, 3, 0, 3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 1.5, 1, -1, -1, ],
[2.11, 8, -6, -0.5, 0, 10, 0, 0, -3.2, 6, 0.5, 0, -1, -1, ],
[2, 0, 5, 1, 0.5, -2, 10, 0, 1, -5, 3, 1, 0, -1, ],
[2, 0, 1, 1, 1, -2, 1, 0, 0, -2, 0, 0, 0, 1, ],
[2, 1, 1, 1, 2, -1, 10, 2, 0, -1, 0, 2, 1, 1, ],
[1, 1, 0, 0, 1, -3, 1, 2, 0, -5, 1, 2, 1, 1, ],
[3, 1, 0, 1, 0, -4, 1, 0, 1, -2, 0, 0, 1, 0, ]])
y_small = [1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0,
0, 0]
y_small_reg = [1.0, 2.1, 1.2, 0.05, 10, 2.4, 3.1, 1.01, 0.01, 2.98, 3.1, 1.1,
0.0, 1.2, 2, 11, 0, 0, 4.5, 0.201, 1.06, 0.9, 0]
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
random_state = check_random_state(0)
X_multilabel, y_multilabel = datasets.make_multilabel_classification(
random_state=0, n_samples=30, n_features=10)
X_sparse_pos = random_state.uniform(size=(20, 5))
X_sparse_pos[X_sparse_pos <= 0.8] = 0.
y_random = random_state.randint(0, 4, size=(20, ))
X_sparse_mix = sparse_random_matrix(20, 10, density=0.25, random_state=0)
DATASETS = {
"iris": {"X": iris.data, "y": iris.target},
"boston": {"X": boston.data, "y": boston.target},
"digits": {"X": digits.data, "y": digits.target},
"toy": {"X": X, "y": y},
"clf_small": {"X": X_small, "y": y_small},
"reg_small": {"X": X_small, "y": y_small_reg},
"multilabel": {"X": X_multilabel, "y": y_multilabel},
"sparse-pos": {"X": X_sparse_pos, "y": y_random},
"sparse-neg": {"X": - X_sparse_pos, "y": y_random},
"sparse-mix": {"X": X_sparse_mix, "y": y_random},
"zeros": {"X": np.zeros((20, 3)), "y": y_random}
}
for name in DATASETS:
DATASETS[name]["X_sparse"] = csc_matrix(DATASETS[name]["X"])
def assert_tree_equal(d, s, message):
    assert_equal(s.node_count, d.node_count,
                 "{0}: unequal number of nodes ({1} != {2})"
                 "".format(message, s.node_count, d.node_count))
    assert_array_equal(d.children_right, s.children_right,
                       message + ": unequal children_right")
    assert_array_equal(d.children_left, s.children_left,
                       message + ": unequal children_left")
    external = d.children_right == TREE_LEAF
    internal = np.logical_not(external)
    assert_array_equal(d.feature[internal], s.feature[internal],
                       message + ": unequal features")
    assert_array_equal(d.threshold[internal], s.threshold[internal],
                       message + ": unequal threshold")
    assert_array_equal(d.n_node_samples.sum(), s.n_node_samples.sum(),
                       message + ": unequal sum(n_node_samples)")
    assert_array_equal(d.n_node_samples, s.n_node_samples,
                       message + ": unequal n_node_samples")
    assert_almost_equal(d.impurity, s.impurity,
                        err_msg=message + ": unequal impurity")
    assert_array_almost_equal(d.value[external], s.value[external],
                              err_msg=message + ": unequal value")
def test_classification_toy():
# Check classification on a toy dataset.
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf = Tree(max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_weighted_classification_toy():
# Check classification on a weighted toy dataset.
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y, sample_weight=np.ones(len(X)))
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf.fit(X, y, sample_weight=np.ones(len(X)) * 0.5)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_regression_toy():
# Check regression on a toy dataset.
for name, Tree in REG_TREES.items():
reg = Tree(random_state=1)
reg.fit(X, y)
assert_almost_equal(reg.predict(T), true_result,
err_msg="Failed with {0}".format(name))
clf = Tree(max_features=1, random_state=1)
clf.fit(X, y)
        assert_almost_equal(clf.predict(T), true_result,
err_msg="Failed with {0}".format(name))
def test_xor():
# Check on a XOR problem
y = np.zeros((10, 10))
y[:5, :5] = 1
y[5:, 5:] = 1
gridx, gridy = np.indices(y.shape)
X = np.vstack([gridx.ravel(), gridy.ravel()]).T
y = y.ravel()
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
clf = Tree(random_state=0, max_features=1)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
def test_iris():
# Check consistency on dataset iris.
for (name, Tree), criterion in product(CLF_TREES.items(), CLF_CRITERIONS):
clf = Tree(criterion=criterion, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.9,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
clf = Tree(criterion=criterion, max_features=2, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.5,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_boston():
# Check consistency on dataset boston house prices.
for (name, Tree), criterion in product(REG_TREES.items(), REG_CRITERIONS):
reg = Tree(criterion=criterion, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 1,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
# using fewer features reduces the learning ability of this tree,
# but reduces training time.
reg = Tree(criterion=criterion, max_features=6, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 2,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_probability():
# Predict probabilities using DecisionTreeClassifier.
for name, Tree in CLF_TREES.items():
clf = Tree(max_depth=1, max_features=1, random_state=42)
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(np.sum(prob_predict, 1),
np.ones(iris.data.shape[0]),
err_msg="Failed with {0}".format(name))
assert_array_equal(np.argmax(prob_predict, 1),
clf.predict(iris.data),
err_msg="Failed with {0}".format(name))
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8,
err_msg="Failed with {0}".format(name))
def test_arrayrepr():
# Check the array representation.
# Check resize
X = np.arange(10000)[:, np.newaxis]
y = np.arange(10000)
for name, Tree in REG_TREES.items():
reg = Tree(max_depth=None, random_state=0)
reg.fit(X, y)
def test_pure_set():
# Check when y is pure.
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [1, 1, 1, 1, 1, 1]
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X), y,
err_msg="Failed with {0}".format(name))
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
reg.fit(X, y)
        assert_almost_equal(reg.predict(X), y,
err_msg="Failed with {0}".format(name))
def test_numerical_stability():
# Check numerical stability.
X = np.array([
[152.08097839, 140.40744019, 129.75102234, 159.90493774],
[142.50700378, 135.81935120, 117.82884979, 162.75781250],
[127.28772736, 140.40744019, 129.75102234, 159.90493774],
[132.37025452, 143.71923828, 138.35694885, 157.84558105],
[103.10237122, 143.71928406, 138.35696411, 157.84559631],
[127.71276855, 143.71923828, 138.35694885, 157.84558105],
[120.91514587, 140.40744019, 129.75102234, 159.90493774]])
y = np.array(
[1., 0.70209277, 0.53896582, 0., 0.90914464, 0.48026916, 0.49622521])
with np.errstate(all="raise"):
for name, Tree in REG_TREES.items():
reg = Tree(random_state=0)
reg.fit(X, y)
reg.fit(X, -y)
reg.fit(-X, y)
reg.fit(-X, -y)
def test_importances():
# Check variable importances.
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
importances = clf.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10, "Failed with {0}".format(name))
assert_equal(n_important, 3, "Failed with {0}".format(name))
X_new = assert_warns(
DeprecationWarning, clf.transform, X, threshold="mean")
assert_less(0, X_new.shape[1], "Failed with {0}".format(name))
assert_less(X_new.shape[1], X.shape[1], "Failed with {0}".format(name))
# Check on iris that importances are the same for all builders
clf = DecisionTreeClassifier(random_state=0)
clf.fit(iris.data, iris.target)
clf2 = DecisionTreeClassifier(random_state=0,
max_leaf_nodes=len(iris.data))
clf2.fit(iris.data, iris.target)
assert_array_equal(clf.feature_importances_,
clf2.feature_importances_)
@raises(ValueError)
def test_importances_raises():
# Check if variable importance before fit raises ValueError.
clf = DecisionTreeClassifier()
clf.feature_importances_
def test_importances_gini_equal_mse():
# Check that gini is equivalent to mse for binary output variable
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
# The gini index and the mean square error (variance) might differ due
    # to numerical instability. Since those instabilities mainly occur at
    # high tree depth, we restrict the maximal depth.
clf = DecisionTreeClassifier(criterion="gini", max_depth=5,
random_state=0).fit(X, y)
reg = DecisionTreeRegressor(criterion="mse", max_depth=5,
random_state=0).fit(X, y)
assert_almost_equal(clf.feature_importances_, reg.feature_importances_)
assert_array_equal(clf.tree_.feature, reg.tree_.feature)
assert_array_equal(clf.tree_.children_left, reg.tree_.children_left)
assert_array_equal(clf.tree_.children_right, reg.tree_.children_right)
assert_array_equal(clf.tree_.n_node_samples, reg.tree_.n_node_samples)
def test_max_features():
# Check max_features.
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(max_features="auto")
reg.fit(boston.data, boston.target)
assert_equal(reg.max_features_, boston.data.shape[1])
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(max_features="auto")
clf.fit(iris.data, iris.target)
assert_equal(clf.max_features_, 2)
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_features="sqrt")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.sqrt(iris.data.shape[1])))
est = TreeEstimator(max_features="log2")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.log2(iris.data.shape[1])))
est = TreeEstimator(max_features=1)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=3)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 3)
est = TreeEstimator(max_features=0.01)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=0.5)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(0.5 * iris.data.shape[1]))
est = TreeEstimator(max_features=1.0)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
est = TreeEstimator(max_features=None)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
# use values of max_features that are invalid
est = TreeEstimator(max_features=10)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=-1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=0.0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=1.5)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features="foobar")
assert_raises(ValueError, est.fit, X, y)
def test_error():
# Test that it gives proper exception on deficient input.
for name, TreeEstimator in CLF_TREES.items():
# predict before fit
est = TreeEstimator()
assert_raises(NotFittedError, est.predict_proba, X)
est.fit(X, y)
X2 = [[-2, -1, 1]] # wrong feature shape for sample
assert_raises(ValueError, est.predict_proba, X2)
for name, TreeEstimator in ALL_TREES.items():
# Invalid values for parameters
assert_raises(ValueError, TreeEstimator(min_samples_leaf=-1).fit, X, y)
assert_raises(ValueError, TreeEstimator(min_samples_leaf=.6).fit, X, y)
assert_raises(ValueError, TreeEstimator(min_samples_leaf=0.).fit, X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=-1).fit,
X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=0.51).fit,
X, y)
assert_raises(ValueError, TreeEstimator(min_samples_split=-1).fit,
X, y)
assert_raises(ValueError, TreeEstimator(min_samples_split=0.0).fit,
X, y)
assert_raises(ValueError, TreeEstimator(min_samples_split=1.1).fit,
X, y)
assert_raises(ValueError, TreeEstimator(max_depth=-1).fit, X, y)
assert_raises(ValueError, TreeEstimator(max_features=42).fit, X, y)
# Wrong dimensions
est = TreeEstimator()
y2 = y[:-1]
assert_raises(ValueError, est.fit, X, y2)
# Test with arrays that are non-contiguous.
Xf = np.asfortranarray(X)
est = TreeEstimator()
est.fit(Xf, y)
assert_almost_equal(est.predict(T), true_result)
# predict before fitting
est = TreeEstimator()
assert_raises(NotFittedError, est.predict, T)
# predict on vector with different dims
est.fit(X, y)
t = np.asarray(T)
assert_raises(ValueError, est.predict, t[:, 1:])
# wrong sample shape
Xt = np.array(X).T
est = TreeEstimator()
est.fit(np.dot(X, Xt), y)
assert_raises(ValueError, est.predict, X)
assert_raises(ValueError, est.apply, X)
clf = TreeEstimator()
clf.fit(X, y)
assert_raises(ValueError, clf.predict, Xt)
assert_raises(ValueError, clf.apply, Xt)
# apply before fitting
est = TreeEstimator()
assert_raises(NotFittedError, est.apply, T)
def test_min_samples_split():
"""Test min_samples_split parameter"""
X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))
y = iris.target
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, name in product((None, 1000), ALL_TREES.keys()):
TreeEstimator = ALL_TREES[name]
# test for integer parameter
est = TreeEstimator(min_samples_split=10,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
# count samples on nodes, -1 means it is a leaf
node_samples = est.tree_.n_node_samples[est.tree_.children_left != -1]
assert_greater(np.min(node_samples), 9,
"Failed with {0}".format(name))
# test for float parameter
est = TreeEstimator(min_samples_split=0.2,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
# count samples on nodes, -1 means it is a leaf
node_samples = est.tree_.n_node_samples[est.tree_.children_left != -1]
assert_greater(np.min(node_samples), 9,
"Failed with {0}".format(name))
def test_min_samples_leaf():
    # Test that each leaf contains at least min_samples_leaf training samples
X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))
y = iris.target
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, name in product((None, 1000), ALL_TREES.keys()):
TreeEstimator = ALL_TREES[name]
# test integer parameter
est = TreeEstimator(min_samples_leaf=5,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
# test float parameter
est = TreeEstimator(min_samples_leaf=0.1,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
def check_min_weight_fraction_leaf(name, datasets, sparse=False):
"""Test if leaves contain at least min_weight_fraction_leaf of the
training set"""
if sparse:
X = DATASETS[datasets]["X_sparse"].astype(np.float32)
else:
X = DATASETS[datasets]["X"].astype(np.float32)
y = DATASETS[datasets]["y"]
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
TreeEstimator = ALL_TREES[name]
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 6)):
est = TreeEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y, sample_weight=weights)
if sparse:
out = est.tree_.apply(X.tocsr())
else:
out = est.tree_.apply(X)
node_weights = np.bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
# Check on dense input
for name in ALL_TREES:
yield check_min_weight_fraction_leaf, name, "iris"
# Check on sparse input
for name in SPARSE_TREES:
yield check_min_weight_fraction_leaf, name, "multilabel", True
def test_pickle():
for name, TreeEstimator in ALL_TREES.items():
if "Classifier" in name:
X, y = iris.data, iris.target
else:
X, y = boston.data, boston.target
est = TreeEstimator(random_state=0)
est.fit(X, y)
score = est.score(X, y)
fitted_attribute = dict()
for attribute in ["max_depth", "node_count", "capacity"]:
fitted_attribute[attribute] = getattr(est.tree_, attribute)
serialized_object = pickle.dumps(est)
est2 = pickle.loads(serialized_object)
assert_equal(type(est2), est.__class__)
score2 = est2.score(X, y)
assert_equal(score, score2,
"Failed to generate same score after pickling "
"with {0}".format(name))
for attribute in fitted_attribute:
assert_equal(getattr(est2.tree_, attribute),
fitted_attribute[attribute],
"Failed to generate same attribute {0} after "
"pickling with {1}".format(attribute, name))
def test_multioutput():
# Check estimators on multi-output problems.
X = [[-2, -1],
[-1, -1],
[-1, -2],
[1, 1],
[1, 2],
[2, 1],
[-2, 1],
[-1, 1],
[-1, 2],
[2, -1],
[1, -1],
[1, -2]]
y = [[-1, 0],
[-1, 0],
[-1, 0],
[1, 1],
[1, 1],
[1, 1],
[-1, 2],
[-1, 2],
[-1, 2],
[1, 3],
[1, 3],
[1, 3]]
T = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_true = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
# toy classification problem
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
y_hat = clf.fit(X, y).predict(T)
assert_array_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
proba = clf.predict_proba(T)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = clf.predict_log_proba(T)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
# toy regression problem
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
y_hat = reg.fit(X, y).predict(T)
assert_almost_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
def test_classes_shape():
# Test that n_classes_ and classes_ have proper shape.
for name, TreeClassifier in CLF_TREES.items():
# Classification, single output
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = TreeClassifier(random_state=0)
clf.fit(X, _y)
assert_equal(len(clf.n_classes_), 2)
assert_equal(len(clf.classes_), 2)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_unbalanced_iris():
# Check class rebalancing.
unbalanced_X = iris.data[:125]
unbalanced_y = iris.target[:125]
sample_weight = compute_sample_weight("balanced", unbalanced_y)
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(unbalanced_X, unbalanced_y, sample_weight=sample_weight)
assert_almost_equal(clf.predict(unbalanced_X), unbalanced_y)
def test_memory_layout():
# Check that it works no matter the memory layout
for (name, TreeEstimator), dtype in product(ALL_TREES.items(),
[np.float64, np.float32]):
est = TreeEstimator(random_state=0)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
if not est.presort:
# csr matrix
X = csr_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# csc_matrix
X = csc_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_sample_weight():
# Check sample weighting.
# Test that zero-weighted samples are not taken into account
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
sample_weight = np.ones(100)
sample_weight[y == 0] = 0.0
clf = DecisionTreeClassifier(random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), np.ones(100))
# Test that low weighted samples are not taken into account at low depth
X = np.arange(200)[:, np.newaxis]
y = np.zeros(200)
y[50:100] = 1
y[100:200] = 2
X[100:200, 0] = 200
sample_weight = np.ones(200)
sample_weight[y == 2] = .51 # Samples of class '2' are still weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 149.5)
sample_weight[y == 2] = .5 # Samples of class '2' are no longer weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 49.5) # Threshold should have moved
# Test that sample weighting is the same as having duplicates
X = iris.data
y = iris.target
duplicates = rng.randint(0, X.shape[0], 100)
clf = DecisionTreeClassifier(random_state=1)
clf.fit(X[duplicates], y[duplicates])
sample_weight = np.bincount(duplicates, minlength=X.shape[0])
clf2 = DecisionTreeClassifier(random_state=1)
clf2.fit(X, y, sample_weight=sample_weight)
internal = clf.tree_.children_left != tree._tree.TREE_LEAF
assert_array_almost_equal(clf.tree_.threshold[internal],
clf2.tree_.threshold[internal])
def test_sample_weight_invalid():
# Check sample weighting raises errors.
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
clf = DecisionTreeClassifier(random_state=0)
sample_weight = np.random.rand(100, 1)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.array(0)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(101)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(99)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
def check_class_weights(name):
"""Check class_weights resemble sample_weights behavior."""
TreeClassifier = CLF_TREES[name]
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target)
clf2 = TreeClassifier(class_weight='balanced', random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Make a multi-output problem with three copies of Iris
iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
# Create user-defined weights that should balance over the outputs
clf3 = TreeClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
{0: 2., 1: 1., 2: 2.},
{0: 1., 1: 2., 2: 2.}],
random_state=0)
clf3.fit(iris.data, iris_multi)
assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
# Check against multi-output "auto" which should also have no effect
clf4 = TreeClassifier(class_weight='balanced', random_state=0)
clf4.fit(iris.data, iris_multi)
assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Check that sample_weight and class_weight are multiplicative
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
def test_class_weights():
for name in CLF_TREES:
yield check_class_weights, name
def check_class_weight_errors(name):
# Test if class_weight raises errors and warnings when expected.
TreeClassifier = CLF_TREES[name]
_y = np.vstack((y, np.array(y) * 2)).T
# Invalid preset string
clf = TreeClassifier(class_weight='the larch', random_state=0)
assert_raises(ValueError, clf.fit, X, y)
assert_raises(ValueError, clf.fit, X, _y)
# Not a list or preset for multi-output
clf = TreeClassifier(class_weight=1, random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
# Incorrect length list for multi-output
clf = TreeClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
for name in CLF_TREES:
yield check_class_weight_errors, name
def test_max_leaf_nodes():
    # Test greedy trees with max_depth + 1 leaves.
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=None, max_leaf_nodes=k + 1).fit(X, y)
tree = est.tree_
assert_equal((tree.children_left == TREE_LEAF).sum(), k + 1)
# max_leaf_nodes in (0, 1) should raise ValueError
est = TreeEstimator(max_depth=None, max_leaf_nodes=0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=0.1)
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
# Test precedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.tree_
assert_greater(tree.max_depth, 1)
def test_arrays_persist():
# Ensure property arrays' memory stays alive when tree disappears
# non-regression for #2726
for attr in ['n_classes', 'value', 'children_left', 'children_right',
'threshold', 'impurity', 'feature', 'n_node_samples']:
value = getattr(DecisionTreeClassifier().fit([[0], [1]], [0, 1]).tree_, attr)
# if pointing to freed memory, contents may be arbitrary
assert_true(-3 <= value.flat[0] < 3,
'Array points to arbitrary memory')
def test_only_constant_features():
random_state = check_random_state(0)
X = np.zeros((10, 20))
y = random_state.randint(0, 2, (10, ))
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(random_state=0)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 0)
def test_with_only_one_non_constant_features():
X = np.hstack([np.array([[1.], [1.], [0.], [0.]]),
np.zeros((4, 1000))])
y = np.array([0., 1., 0., 1.0])
for name, TreeEstimator in CLF_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 1)
assert_array_equal(est.predict_proba(X), 0.5 * np.ones((4, 2)))
for name, TreeEstimator in REG_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 1)
assert_array_equal(est.predict(X), 0.5 * np.ones((4, )))
def test_big_input():
    # Test that the error raised for too large inputs mentions float32.
X = np.repeat(10 ** 40., 4).astype(np.float64).reshape(-1, 1)
clf = DecisionTreeClassifier()
try:
clf.fit(X, [0, 1, 0, 1])
except ValueError as e:
assert_in("float32", str(e))
def test_realloc():
from sklearn.tree._utils import _realloc_test
assert_raises(MemoryError, _realloc_test)
def test_huge_allocations():
n_bits = int(platform.architecture()[0].rstrip('bit'))
X = np.random.randn(10, 2)
y = np.random.randint(0, 2, 10)
# Sanity check: we cannot request more memory than the size of the address
# space. Currently raises OverflowError.
huge = 2 ** (n_bits + 1)
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(Exception, clf.fit, X, y)
# Non-regression test: MemoryError used to be dropped by Cython
# because of missing "except *".
huge = 2 ** (n_bits - 1) - 1
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(MemoryError, clf.fit, X, y)
def check_sparse_input(tree, dataset, max_depth=None):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
    # Subsample to save testing time
if dataset in ["digits", "boston"]:
n_samples = X.shape[0] // 5
X = X[:n_samples]
X_sparse = X_sparse[:n_samples]
y = y[:n_samples]
for sparse_format in (csr_matrix, csc_matrix, coo_matrix):
X_sparse = sparse_format(X_sparse)
# Check the default (depth first search)
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
y_pred = d.predict(X)
if tree in CLF_TREES:
y_proba = d.predict_proba(X)
y_log_proba = d.predict_log_proba(X)
for sparse_matrix in (csr_matrix, csc_matrix, coo_matrix):
X_sparse_test = sparse_matrix(X_sparse, dtype=np.float32)
assert_array_almost_equal(s.predict(X_sparse_test), y_pred)
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X_sparse_test),
y_proba)
assert_array_almost_equal(s.predict_log_proba(X_sparse_test),
y_log_proba)
def test_sparse_input():
for tree, dataset in product(SPARSE_TREES,
("clf_small", "toy", "digits", "multilabel",
"sparse-pos", "sparse-neg", "sparse-mix",
"zeros")):
max_depth = 3 if dataset == "digits" else None
yield (check_sparse_input, tree, dataset, max_depth)
    # Due to the numerical instability of MSE and an overly strict test, we
    # limit the maximal depth
for tree, dataset in product(REG_TREES, ["boston", "reg_small"]):
if tree in SPARSE_TREES:
yield (check_sparse_input, tree, dataset, 2)
def check_sparse_parameters(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Check max_features
d = TreeEstimator(random_state=0, max_features=1, max_depth=2).fit(X, y)
s = TreeEstimator(random_state=0, max_features=1,
max_depth=2).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check min_samples_split
d = TreeEstimator(random_state=0, max_features=1,
min_samples_split=10).fit(X, y)
s = TreeEstimator(random_state=0, max_features=1,
min_samples_split=10).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check min_samples_leaf
d = TreeEstimator(random_state=0,
min_samples_leaf=X_sparse.shape[0] // 2).fit(X, y)
s = TreeEstimator(random_state=0,
min_samples_leaf=X_sparse.shape[0] // 2).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check best-first search
d = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X, y)
s = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_parameters():
for tree, dataset in product(SPARSE_TREES,
["sparse-pos", "sparse-neg", "sparse-mix",
"zeros"]):
yield (check_sparse_parameters, tree, dataset)
def check_sparse_criterion(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Check various criterion
CRITERIONS = REG_CRITERIONS if tree in REG_TREES else CLF_CRITERIONS
for criterion in CRITERIONS:
d = TreeEstimator(random_state=0, max_depth=3,
criterion=criterion).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=3,
criterion=criterion).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_criterion():
for tree, dataset in product(SPARSE_TREES,
["sparse-pos", "sparse-neg", "sparse-mix",
"zeros"]):
yield (check_sparse_criterion, tree, dataset)
def check_explicit_sparse_zeros(tree, max_depth=3,
n_features=10):
TreeEstimator = ALL_TREES[tree]
    # set n_samples equal to n_features to ease the simultaneous construction
    # of a csr and a csc matrix
n_samples = n_features
samples = np.arange(n_samples)
# Generate X, y
random_state = check_random_state(0)
indices = []
data = []
offset = 0
indptr = [offset]
for i in range(n_features):
n_nonzero_i = random_state.binomial(n_samples, 0.5)
indices_i = random_state.permutation(samples)[:n_nonzero_i]
indices.append(indices_i)
data_i = random_state.binomial(3, 0.5, size=(n_nonzero_i, )) - 1
data.append(data_i)
offset += n_nonzero_i
indptr.append(offset)
indices = np.concatenate(indices)
data = np.array(np.concatenate(data), dtype=np.float32)
X_sparse = csc_matrix((data, indices, indptr),
shape=(n_samples, n_features))
X = X_sparse.toarray()
X_sparse_test = csr_matrix((data, indices, indptr),
shape=(n_samples, n_features))
X_test = X_sparse_test.toarray()
y = random_state.randint(0, 3, size=(n_samples, ))
# Ensure that X_sparse_test owns its data, indices and indptr array
X_sparse_test = X_sparse_test.copy()
# Ensure that we have explicit zeros
assert_greater((X_sparse.data == 0.).sum(), 0)
assert_greater((X_sparse_test.data == 0.).sum(), 0)
# Perform the comparison
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
Xs = (X_test, X_sparse_test)
for X1, X2 in product(Xs, Xs):
assert_array_almost_equal(s.tree_.apply(X1), d.tree_.apply(X2))
assert_array_almost_equal(s.apply(X1), d.apply(X2))
assert_array_almost_equal(s.apply(X1), s.tree_.apply(X1))
assert_array_almost_equal(s.tree_.decision_path(X1).toarray(),
d.tree_.decision_path(X2).toarray())
assert_array_almost_equal(s.decision_path(X1).toarray(),
d.decision_path(X2).toarray())
assert_array_almost_equal(s.decision_path(X1).toarray(),
s.tree_.decision_path(X1).toarray())
assert_array_almost_equal(s.predict(X1), d.predict(X2))
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X1),
d.predict_proba(X2))
def test_explicit_sparse_zeros():
for tree in SPARSE_TREES:
yield (check_explicit_sparse_zeros, tree)
@ignore_warnings
def check_raise_error_on_1d_input(name):
TreeEstimator = ALL_TREES[name]
X = iris.data[:, 0].ravel()
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
assert_raises(ValueError, TreeEstimator(random_state=0).fit, X, y)
est = TreeEstimator(random_state=0)
est.fit(X_2d, y)
assert_raises(ValueError, est.predict, [X])
@ignore_warnings
def test_1d_input():
for name in ALL_TREES:
yield check_raise_error_on_1d_input, name
def _check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight):
# Private function to keep pretty printing in nose yielded tests
est = TreeEstimator(random_state=0)
est.fit(X, y, sample_weight=sample_weight)
assert_equal(est.tree_.max_depth, 1)
est = TreeEstimator(random_state=0, min_weight_fraction_leaf=0.4)
est.fit(X, y, sample_weight=sample_weight)
assert_equal(est.tree_.max_depth, 0)
def check_min_weight_leaf_split_level(name):
TreeEstimator = ALL_TREES[name]
X = np.array([[0], [0], [0], [0], [1]])
y = [0, 0, 0, 0, 1]
sample_weight = [0.2, 0.2, 0.2, 0.2, 0.2]
_check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight)
if not TreeEstimator().presort:
_check_min_weight_leaf_split_level(TreeEstimator, csc_matrix(X), y,
sample_weight)
def test_min_weight_leaf_split_level():
for name in ALL_TREES:
yield check_min_weight_leaf_split_level, name
def check_public_apply(name):
X_small32 = X_small.astype(tree._tree.DTYPE)
est = ALL_TREES[name]()
est.fit(X_small, y_small)
assert_array_equal(est.apply(X_small),
est.tree_.apply(X_small32))
def check_public_apply_sparse(name):
X_small32 = csr_matrix(X_small.astype(tree._tree.DTYPE))
est = ALL_TREES[name]()
est.fit(X_small, y_small)
assert_array_equal(est.apply(X_small),
est.tree_.apply(X_small32))
def test_public_apply():
for name in ALL_TREES:
yield (check_public_apply, name)
for name in SPARSE_TREES:
yield (check_public_apply_sparse, name)
def check_presort_sparse(est, X, y):
assert_raises(ValueError, est.fit, X, y)
def test_presort_sparse():
ests = (DecisionTreeClassifier(presort=True),
DecisionTreeRegressor(presort=True))
sparse_matrices = (csr_matrix, csc_matrix, coo_matrix)
y, X = datasets.make_multilabel_classification(random_state=0,
n_samples=50,
n_features=1,
n_classes=20)
y = y[:, 0]
for est, sparse_matrix in product(ests, sparse_matrices):
yield check_presort_sparse, est, sparse_matrix(X), y
def test_decision_path_hardcoded():
X = iris.data
y = iris.target
est = DecisionTreeClassifier(random_state=0, max_depth=1).fit(X, y)
node_indicator = est.decision_path(X[:2]).toarray()
assert_array_equal(node_indicator, [[1, 1, 0], [1, 0, 1]])
def check_decision_path(name):
X = iris.data
y = iris.target
n_samples = X.shape[0]
TreeEstimator = ALL_TREES[name]
est = TreeEstimator(random_state=0, max_depth=2)
est.fit(X, y)
node_indicator_csr = est.decision_path(X)
node_indicator = node_indicator_csr.toarray()
assert_equal(node_indicator.shape, (n_samples, est.tree_.node_count))
    # Assert that leaf indices are correct
leaves = est.apply(X)
leave_indicator = [node_indicator[i, j] for i, j in enumerate(leaves)]
assert_array_almost_equal(leave_indicator, np.ones(shape=n_samples))
    # Ensure only one leaf node per sample
all_leaves = est.tree_.children_left == TREE_LEAF
assert_array_almost_equal(np.dot(node_indicator, all_leaves),
np.ones(shape=n_samples))
# Ensure max depth is consistent with sum of indicator
max_depth = node_indicator.sum(axis=1).max()
assert_less_equal(est.tree_.max_depth, max_depth)
def test_decision_path():
for name in ALL_TREES:
yield (check_decision_path, name)
def check_no_sparse_y_support(name):
X, y = X_multilabel, csr_matrix(y_multilabel)
TreeEstimator = ALL_TREES[name]
assert_raises(TypeError, TreeEstimator(random_state=0).fit, X, y)
def test_no_sparse_y_support():
# Currently we don't support sparse y
for name in ALL_TREES:
yield (check_no_sparse_y_support, name)
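if __name__ == '__main__':
    # Convenience entry point (an addition, not part of the original file):
    # run this module's tests directly with nose, the runner these older
    # scikit-learn test modules assume is installed.
    import nose
    nose.runmodule()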
| bsd-3-clause |
micahcochran/geopandas | geopandas/_version.py | 3 | 16750 |
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by github's download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.16 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
keywords = {"refnames": git_refnames, "full": git_full}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = "v"
cfg.parentdir_prefix = "geopandas-"
cfg.versionfile_source = "geopandas/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
return None
return stdout
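# Hedged illustration (hypothetical helper, never called at import time): this
# is the kind of invocation the VCS handlers below build on, e.g. asking git
# for the current commit of a checkout rooted at `root`.
def _run_command_example(root, verbose=False):  # pragma: no cover
    gits = ["git.cmd", "git.exe"] if sys.platform == "win32" else ["git"]
    return run_command(gits, ["rev-parse", "HEAD"], cwd=root, verbose=verbose)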
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes
both the project name and a version string.
"""
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%s', but '%s' doesn't start with "
"prefix '%s'" % (root, dirname, parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None}
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs-tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags"}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %s" % root)
raise NotThisMethod("no .git directory")
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%s*" % tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
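# Illustrative outputs of render_pep440 (the 'pieces' dicts below are made-up
# examples, not values from a real checkout):
#   {"closest-tag": "1.2", "distance": 3, "short": "abc1234", "dirty": True}
#       -> "1.2+3.gabc1234.dirty"
#   {"closest-tag": None, "distance": 7, "short": "abc1234", "dirty": False}
#       -> "0+untagged.7.gabc1234"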
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
    Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
    Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"]}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree"}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version"}
| bsd-3-clause |
ati-ozgur/KDD99ReviewArticle | HelperCodes/create_table_JournalAndArticleCounts.py | 1 | 1930 | import ReviewHelper
import pandas as pd
df = ReviewHelper.get_pandas_data_frame_created_from_bibtex_file()
#df_journal = df.groupby('journal')["ID"]
dfJournalList = df.groupby(['journal'])['ID'].count().sort_values(ascending=False)
isOdd = (dfJournalList.size % 2 == 1)
if (isOdd):
table_row_length = dfJournalList.size / 2 +1
else:
table_row_length = dfJournalList.size / 2
table_content_inside=""
for index in range(table_row_length):
journal_name_1column = dfJournalList.index[index]
journal_count_1column = dfJournalList[index]
second_column_index = index + table_row_length
if(second_column_index < dfJournalList.size):
journal_name_2column = dfJournalList.index[second_column_index]
journal_count_2column = dfJournalList[second_column_index]
else:
journal_name_2column = ""
journal_count_2column = ""
line = "{journal_name_1column} & {journal_count_1column} & {journal_name_2column} & {journal_count_2column} \\\\ \n".format(
journal_name_1column = journal_name_1column
,journal_count_1column = journal_count_1column
,journal_name_2column = journal_name_2column
,journal_count_2column = journal_count_2column
)
table_content_inside = table_content_inside + line
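# Illustration of the pairing above (journal names and counts are invented):
# table row i combines entry i with entry i + table_row_length of the ranked
# list, producing LaTeX lines such as
#   Expert Systems with Applications & 12 & Information Sciences & 4 \\
# so the ranking reads down the left column first, then down the right column.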
table_content_start = """
\\begin{table*}[!ht]
\\caption{ \\textbf{Journals and Article Counts} }
\\label{table-JournalAndArticleCounts}
\\centering
\\begin{adjustbox}{max width=\\textwidth}
\\normalsize
\\begin{tabular}{llll}
\\toprule
Journal Name & Article Count & Journal Name & Article Count \\\\
\\midrule
"""
table_content_end = """
\\bottomrule
\\end{tabular}
\\end{adjustbox}
\\end{table*}
"""
table_content_full = table_content_start + table_content_inside + table_content_end
filename = "../latex/table-JournalAndArticleCounts.tex"
target = open(filename, 'w')
target.write(table_content_full)
target.close()
| mit |
Unidata/MetPy | v0.9/_downloads/8591910a2b42dadcf3b05658ddd9c600/isentropic_example.py | 2 | 7222 | # Copyright (c) 2017,2018 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
===================
Isentropic Analysis
===================
The MetPy function `mpcalc.isentropic_interpolation` allows for isentropic analysis from model
analysis data in isobaric coordinates.
"""
########################################
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import matplotlib.pyplot as plt
import numpy as np
import xarray as xr
import metpy.calc as mpcalc
from metpy.cbook import get_test_data
from metpy.plots import add_metpy_logo, add_timestamp
from metpy.units import units
#######################################
# **Getting the data**
#
# In this example, NARR reanalysis data for 18 UTC 04 April 1987 from the National Centers
# for Environmental Information (https://www.ncdc.noaa.gov/data-access/model-data)
# will be used.
data = xr.open_dataset(get_test_data('narr_example.nc', False))
##########################
print(list(data.variables))
#############################
# We will reduce the dimensionality of the data as it is pulled in to remove an empty time
# dimension.
# Assign data to variable names
lat = data['lat']
lon = data['lon']
lev = data['isobaric']
times = data['time']
tmp = data['Temperature'][0]
uwnd = data['u_wind'][0]
vwnd = data['v_wind'][0]
spech = data['Specific_humidity'][0]
# pint doesn't understand gpm
data['Geopotential_height'].attrs['units'] = 'meter'
hgt = data['Geopotential_height'][0]
#############################
# To properly interpolate to isentropic coordinates, the function must know the desired output
# isentropic levels. An array with these levels will be created below.
isentlevs = [296.] * units.kelvin
####################################
# **Conversion to Isentropic Coordinates**
#
# Once three dimensional data in isobaric coordinates has been pulled and the desired
# isentropic levels created, the conversion to isentropic coordinates can begin. Data will be
# passed to the function as below. The function requires that isentropic levels, isobaric
# levels, and temperature be input. Any additional inputs (in this case relative humidity, u,
# and v wind components) will be linearly interpolated to isentropic space.
isent_anal = mpcalc.isentropic_interpolation(isentlevs,
lev,
tmp,
spech,
uwnd,
vwnd,
hgt,
tmpk_out=True)
#####################################
# The output is a list, so now we will separate the variables to different names before
# plotting.
isentprs, isenttmp, isentspech, isentu, isentv, isenthgt = isent_anal
isentu.ito('kt')
isentv.ito('kt')
########################################
# A quick look at the shape of these variables will show that the data is now in isentropic
# coordinates, with the number of vertical levels as specified above.
print(isentprs.shape)
print(isentspech.shape)
print(isentu.shape)
print(isentv.shape)
print(isenttmp.shape)
print(isenthgt.shape)
#################################
# **Converting to Relative Humidity**
#
# The NARR only gives specific humidity on isobaric vertical levels, so relative humidity will
# have to be calculated after the interpolation to isentropic space.
isentrh = 100 * mpcalc.relative_humidity_from_specific_humidity(isentspech, isenttmp, isentprs)
#######################################
# **Plotting the Isentropic Analysis**
# Set up our projection
crs = ccrs.LambertConformal(central_longitude=-100.0, central_latitude=45.0)
# Coordinates to limit map area
bounds = [(-122., -75., 25., 50.)]
# Choose a level to plot, in this case 296 K
level = 0
fig = plt.figure(figsize=(17., 12.))
add_metpy_logo(fig, 120, 245, size='large')
ax = fig.add_subplot(1, 1, 1, projection=crs)
ax.set_extent(*bounds, crs=ccrs.PlateCarree())
ax.add_feature(cfeature.COASTLINE.with_scale('50m'), linewidth=0.75)
ax.add_feature(cfeature.STATES, linewidth=0.5)
# Plot the surface
clevisent = np.arange(0, 1000, 25)
cs = ax.contour(lon, lat, isentprs[level, :, :], clevisent,
colors='k', linewidths=1.0, linestyles='solid', transform=ccrs.PlateCarree())
ax.clabel(cs, fontsize=10, inline=1, inline_spacing=7,
fmt='%i', rightside_up=True, use_clabeltext=True)
# Plot RH
cf = ax.contourf(lon, lat, isentrh[level, :, :], range(10, 106, 5),
cmap=plt.cm.gist_earth_r, transform=ccrs.PlateCarree())
cb = fig.colorbar(cf, orientation='horizontal', extend='max', aspect=65, shrink=0.5, pad=0.05,
extendrect='True')
cb.set_label('Relative Humidity', size='x-large')
# Plot wind barbs
ax.barbs(lon.values, lat.values, isentu[level, :, :].m, isentv[level, :, :].m, length=6,
regrid_shape=20, transform=ccrs.PlateCarree())
# Make some titles
ax.set_title('{:.0f} K Isentropic Pressure (hPa), Wind (kt), Relative Humidity (percent)'
.format(isentlevs[level].m), loc='left')
add_timestamp(ax, times[0].dt, y=0.02, high_contrast=True)
fig.tight_layout()
######################################
# **Montgomery Streamfunction**
#
# The Montgomery Streamfunction, :math:`{\psi} = gdz + CpT`, is often desired because its
# gradient is proportional to the geostrophic wind in isentropic space. This can be easily
# calculated with `mpcalc.montgomery_streamfunction`.
# Calculate Montgomery Streamfunction and scale by 10^-2 for plotting
msf = mpcalc.montgomery_streamfunction(isenthgt, isenttmp) / 100.
# Choose a level to plot, in this case 296 K
level = 0
fig = plt.figure(figsize=(17., 12.))
add_metpy_logo(fig, 120, 250, size='large')
ax = plt.subplot(111, projection=crs)
ax.set_extent(*bounds, crs=ccrs.PlateCarree())
ax.add_feature(cfeature.COASTLINE.with_scale('50m'), linewidth=0.75)
ax.add_feature(cfeature.STATES.with_scale('50m'), linewidth=0.5)
# Plot the surface
clevmsf = np.arange(0, 4000, 5)
cs = ax.contour(lon, lat, msf[level, :, :], clevmsf,
colors='k', linewidths=1.0, linestyles='solid', transform=ccrs.PlateCarree())
ax.clabel(cs, fontsize=10, inline=1, inline_spacing=7,
fmt='%i', rightside_up=True, use_clabeltext=True)
# Plot RH
cf = ax.contourf(lon, lat, isentrh[level, :, :], range(10, 106, 5),
cmap=plt.cm.gist_earth_r, transform=ccrs.PlateCarree())
cb = fig.colorbar(cf, orientation='horizontal', extend='max', aspect=65, shrink=0.5, pad=0.05,
extendrect='True')
cb.set_label('Relative Humidity', size='x-large')
# Plot wind barbs.
ax.barbs(lon.values, lat.values, isentu[level, :, :].m, isentv[level, :, :].m, length=6,
regrid_shape=20, transform=ccrs.PlateCarree())
# Make some titles
ax.set_title('{:.0f} K Montgomery Streamfunction '.format(isentlevs[level].m) +
r'($10^{-2} m^2 s^{-2}$), ' +
'Wind (kt), Relative Humidity (percent)', loc='left')
add_timestamp(ax, times[0].dt, y=0.02, pretext='Valid: ', high_contrast=True)
fig.tight_layout()
plt.show()
| bsd-3-clause |
petebachant/PXL | pxl/tests/test_fdiff.py | 1 | 1436 | from __future__ import division, print_function
from .. import fdiff
from ..fdiff import *
import matplotlib.pyplot as plt
import pandas as pd
import os
import numpy as np
from uncertainties import unumpy
plot = False
def test_second_order_diff():
"""Test `second_order_diff`."""
# Create a non-equally spaced x vector
x = np.append(np.linspace(0, np.pi, 100),
np.linspace(np.pi + 0.01, 2*np.pi, 400))
u = np.sin(x)
dudx = second_order_diff(u, x)
assert dudx.shape == u.shape
# Assert that this function is almost identical to cos(x)
np.testing.assert_allclose(dudx, np.cos(x), rtol=1e-3)
if plot:
plt.plot(x, dudx, "-o", lw=2, alpha=0.5)
plt.plot(x, np.cos(x), "--^", lw=2, alpha=0.5)
plt.show()
def test_second_order_diff_uncertainties():
"""Test that `second_order_diff` works with uncertainties."""
# Create a non-equally spaced x vector
x = np.append(np.linspace(0, np.pi, 50),
np.linspace(np.pi + 0.01, 2*np.pi, 100))
x_unc = unumpy.uarray(x, np.ones(len(x))*1e-3)
u = unumpy.uarray(np.sin(x), np.ones(len(x))*1e-2)
dudx = second_order_diff(u, x)
print(dudx[:5])
print(dudx[-5:])
if plot:
plt.errorbar(x, unumpy.nominal_values(dudx), yerr=unumpy.std_devs(dudx),
fmt="-o", lw=2, alpha=0.5)
plt.plot(x, np.cos(x), "--^", lw=2, alpha=0.5)
plt.show()
| gpl-3.0 |
burjorjee/evolve-parities | evolveparities.py | 1 | 5098 | from contextlib import closing
from matplotlib.pyplot import plot, figure, hold, axis, ylabel, xlabel, savefig, title
from numpy import sort, logical_xor, transpose, logical_not
from numpy import cumsum, zeros
from numpy.random import rand, shuffle
from numpy import mod, floor
import time
import cloud
from durus.file_storage import FileStorage
from durus.connection import Connection
def bitFreqVisualizer(effectiveAttrIndices, bitFreqs, gen):
f = figure(1)
n = len(bitFreqs)
hold(False)
plot(range(n), bitFreqs,'b.', markersize=10)
hold(True)
plot(effectiveAttrIndices, bitFreqs[effectiveAttrIndices],'r.', markersize=10)
axis([0, n-1, 0, 1])
title("Generation = %s" % (gen,))
ylabel('Frequency of the Bit 1')
xlabel('Locus')
f.canvas.draw()
f.show()
def showExperimentTimeStamps():
with closing(FileStorage("results.durus")) as durus:
conn = Connection(durus)
return conn.get_root().keys()
def neap_uga(m, n, gens, probMutation, effectiveAttrIndices, probMisclassification, bitFreqVisualizer=None):
""" neap = "noisy effective attribute parity"
"""
pop = rand(m,n)<0.5
bitFreqHist= zeros((n,gens+1))
for t in range(gens+1):
print "Generation %s" % t
bitFreqs = pop.astype('float').sum(axis=0)/m
bitFreqHist[:,t] = transpose(bitFreqs)
if bitFreqVisualizer:
bitFreqVisualizer(bitFreqs,t)
fitnessVals = mod(pop[:, effectiveAttrIndices].astype('byte').sum(axis=1) +
(rand(m) < probMisclassification).astype('byte'),2)
totalFitness = sum (fitnessVals)
cumNormFitnessVals = cumsum(fitnessVals).astype('float')/totalFitness
parentIndices = zeros(2*m, dtype='int16')
markers = sort(rand(2*m))
ctr = 0
for idx in xrange(2*m):
while markers[idx]>cumNormFitnessVals[ctr]:
ctr += 1
parentIndices[idx] = ctr
shuffle(parentIndices)
crossoverMasks = rand(m, n) < 0.5
newPop = zeros((m, n), dtype='bool')
newPop[crossoverMasks] = pop[parentIndices[:m], :][crossoverMasks]
newPop[logical_not(crossoverMasks)] = pop[parentIndices[m:], :][logical_not(crossoverMasks)]
mutationMasks = rand(m, n)<probMutation
pop = logical_xor(newPop,mutationMasks)
return bitFreqHist[0, :], bitFreqHist[-1, :]
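def run_single_trial_locally(gens=100, popSize=500):
    """Illustrative helper (not used by the PiCloud pipeline below): run one
    neap_uga trial in-process. The default gens and popSize here are
    assumptions; the pipeline itself uses gens=1000 and a population of 1500."""
    k = 7
    return neap_uga(m=popSize,
                    n=k + 1,
                    gens=gens,
                    probMutation=0.004,
                    effectiveAttrIndices=range(k),
                    probMisclassification=0.20)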
def f(gens):
k = 7
n= k + 1
effectiveAttrIndices = range(k)
probMutation = 0.004
probMisclassification = 0.20
popSize = 1500
jid = cloud.call(neap_uga, **dict(m=popSize,
n=n,
gens=gens,
probMutation=probMutation,
effectiveAttrIndices=effectiveAttrIndices,
probMisclassification=probMisclassification))
print "Kicked off trial %s" % jid
return jid
def cloud_result(jid):
result = cloud.result(jid)
print "Retrieved results for trial %s" % jid
return result
def run_trials():
numTrials = 3000
gens = 1000
from multiprocessing.pool import ThreadPool as Pool
pool = Pool(50)
jids = pool.map(f,[gens]*numTrials)
print "Done spawning trials. Retrieving results..."
results = pool.map(cloud_result, jids)
firstLocusFreqsHists = zeros((numTrials,gens+1), dtype='float')
lastLocusFreqsHists = zeros((numTrials,gens+1), dtype='float')
print "Done retrieving results. Press Enter to serialize..."
raw_input()
for i, result in enumerate(results):
firstLocusFreqsHists[i, :], lastLocusFreqsHists[i, :] = result
with closing(FileStorage("results.durus")) as durus:
conn = Connection(durus)
conn.get_root()[str(int(floor(time.time())))] = (firstLocusFreqsHists, lastLocusFreqsHists)
conn.commit()
pool.close()
pool.join()
def render_results(timestamp=None):
with closing(FileStorage("results.durus")) as durus:
conn = Connection(durus)
db = conn.get_root()
if not timestamp:
timestamp = sorted(db.keys())[-1]
firstLocusFreqsHists, lastLocusFreqsHists = db[timestamp]
print "Done deserializing results. Plotting..."
x = [(2, 'First', firstLocusFreqsHists, "effective"),
(3, 'Last', lastLocusFreqsHists, "non-effective")]
for i, pos, freqsHists, filename in x :
freqsHists = freqsHists[:,:801]
f = figure(i)
hold(False)
plot(transpose(freqsHists), color='grey')
hold(True)
maxGens = freqsHists.shape[1]-1
plot([0, maxGens], [.05,.05], 'k--')
plot([0, maxGens], [.95,.95], 'k--')
axis([0, maxGens, 0, 1])
xlabel('Generation')
ylabel('1-Frequency of the '+pos+' Locus')
f.canvas.draw()
f.show()
savefig(filename+'.png', format='png', dpi=200)
if __name__ == "__main__":
cloud.start_simulator()
run_trials()
render_results()
print "Done plotting results. Press Enter to end..."
raw_input()
| gpl-3.0 |
cdek11/PLS | Code/PLS_Algorithm_Optimized.py | 2 | 5817 |
# coding: utf-8
# In[2]:
# Code to implement the optimized version of the PLS Algorithm
import pandas as pd
import numpy as np
import numba
from numba import jit
@jit
def mean_center_scale(dataframe):
'''Scale dataframe by subtracting mean and dividing by standard deviation'''
dataframe = dataframe - dataframe.mean()
dataframe = dataframe/dataframe.std()
return dataframe
@jit
def y_pred(Y_pred, i,b_dictionary,t_hat_dictionary,q_new_dictionary):
'''Find prediction for Y based on the number of components in this iteration'''
for j in range(1,i+1):
Y_pred = Y_pred + (b_dictionary[j]*t_hat_dictionary[j]).dot(q_new_dictionary[j].T)
return Y_pred
@jit
def rmse(i,Y_true, Y_pred, response_std, RMSE_dictionary):
'''Find training RMSE'''
RMSE = np.sqrt(sum((Y_true - Y_pred)**2)/Y_true.shape[0])
RMSE_scaled = RMSE * response_std
RMSE_dictionary[i] = RMSE_scaled
return RMSE_dictionary
@jit
def core_pls(i,Y, X, q_new_dictionary, b_dictionary, t_hat_dictionary) :
'''Core PLS algorithm'''
#Here we have one variable in the Y block so q = 1
#and omit steps 5-8
q = 1
#For the X block, u = Y
u = Y #random y column from Y #Step 1
w_old = np.dot(u.T,X)/np.dot(u.T,u) #Step 2
w_new = w_old/np.linalg.norm(w_old) #Step 3
t = np.dot(X,w_new.T)/np.dot(w_new,w_new.T) #Step 4
#For the Y block can be omitted if Y only has one variable
q_old = np.dot(t.T,Y)/np.dot(t.T,t) #Step 5
q_new = q_old/np.linalg.norm(q_old) #Step 6
q_new_dictionary[i] = q_new
u = np.dot(Y,q_new.T)/np.dot(q_new,q_new.T) #Step 7
#Step 8: Check convergence
#Calculate the X loadings and rescale the scores and weights accordingly
p = np.dot(t.T,X)/np.dot(t.T,t) #Step 9
p_new = p.T/np.linalg.norm(p.T) #Step 10
t_new = t/np.linalg.norm(p.T) #Step 11
w_new = w_old/np.linalg.norm(p) #Step 12
#Find the regression coefficient for b for th inner relation
b = np.dot(u.T,t_new)/np.dot(t.T,t) #Step 13
b_dictionary[i] = b
#Calculation of the residuals
E_h = X - np.dot(t_new,p_new.T)
F_h = Y - b.dot(t_new.T).T.dot(q) #WORKS BUT IS THIS RIGHT?
#Set outer relation for the X block
#Xres_dictionary[i] = E_h #MAYBE REMOVE
X = E_h
#Set the mixed relation for the Y block
#Yres_dictionary[i] = F_h 3MAYBE REMOVE
Y = F_h
#Find estimated t hat
t_hat = np.dot(E_h,w_new.T)
t_hat_dictionary[i] = t_hat
E_h = E_h - np.dot(t_hat,p_new.T)
return X,Y, u, w_new, q_new, t_new, p_new, q_new_dictionary, t_hat_dictionary, b_dictionary,E_h, F_h
def pls_optimized(path, path_test, predictors, response):
'''Function that takes a dataframe and runs partial least squares on numeric predictors for a numeric response.
Returns the residuals of the predictor (X block), response (Y block), and traininig RMSE'''
###TRAINING DATA
combined = predictors
#Load data
data = pd.DataFrame.from_csv(path)
combined.append(response)
data = data[combined]
response_std = data[response].std()
#Subtract the mean and scale each column
data = mean_center_scale(data)
#Separate in to design matrix (X block) and response column vector (Y block)
predictors.pop()
X = data[predictors].as_matrix()
Y = data[[response]].as_matrix()
Y_true = Y #For prediction
#Get rank of matrix
rank = np.linalg.matrix_rank(X)
u = Y #set initial u as Y
Xres_dictionary = {}
Yres_dictionary = {}
q_new_dictionary ={}
b_dictionary = {}
t_hat_dictionary = {}
t_hat_train_dictionary = {}
t_hat_test_dictionary = {}
RMSE_dictionary = {}
RMSE_test_dictionary = {}
###TEST DATA
#Load data
data_test = pd.DataFrame.from_csv(path_test)
combined.append(response)
data_test = data_test[combined]
response_std_test = data_test[response].std()
#Subtract the mean and scale each column
data_test = mean_center_scale(data_test)
#Separate in to design matrix (X block) and response column vector (Y block)
predictors.pop()
X_test = data[predictors].as_matrix()
Y_test = data[[response]].as_matrix()
Y_true_test = Y_test #For prediction
#Get rank of matrix
rank_test = np.linalg.matrix_rank(X_test)
#Iterate through each component
for i in range(1,(rank+1)):
Y_pred = np.zeros((Y_true.shape[0],1))
Y_pred_test = np.zeros((Y_true_test.shape[0],1))
#Core algo
X,Y, u, w_new, q_new, t_new, p_new, q_new_dictionary, t_hat_dictionary, b_dictionary,E_h, F_h = core_pls(i,Y, X, q_new_dictionary, b_dictionary, t_hat_dictionary)
#NEW Sum over different compenents
for g in range(1,i+1):
t_hat_train = np.dot(E_h,w_new.T)
t_hat_train_dictionary[g] = t_hat_train
E_h = E_h - np.dot(t_hat_train, p_new.T)
Y_pred = y_pred(Y_pred, g,b_dictionary,t_hat_dictionary,q_new_dictionary)
#Find training RMSE
RMSE_dictionary = rmse(i,Y_true, Y_pred, response_std, RMSE_dictionary)
#Set initial E_h as X_test data
E_h_test = X_test
#Sum over different compenents
for k in range(1,i+1):
t_hat_test = np.dot(E_h_test,w_new.T)
t_hat_test_dictionary[k] = t_hat_test
E_h_test = E_h_test - np.dot(t_hat_test, p_new.T)
Y_pred_test = y_pred(Y_pred_test, k,b_dictionary,t_hat_test_dictionary,q_new_dictionary)
#Find test RMSE
RMSE_test_dictionary = rmse(i,Y_true_test, Y_pred_test, response_std_test, RMSE_test_dictionary)
return RMSE_dictionary, RMSE_test_dictionary
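# Illustrative usage sketch (the CSV paths and column names here are
# assumptions, not files that ship with this code):
#   train_rmse, test_rmse = pls_optimized(path='train.csv',
#                                         path_test='test.csv',
#                                         predictors=['x1', 'x2', 'x3'],
#                                         response='y')
# Each returned dictionary maps the number of PLS components used to the
# RMSE rescaled back into the units of the response.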
| mit |
versae/DH2304 | data/arts1.py | 1 | 1038 | import numpy as np
import pandas as pd
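# 'arts' starts out empty here; the artworks data (which must include an
# 'execution_date' column) is presumably loaded into this frame before the
# cleaning steps below are applied.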
arts = pd.DataFrame()
# Clean the dates so you only see numbers.
def clean_years(value):
result = value
chars_to_replace = ["c.", "©", ", CARCC", "no date", "n.d.", " SODRAC", ", CA", " CARCC", ""]
chars_to_split = ["-", "/"]
if isinstance(result, str):
for char in chars_to_split:
if char in result:
result = result.split(char)[1].strip()
for char in chars_to_replace:
result = result.replace(char, "")
if result == "":
return np.nan
else:
return int(result)
else:
return result
arts['execution_date'] = arts['execution_date'].apply(clean_years)
arts.head()
# If a year is lower than 100, then is referred to 1900. For example, 78 is actually 1978, and that needs to be fixed too.
def clean_year_99(value):
if value < 100:
return value + 1900
else:
return value
arts["execution_date"] = arts["execution_date"].apply(clean_year_99)
arts.head()
| mit |
valexandersaulys/airbnb_kaggle_contest | venv/lib/python3.4/site-packages/sklearn/neighbors/graph.py | 208 | 7031 | """Nearest Neighbors graph functions"""
# Author: Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from .base import KNeighborsMixin, RadiusNeighborsMixin
from .unsupervised import NearestNeighbors
def _check_params(X, metric, p, metric_params):
"""Check the validity of the input parameters"""
params = zip(['metric', 'p', 'metric_params'],
[metric, p, metric_params])
est_params = X.get_params()
for param_name, func_param in params:
if func_param != est_params[param_name]:
raise ValueError(
"Got %s for %s, while the estimator has %s for "
"the same parameter." % (
func_param, param_name, est_params[param_name]))
def _query_include_self(X, include_self, mode):
"""Return the query based on include_self param"""
# Done to preserve backward compatibility.
if include_self is None:
if mode == "connectivity":
warnings.warn(
"The behavior of 'kneighbors_graph' when mode='connectivity' "
"will change in version 0.18. Presently, the nearest neighbor "
"of each sample is the sample itself. Beginning in version "
"0.18, the default behavior will be to exclude each sample "
"from being its own nearest neighbor. To maintain the current "
"behavior, set include_self=True.", DeprecationWarning)
include_self = True
else:
include_self = False
if include_self:
query = X._fit_X
else:
query = None
return query
def kneighbors_graph(X, n_neighbors, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=None):
"""Computes the (weighted) graph of k-Neighbors for points in X
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
n_neighbors : int
Number of neighbors for each sample.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
metric : string, default 'minkowski'
The distance metric used to calculate the k-Neighbors for each sample
point. The DistanceMetric class gives a list of available metrics.
The default distance is 'euclidean' ('minkowski' metric with the p
param equal to 2.)
include_self: bool, default backward-compatible.
Whether or not to mark each sample as the first nearest neighbor to
itself. If `None`, then True is used for mode='connectivity' and False
        for mode='distance' as this will preserve backwards compatibility. From
version 0.18, the default value will be False, irrespective of the
value of `mode`.
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional
additional keyword arguments for the metric function.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import kneighbors_graph
>>> A = kneighbors_graph(X, 2)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
radius_neighbors_graph
"""
if not isinstance(X, KNeighborsMixin):
X = NearestNeighbors(n_neighbors, metric=metric, p=p,
metric_params=metric_params).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X, include_self, mode)
return X.kneighbors_graph(X=query, n_neighbors=n_neighbors, mode=mode)
def radius_neighbors_graph(X, radius, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=None):
"""Computes the (weighted) graph of Neighbors for points in X
Neighborhoods are restricted the points at a distance lower than
radius.
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
radius : float
Radius of neighborhoods.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
metric : string, default 'minkowski'
The distance metric used to calculate the neighbors within a
given radius for each sample point. The DistanceMetric class
gives a list of available metrics. The default distance is
'euclidean' ('minkowski' metric with the param equal to 2.)
include_self: bool, default None
Whether or not to mark each sample as the first nearest neighbor to
itself. If `None`, then True is used for mode='connectivity' and False
        for mode='distance' as this will preserve backwards compatibility. From
version 0.18, the default value will be False, irrespective of the
value of `mode`.
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional
additional keyword arguments for the metric function.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import radius_neighbors_graph
>>> A = radius_neighbors_graph(X, 1.5)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
if not isinstance(X, RadiusNeighborsMixin):
X = NearestNeighbors(radius=radius, metric=metric, p=p,
metric_params=metric_params).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X, include_self, mode)
return X.radius_neighbors_graph(query, radius, mode)
| gpl-2.0 |
vtsuperdarn/davitpy | davitpy/pydarn/proc/music/music.py | 2 | 85275 | # -*- coding: utf-8 -*-
# Copyright (C) 2012 VT SuperDARN Lab
# Full license can be found in LICENSE.txt
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""music processing module
A module for running the MUltiple SIgnal Classification (MUSIC) algorithm for the detection of
MSTIDs and wave-like structures in SuperDARN data.
For usage examples, please see the iPython notebooks included in the docs folder of the DaViTPy distribution.
References
----------
See Samson et al. [1990] and Bristow et al. [1994] for details regarding the MUSIC algorithm and SuperDARN-observed MSTIDs.
Bristow, W. A., R. A. Greenwald, and J. C. Samson (1994), Identification of high-latitude acoustic gravity wave sources
using the Goose Bay HF Radar, J. Geophys. Res., 99(A1), 319-331, doi:10.1029/93JA01470.
Samson, J. C., R. A. Greenwald, J. M. Ruohoniemi, A. Frey, and K. B. Baker (1990), Goose Bay radar observations of Earth-reflected,
atmospheric gravity waves in the high-latitude ionosphere, J. Geophys. Res., 95(A6), 7693-7709, doi:10.1029/JA095iA06p07693.
Module author:: Nathaniel A. Frissell, Fall 2013
Functions
--------------------------------------------------------------------------------------------------------------------------
getDataSet get music data object from music array object
stringify_signal convert dictionary to a string
stringify_signal_list convert list of dictionaries into strings
beamInterpolation interpolate music array object along beams
defineLimits set limits for chosen data set
checkDataQuality mark data as bad base on radar operations
applyLimits remove data outside of limits
determineRelativePosition find center of cell in music array object
timeInterpolation interpolate music array object along time
filterTimes calculate time range for data set
detrend linear detrend of music array/data object
nan_to_num convert undefined numbers to finite numbers
windowData apply window to music array object
calculateFFT calculate spectrum of an object
calculateDlm calculate the cross-spectral matrix of a musicArray/musicDataObj object.
calculateKarr calculate the two-dimensional horizontal wavenumber array of a musicArray/musicDataObj object.
simulator insert a simulated MSTID into the processing chain.
scale_karr scale/normalize kArr for plotting and signal detection.
detectSignals detect local maxima of signals
add_signal add signal to detected signal list
del_signal remove signal from detected signal list
--------------------------------------------------------------------------------------------------------------------------
Classes
-----------------------------------------------------------
emptyObj create an empty object
SigDetect information about detected signals
musicDataObj basic container for holding MUSIC data.
musicArray container object for holding musicDataObj's
filter a filter object for VT sig/siStruct objects
-----------------------------------------------------------
"""
import numpy as np
import datetime
import time
import copy
import logging
Re = 6378 #Earth radius
def getDataSet(dataObj,dataSet='active'):
"""Returns a specified musicDataObj from a musicArray object. If the musicArray object has the exact attribute
specified in the dataSet keyword, then that attribute is returned. If not, all attributes of the musicArray object
will be searched for attributes which contain the string specified in the dataSet keyword. If more than one are
found, the last attribute of a sorted list will be returned. If no attributes are found which contain the specified
string, the 'active' dataSet is returned.
Parameters
----------
dataObj : musicArray
dataSet : Optional[str]
which dataSet in the musicArray object to process
Returns
-------
currentData : musicDataObj object
Written by Nathaniel A. Frissell, Fall 2013
"""
lst = dir(dataObj)
if dataSet not in lst:
tmp = []
for item in lst:
if dataSet in item:
tmp.append(item)
if len(tmp) == 0:
dataSet = 'active'
else:
tmp.sort()
dataSet = tmp[-1]
currentData = getattr(dataObj,dataSet)
return currentData
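# Example usage (illustrative; assumes dataObj is a musicArray instance that
# contains a data set whose name includes 'beamInterpolated'):
#   currentData = getDataSet(dataObj, 'beamInterpolated')
#   currentData = getDataSet(dataObj)  # returns the 'active' data set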
class emptyObj(object):
"""Create an empty object.
"""
def __init__(self):
pass
def stringify_signal(sig):
"""Method to convert a signal information dictionary into a string.
Parameters
----------
sig : dict
Information about a detected signal.
Returns
-------
sigInfo : str
String representation of the signal information.
Written by Nathaniel A. Frissell, Fall 2013
"""
sigInfo = {}
if sig.has_key('order'):
sigInfo['order'] = '%d' % sig['order'] #Order of signals by strength as detected by image detection algorithm
if sig.has_key('kx'):
sigInfo['kx'] = '%.5f' % sig['kx']
if sig.has_key('ky'):
sigInfo['ky'] = '%.5f' % sig['ky']
if sig.has_key('k'):
sigInfo['k'] = '%.3f' % sig['k']
if sig.has_key('lambda'):
if np.isinf(sig['lambda']):
sigInfo['lambda'] = 'inf'
else:
sigInfo['lambda'] = '%d' % np.round(sig['lambda']) # km
if sig.has_key('lambda_x'):
if np.isinf(sig['lambda_x']):
sigInfo['lambda_x'] = 'inf'
else:
sigInfo['lambda_x'] = '%d' % np.round(sig['lambda_x']) # km
if sig.has_key('lambda_y'):
if np.isinf(sig['lambda_y']):
sigInfo['lambda_y'] = 'inf'
else:
sigInfo['lambda_y'] = '%d' % np.round(sig['lambda_y']) # km
if sig.has_key('azm'):
sigInfo['azm'] = '%d' % np.round(sig['azm']) # degrees
if sig.has_key('freq'):
sigInfo['freq'] = '%.2f' % (sig['freq']*1000.) # mHz
if sig.has_key('period'):
sigInfo['period'] = '%d' % np.round(sig['period']/60.) # minutes
if sig.has_key('vel'):
if np.isinf(np.round(sig['vel'])):
sigInfo['vel'] = 'Inf'
else:
sigInfo['vel'] = '%d' % np.round(sig['vel']) # km/s
if sig.has_key('area'):
sigInfo['area'] = '%d' % sig['area'] # Pixels
if sig.has_key('max'):
sigInfo['max'] = '%.4f' % sig['max'] # Value from kArr in arbitrary units, probably with some normalization
if sig.has_key('maxpos'):
sigInfo['maxpos'] = str(sig['maxpos']) # Index position in kArr of maximum value.
if sig.has_key('labelInx'):
sigInfo['labelInx'] = '%d' % sig['labelInx'] # Label value from image processing
if sig.has_key('serialNr'):
sigInfo['serialNr'] = '%d' % sig['serialNr'] # Label value from image processing
return sigInfo
def stringify_signal_list(signal_list,sort_key='order'):
"""Method to convert a list of signal dictionaries into strings.
Parameters
----------
signal_list : list of dict
Information about a detected signal.
sort_key : Optional[string]
Dictionary key to sort on, or None for no sort. 'order' will sort the signal list
from strongest signal to weakest, as determined by the MUSIC algorithm.
Returns
-------
stringInfo : list of str
String representation of the signal information.
Written by Nathaniel A. Frissell, Fall 2013
"""
string_info = []
if sort_key is not None:
orders = [x[sort_key] for x in signal_list]
orders.sort()
for order in orders:
for sig in signal_list:
if sig[sort_key] == order:
string_info.append(stringify_signal(sig))
signal_list.remove(sig)
else:
for sig in signal_list:
string_info.append(stringify_signal(sig))
return string_info
class SigDetect(object):
"""Class to hold information about detected signals.
Methods
-------
string
reorder
Written by Nathaniel A. Frissell, Fall 2013
"""
def __init__(self):
pass
def string(self):
"""Method to convert a list of signal dictionaries into strings.
Returns
-------
stringInfo : list of str
String representation of the signal information.
Written by Nathaniel A. Frissell, Fall 2013
"""
return stringify_signal_list(self.info)
def reorder(self):
"""Method to sort items in .info by signal maximum value (from the scaled kArr) and update nrSignals.
Written by Nathaniel A. Frissell, Fall 2013
"""
#Do the sorting...
from operator import itemgetter
newlist = sorted(self.info,key=itemgetter('max'),reverse=True)
#Put in the order numbers...
order = 1
for item in newlist:
item['order'] = order
order = order + 1
#Save the list to the dataObj...
self.info = newlist
#Update the nrSigs
self.nrSigs = len(newlist)
class musicDataObj(object):
"""This class is the basic container for holding MUSIC data.
Parameters
----------
time : list of datetime.datetime
list of times corresponding to data
data : numpy.array
3-dimensional array of data
fov : Optional[pydarn.radar.radFov.fov]
Radar field-of-view object.
comment : Optional[str]
String to be appended to the history of this object
parent : Optional[musicArray]
reference to parent musicArray object
**metadata
keywords sent to matplot lib, etc.
Attributes
----------
time : numpy.array of datetime.datetime
numpy array of times corresponding to data
data : numpy.array
3-dimensional array of data
fov : Optional[pydarn.radar.radFov.fov]
Radar field-of-view object.
metadata : dict
keywords sent to matplot lib, etc.
history : dict
Methods
---------
copy
setActive
nyquistFrequency
samplePeriod
applyLimits
setMetadata
printMetadata
appendHistory
printHistory
Written by Nathaniel A. Frissell, Fall 2013
"""
def __init__(self, time, data, fov=None, comment=None, parent=0, **metadata):
self.parent = parent
self.time = np.array(time)
self.data = np.array(data)
self.fov = fov
self.metadata = {}
for key in metadata: self.metadata[key] = metadata[key]
self.history = {datetime.datetime.now():comment}
def copy(self,newsig,comment):
"""Copy a musicDataObj object. This deep copies data and metadata, updates the serial
number, and logs a comment in the history. Methods such as plot are kept as a reference.
Parameters
----------
newsig : str
Name for the new musicDataObj object.
comment : str
Comment describing the new musicDataObj object.
Returns
-------
newsigobj : musicDataObj
Copy of the original musicDataObj with new name and history entry.
Written by Nathaniel A. Frissell, Fall 2013
"""
serial = self.metadata['serial'] + 1
newsig = '_'.join(['DS%03d' % serial,newsig])
setattr(self.parent,newsig,copy.copy(self))
newsigobj = getattr(self.parent,newsig)
newsigobj.time = copy.deepcopy(self.time)
newsigobj.data = copy.deepcopy(self.data)
newsigobj.fov = copy.deepcopy(self.fov)
newsigobj.metadata = copy.deepcopy(self.metadata)
newsigobj.history = copy.deepcopy(self.history)
newsigobj.metadata['dataSetName'] = newsig
newsigobj.metadata['serial'] = serial
newsigobj.history[datetime.datetime.now()] = '['+newsig+'] '+comment
return newsigobj
def setActive(self):
"""Sets this signal as the currently active signal.
Written by Nathaniel A. Frissell, Fall 2013
"""
self.parent.active = self
def nyquistFrequency(self,timeVec=None):
"""Calculate the Nyquist frequency of a vt sigStruct signal.
Parameters
----------
timeVec : Optional[list of datetime.datetime]
List of datetime.datetime to use instead of self.time.
Returns
-------
nq : float
Nyquist frequency of the signal in Hz.
Written by Nathaniel A. Frissell, Fall 2013
"""
dt = self.samplePeriod(timeVec=timeVec)
nyq = float(1. / (2*dt))
return nyq
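    # Worked example (numbers are assumed): for a regular 120 s scan cadence,
    # samplePeriod() returns 120.0, so nyquistFrequency() returns
    # 1.0 / (2 * 120.0) = 0.0041666... Hz (about 4.2 mHz).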
def samplePeriod(self,timeVec=None):
"""Calculate the sample period of a vt sigStruct signal.
Parameters
----------
timeVec : Optional[list of datetime.datetime]
List of datetime.datetime to use instead of self.time.
Returns
-------
samplePeriod : float
samplePeriod: sample period of signal in seconds.
Written by Nathaniel A. Frissell, Fall 2013
"""
if timeVec == None: timeVec = self.time
diffs = np.diff(timeVec)
diffs_unq = np.unique(diffs)
self.diffs = diffs_unq
if len(diffs_unq) == 1:
samplePeriod = diffs[0].total_seconds()
else:
diffs_sec = np.array([x.total_seconds() for x in diffs])
maxDt = np.max(diffs_sec)
avg = np.mean(diffs_sec)
md = self.metadata
warn = 'WARNING'
if md.has_key('title'): warn = ' '.join([warn,'FOR','"'+md['title']+'"'])
logging.warning(warn + ':')
logging.warning(' Date time vector is not regularly sampled!')
logging.warning(' Maximum difference in sampling rates is ' + str(maxDt) + ' sec.')
logging.warning(' Using average sampling period of ' + str(avg) + ' sec.')
samplePeriod = avg
return samplePeriod
def applyLimits(self,rangeLimits=None,gateLimits=None,timeLimits=None,newDataSetName='limitsApplied',comment='Limits Applied'):
"""Removes data outside of the rangeLimits, gateLimits, and timeLimits boundaries.
Parameters
----------
        rangeLimits : Optional[iterable]
            Two-element array defining the maximum and minimum slant ranges to use. [km]
        gateLimits : Optional[iterable]
            Two-element array defining the maximum and minimum gates to use.
timeLimits : Optional[]
newDataSetName : Optional[str]
Name of the new musicDataObj to be created in the current musicArray object as a result of this processing.
comment : Optional[str]
String to be appended to the history of this object.
Returns
-------
newMusicDataObj : musicDataObj
New musicDataObj. The musicDataObj is also stored in it's parent musicArray object.
Written by Nathaniel A. Frissell, Fall 2013
"""
return applyLimits(self.parent,self.metadata['dataSetName'],rangeLimits=rangeLimits,gateLimits=gateLimits,timeLimits=timeLimits,newDataSetName=newDataSetName,comment=comment)
def setMetadata(self,**metadata):
"""Adds information to the current musicDataObj's metadata dictionary.
        Metadata affects various plotting parameters and signal processing routines.
Parameters
----------
**metadata :
keywords sent to matplot lib, etc.
Written by Nathaniel A. Frissell, Fall 2013
"""
self.metadata = dict(self.metadata.items() + metadata.items())
def printMetadata(self):
"""Nicely print all of the metadata associated with the current musicDataObj object.
Written by Nathaniel A. Frissell, Fall 2013
"""
keys = self.metadata.keys()
keys.sort()
for key in keys:
print key+':',self.metadata[key]
def appendHistory(self,comment):
"""Add an entry to the processing history dictionary of the current musicDataObj object.
Parameters
----------
comment : string
            Information to add to the history dictionary.
Written by Nathaniel A. Frissell, Fall 2013
"""
self.history[datetime.datetime.now()] = '['+self.metadata['dataSetName']+'] '+comment
def printHistory(self):
"""Nicely print all of the processing history associated with the current musicDataObj object.
Written by Nathaniel A. Frissell, Fall 2013
"""
keys = self.history.keys()
keys.sort()
for key in keys:
print key,self.history[key]
class musicArray(object):
"""This class is the basic container for holding MUSIC data.
Parameters
----------
myPtr : pydarn.sdio.radDataTypes.radDataPtr
contains the pipeline to the data we are after
sTime : Optional[datetime.datetime]
start time UT (if None myPtr.sTime is used)
eTime : Optional[datetime.datetime]
end time UT (if None myPtr.eTime is used)
param : Optional[str]
Radar FIT parameter to load and process. Any appropriate attribute of the
FIT data structure is allowed.
gscat : Optional[int]
Ground scatter flag.
0: all backscatter data
1: ground backscatter only
2: ionospheric backscatter only
3: all backscatter data with a ground backscatter flag.
fovElevation : Optional[float]
Passed directly to pydarn.radar.radFov.fov()
fovModel : Optional[str]
Scatter mapping model.
GS : Ground Scatter Mapping Model. See Bristow et al. [1994] (default)
IS : Standard SuperDARN scatter mapping model.
S : Standard projection model
E1 : for Chisham E-region 1/2-hop ionospheric projection model
F1 : for Chisham F-region 1/2-hop ionospheric projection model
F3 : for Chisham F-region 1 1/2-hop ionospheric projection model
C : Chisham projection model
None : if you trust your elevation or altitude values
fovCoords : Optional[str]
        Map coordinate system. WARNING: 'geo' is currently the only tested coordinate system.
full_array : Optional[bool]
If True, make the data array the full beam, gate dimensions listed in the hdw.dat file.
If False, truncate the array to the maximum dimensions that there is actually data.
False will save space without throwing out any data, but sometimes it is easier to work
with the full-size array.
Attributes
----------
messages : list
prm :
Methods
-------
get_data_sets
Example
-------
#Set basic event parameters.
rad ='wal'
sTime = datetime.datetime(2011,5,9,8,0)
eTime = datetime.datetime(2011,5,9,19,0)
#Connect to a SuperDARN data source.
myPtr = pydarn.sdio.radDataOpen(sTime,rad,eTime=eTime)
#Create the musicArray Object.
dataObj = music.musicArray(myPtr,fovModel='GS')
References
----------
Bristow, W. A., R. A. Greenwald, and J. C. Samson (1994), Identification of high-latitude acoustic gravity wave sources
using the Goose Bay HF Radar, J. Geophys. Res., 99(A1), 319-331, doi:10.1029/93JA01470.
Written by Nathaniel A. Frissell, Fall 2013
"""
def __init__(self,myPtr,sTime=None,eTime=None,param='p_l',gscat=1,
fovElevation=None,fovModel='GS',fovCoords='geo',full_array=False):
from davitpy import pydarn
# Create a list that can be used to store top-level messages.
self.messages = []
no_data_message = 'No data for this time period.'
# If no data, report and return.
if myPtr is None:
self.messages.append(no_data_message)
return
if sTime == None: sTime = myPtr.sTime
if eTime == None: eTime = myPtr.eTime
scanTimeList = []
dataList = []
cpidList = []
#Subscripts of columns in the dataList/dataArray
scanInx = 0
dateInx = 1
beamInx = 2
gateInx = 3
dataInx = 4
beamTime = sTime
scanNr = np.uint64(0)
fov = None
# Create a place to store the prm data.
prm = emptyObj()
prm.time = []
prm.mplgs = []
prm.nave = []
prm.noisesearch = []
prm.scan = []
prm.smsep = []
prm.mplgexs = []
prm.xcf = []
prm.noisesky = []
prm.rsep = []
prm.mppul = []
prm.inttsc = []
prm.frang = []
prm.bmazm = []
prm.lagfr = []
prm.ifmode = []
prm.noisemean = []
prm.tfreq = []
prm.inttus = []
prm.rxrise = []
prm.mpinc = []
prm.nrang = []
while beamTime < eTime:
#Load one scan into memory.
# myScan = pydarn.sdio.radDataRead.radDataReadScan(myPtr)
myScan = myPtr.readScan()
if myScan == None: break
goodScan = False # This flag turns to True as soon as good data is found for the scan.
for myBeam in myScan:
#Calculate the field of view if it has not yet been calculated.
if fov == None:
radStruct = pydarn.radar.radStruct.radar(radId=myPtr.stid)
site = pydarn.radar.radStruct.site(radId=myPtr.stid,dt=sTime)
fov = pydarn.radar.radFov.fov(frang=myBeam.prm.frang, rsep=myBeam.prm.rsep, site=site,elevation=fovElevation,model=fovModel,coords=fovCoords)
#Get information from each beam in the scan.
beamTime = myBeam.time
bmnum = myBeam.bmnum
# Save all of the radar operational parameters.
prm.time.append(beamTime)
prm.mplgs.append(myBeam.prm.mplgs)
prm.nave.append(myBeam.prm.nave)
prm.noisesearch.append(myBeam.prm.noisesearch)
prm.scan.append(myBeam.prm.scan)
prm.smsep.append(myBeam.prm.smsep)
prm.mplgexs.append(myBeam.prm.mplgexs)
prm.xcf.append(myBeam.prm.xcf)
prm.noisesky.append(myBeam.prm.noisesky)
prm.rsep.append(myBeam.prm.rsep)
prm.mppul.append(myBeam.prm.mppul)
prm.inttsc.append(myBeam.prm.inttsc)
prm.frang.append(myBeam.prm.frang)
prm.bmazm.append(myBeam.prm.bmazm)
prm.lagfr.append(myBeam.prm.lagfr)
prm.ifmode.append(myBeam.prm.ifmode)
prm.noisemean.append(myBeam.prm.noisemean)
prm.tfreq.append(myBeam.prm.tfreq)
prm.inttus.append(myBeam.prm.inttus)
prm.rxrise.append(myBeam.prm.rxrise)
prm.mpinc.append(myBeam.prm.mpinc)
prm.nrang.append(myBeam.prm.nrang)
#Get the fitData.
fitDataList = getattr(myBeam.fit,param)
slist = getattr(myBeam.fit,'slist')
gflag = getattr(myBeam.fit,'gflg')
if len(slist) > 1:
for (gate,data,flag) in zip(slist,fitDataList,gflag):
#Get information from each gate in scan. Skip record if the chosen ground scatter option is not met.
if (gscat == 1) and (flag == 0): continue
if (gscat == 2) and (flag == 1): continue
tmp = (scanNr,beamTime,bmnum,gate,data)
dataList.append(tmp)
goodScan = True
elif len(slist) == 1:
gate,data,flag = (slist[0],fitDataList[0],gflag[0])
#Get information from each gate in scan. Skip record if the chosen ground scatter option is not met.
if (gscat == 1) and (flag == 0): continue
if (gscat == 2) and (flag == 1): continue
tmp = (scanNr,beamTime,bmnum,gate,data)
dataList.append(tmp)
goodScan = True
else:
continue
if goodScan:
#Determine the start time for each scan and save to list.
scanTimeList.append(min([x.time for x in myScan]))
#Advance to the next scan number.
scanNr = scanNr + 1
#Convert lists to numpy arrays.
timeArray = np.array(scanTimeList)
dataListArray = np.array(dataList)
# If no data, report and return.
if dataListArray.size == 0:
self.messages.append(no_data_message)
return
#Figure out what size arrays we need and initialize the arrays...
nrTimes = int(np.max(dataListArray[:,scanInx]) + 1)
if full_array:
nrBeams = int(fov.beams.max() + 1)
nrGates = int(fov.gates.max() + 1)
else:
nrBeams = int(np.max(dataListArray[:,beamInx]) + 1)
nrGates = int(np.max(dataListArray[:,gateInx]) + 1)
#Make sure the FOV is the same size as the data array.
if len(fov.beams) != nrBeams:
fov.beams = fov.beams[0:nrBeams]
fov.latCenter = fov.latCenter[0:nrBeams,:]
fov.lonCenter = fov.lonCenter[0:nrBeams,:]
fov.slantRCenter = fov.slantRCenter[0:nrBeams,:]
fov.latFull = fov.latFull[0:nrBeams+1,:]
fov.lonFull = fov.lonFull[0:nrBeams+1,:]
fov.slantRFull = fov.slantRFull[0:nrBeams+1,:]
if len(fov.gates) != nrGates:
fov.gates = fov.gates[0:nrGates]
fov.latCenter = fov.latCenter[:,0:nrGates]
fov.lonCenter = fov.lonCenter[:,0:nrGates]
fov.slantRCenter = fov.slantRCenter[:,0:nrGates]
fov.latFull = fov.latFull[:,0:nrGates+1]
fov.lonFull = fov.lonFull[:,0:nrGates+1]
fov.slantRFull = fov.slantRFull[:,0:nrGates+1]
#Convert the dataListArray into a 3 dimensional array.
dataArray = np.ndarray([nrTimes,nrBeams,nrGates])
dataArray[:] = np.nan
for inx in range(len(dataListArray)):
dataArray[int(dataListArray[inx,scanInx]),int(dataListArray[inx,beamInx]),int(dataListArray[inx,gateInx])] = dataListArray[inx,dataInx]
#Make metadata block to hold information about the processing.
metadata = {}
metadata['dType'] = myPtr.dType
metadata['stid'] = myPtr.stid
metadata['name'] = radStruct.name
metadata['code'] = radStruct.code
metadata['fType'] = myPtr.fType
metadata['cp'] = myPtr.cp
metadata['channel'] = myPtr.channel
metadata['sTime'] = sTime
metadata['eTime'] = eTime
metadata['param'] = param
metadata['gscat'] = gscat
metadata['elevation'] = fovElevation
metadata['model'] = fovModel
metadata['coords'] = fovCoords
dataSet = 'DS000_originalFit'
metadata['dataSetName'] = dataSet
metadata['serial'] = 0
comment = '['+dataSet+'] '+ 'Original Fit Data'
#Save data to be returned as self.variables
setattr(self,dataSet,musicDataObj(timeArray,dataArray,fov=fov,parent=self,comment=comment))
newSigObj = getattr(self,dataSet)
setattr(newSigObj,'metadata',metadata)
#Set the new data active.
newSigObj.setActive()
#Make prm data part of the object.
self.prm = prm
def get_data_sets(self):
"""Return a sorted list of musicDataObj's contained in this musicArray.
Returns
-------
dataSets : list of str
Names of musicDataObj's contained in this musicArray.
Written by Nathaniel A. Frissell, Fall 2013
"""
attrs = dir(self)
dataSets = []
for item in attrs:
if item.startswith('DS'):
dataSets.append(item)
dataSets.sort()
return dataSets
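# Illustrative usage sketch -- assumes `dataObj` is an already-populated
# musicArray instance; `dataObj` is a placeholder name for this example.
#
# for name in dataObj.get_data_sets():
#     print name, getattr(dataObj, name).metadata['dataSetName']
# currentData = getDataSet(dataObj, 'active')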
def beamInterpolation(dataObj,dataSet='active',newDataSetName='beamInterpolated',comment='Beam Linear Interpolation'):
"""Interpolates the data in a musicArray object along the beams of the radar. This method will ensure that no
range gates are missing data. Ranges outside of metadata['gateLimits'] will be set to 0.
The result is stored as a new musicDataObj in the given musicArray object.
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
newDataSetName : Optional[str]
Name of the new musicDataObj to be created in the current musicArray object as a result of this processing.
comment : Optional[str]
String to be appended to the history of this object.
Written by Nathaniel A. Frissell, Fall 2013
"""
from scipy.interpolate import interp1d
currentData = getDataSet(dataObj,dataSet)
nrTimes = len(currentData.time)
nrBeams = len(currentData.fov.beams)
nrGates = len(currentData.fov.gates)
interpArr = np.zeros([nrTimes,nrBeams,nrGates])
for tt in range(nrTimes):
for bb in range(nrBeams):
rangeVec = currentData.fov.slantRCenter[bb,:]
input_x = copy.copy(rangeVec)
input_y = currentData.data[tt,bb,:]
#If metadata['gateLimits'], select only those measurements...
if currentData.metadata.has_key('gateLimits'):
limits = currentData.metadata['gateLimits']
gateInx = np.where(np.logical_and(currentData.fov.gates >= limits[0],currentData.fov.gates <= limits[1]))[0]
if len(gateInx) < 2: continue
input_x = input_x[gateInx]
input_y = input_y[gateInx]
good = np.where(np.isfinite(input_y))[0]
if len(good) < 2: continue
input_x = input_x[good]
input_y = input_y[good]
intFn = interp1d(input_x,input_y,bounds_error=False,fill_value=0)
interpArr[tt,bb,:] = intFn(rangeVec)
newDataSet = currentData.copy(newDataSetName,comment)
newDataSet.data = interpArr
newDataSet.setActive()
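# Illustrative usage sketch -- assumes `dataObj` is a musicArray whose active
# data set still contains gaps along the range dimension. The call below fills
# those gaps and marks the interpolated copy as the new active data set.
#
# beamInterpolation(dataObj, dataSet='active')
# interpolated = getDataSet(dataObj, 'active')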
def defineLimits(dataObj,dataSet='active',rangeLimits=None,gateLimits=None,beamLimits=None,timeLimits=None):
"""Sets the range, gate, beam, and time limits for the chosen data set. This method only changes metadata;
it does not create a new data set or alter the data in any way. If you specify rangeLimits, they will be changed to correspond
with the center value of the range cell. Gate limits always override range limits.
Use the applyLimits() method to remove data outside of the data limits.
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
rangeLimits : Optional[iterable]
Two-element array defining the minimum and maximum slant ranges to use. [km]
gateLimits : Optional[iterable]
Two-element array defining the minimum and maximum gates to use.
beamLimits : Optional[iterable]
Two-element array defining the minimum and maximum beams to use.
timeLimits : Optional[iterable]
Two-element array of datetime.datetime objects defining the minimum and maximum times to use.
Written by Nathaniel A. Frissell, Fall 2013
"""
currentData = getDataSet(dataObj,dataSet)
try:
if (rangeLimits != None) or (gateLimits != None):
if (rangeLimits != None) and (gateLimits == None):
inx = np.where(np.logical_and(currentData.fov.slantRCenter >= rangeLimits[0],currentData.fov.slantRCenter <= rangeLimits[1]))
gateLimits = [np.min(inx[1][:]),np.max(inx[1][:])]
if gateLimits != None:
rangeMin = np.int(np.min(currentData.fov.slantRCenter[:,gateLimits[0]]))
rangeMax = np.int(np.max(currentData.fov.slantRCenter[:,gateLimits[1]]))
rangeLimits = [rangeMin,rangeMax]
currentData.metadata['gateLimits'] = gateLimits
currentData.metadata['rangeLimits'] = rangeLimits
if beamLimits != None:
currentData.metadata['beamLimits'] = beamLimits
if timeLimits != None:
currentData.metadata['timeLimits'] = timeLimits
except:
logging.warning("An error occured while defining limits. No limits set. Check your input values.")
def checkDataQuality(dataObj,dataSet='active',max_off_time=10,sTime=None,eTime=None):
"""Mark the data set as bad (metadata['good_period'] = False) if the radar was not operational within the chosen time period
for a specified length of time.
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
max_off_time : Optional[int/float]
Maximum length in minutes radar may remain off.
sTime : Optional[datetime.datetime]
Starting time of checking period. If None, min(currentData.time) is used.
eTime : Optional[datetime.datetime]
End time of checking period. If None, max(currentData.time) is used.
Written by Nathaniel A. Frissell, Fall 2013
"""
currentData = getDataSet(dataObj,dataSet)
if sTime is None:
sTime = np.min(currentData.time)
if eTime is None:
eTime = np.max(currentData.time)
time_vec = currentData.time[np.logical_and(currentData.time > sTime, currentData.time < eTime)]
time_vec = np.concatenate(([sTime],time_vec,[eTime]))
max_diff = np.max(np.diff(time_vec))
if max_diff > datetime.timedelta(minutes=max_off_time):
currentData.setMetadata(good_period=False)
else:
currentData.setMetadata(good_period=True)
return dataObj
def applyLimits(dataObj,dataSet='active',rangeLimits=None,gateLimits=None,timeLimits=None,newDataSetName='limitsApplied',comment=None):
"""Removes data outside of the rangeLimits and gateLimits boundaries.
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
rangeLimits : Optional[iterable]
Two-element array defining the minimum and maximum slant ranges to use. [km]
gateLimits : Optional[iterable]
Two-element array defining the minimum and maximum gates to use.
timeLimits : Optional[iterable]
Two-element array of datetime.datetime objects defining the minimum and maximum times to use.
newDataSetName : Optional[str]
Name of the new musicDataObj to be created in the current musicArray object as a result of this processing.
comment : Optional[str]
String to be appended to the history of this object. Set to None for the Default comment (recommended).
Returns
-------
newData : musicDataObj
Processed version of input musicDataObj (if succeeded), or the original musicDataObj (if failed).
Written by Nathaniel A. Frissell, Fall 2013
"""
if (rangeLimits != None) or (gateLimits != None) or (timeLimits != None):
defineLimits(dataObj,dataSet='active',rangeLimits=rangeLimits,gateLimits=gateLimits,timeLimits=timeLimits)
currentData = getDataSet(dataObj,dataSet)
try:
#Make a copy of the current data set.
commentList = []
if (currentData.metadata.has_key('timeLimits') == False and
currentData.metadata.has_key('beamLimits') == False and
currentData.metadata.has_key('gateLimits') == False):
return currentData
newData = currentData.copy(newDataSetName,comment)
#Apply the gateLimits
if currentData.metadata.has_key('gateLimits'):
limits = currentData.metadata['gateLimits']
gateInx = np.where(np.logical_and(currentData.fov.gates >= limits[0],currentData.fov.gates<= limits[1]))[0]
newData.data = newData.data[:,:,gateInx]
newData.fov.gates = newData.fov.gates[gateInx]
newData.fov.latCenter = newData.fov.latCenter[:,gateInx]
newData.fov.lonCenter = newData.fov.lonCenter[:,gateInx]
newData.fov.slantRCenter = newData.fov.slantRCenter[:,gateInx]
#Update the full FOV.
#This works as long as we look at only consecutive gates. If we ever do something where we are not looking at consecutive gates
#(typically for computational speed reasons), we will have to do something else.
gateInxFull = np.append(gateInx,gateInx[-1]+1) #We need that extra gate since this is the full FOV.
newData.fov.latFull = newData.fov.latFull[:,gateInxFull]
newData.fov.lonFull = newData.fov.lonFull[:,gateInxFull]
newData.fov.slantRFull = newData.fov.slantRFull[:,gateInxFull]
commentList.append('gate: %i,%i' % tuple(limits))
rangeLim = (np.min(newData.fov.slantRCenter), np.max(newData.fov.slantRCenter))
commentList.append('range [km]: %i,%i' % rangeLim)
#Remove limiting item from metadata.
newData.metadata.pop('gateLimits')
if newData.metadata.has_key('rangeLimits'): newData.metadata.pop('rangeLimits')
#Apply the beamLimits.
if currentData.metadata.has_key('beamLimits'):
limits = currentData.metadata['beamLimits']
beamInx = np.where(np.logical_and(currentData.fov.beams >= limits[0],currentData.fov.beams <= limits[1]))[0]
newData.data = newData.data[:,beamInx,:]
newData.fov.beams = newData.fov.beams[beamInx]
newData.fov.latCenter = newData.fov.latCenter[beamInx,:]
newData.fov.lonCenter = newData.fov.lonCenter[beamInx,:]
newData.fov.slantRCenter = newData.fov.slantRCenter[beamInx,:]
#Update the full FOV.
#This works as long as we look at only consecutive gates. If we ever do something where we are not looking at consecutive gates
#(typically for computational speed reasons), we will have to do something else.
beamInxFull = np.append(beamInx,beamInx[-1]+1) #We need that extra beam since this is the full FOV.
newData.fov.latFull = newData.fov.latFull[beamInxFull,:]
newData.fov.lonFull = newData.fov.lonFull[beamInxFull,:]
newData.fov.slantRFull = newData.fov.slantRFull[beamInxFull,:]
commentList.append('beam: %i,%i' % tuple(limits))
#Remove limiting item from metadata.
newData.metadata.pop('beamLimits')
#Apply the time limits.
if currentData.metadata.has_key('timeLimits'):
limits = currentData.metadata['timeLimits']
timeInx = np.where(np.logical_and(currentData.time >= limits[0],currentData.time <= limits[1]))[0]
newData.data = newData.data[timeInx,:,:]
newData.time = newData.time[timeInx]
commentList.append('time: '+limits[0].strftime('%Y-%m-%d/%H:%M,')+limits[1].strftime('%Y-%m-%d/%H:%M'))
#Remove limiting item from metadata.
newData.metadata.pop('timeLimits')
#Update the history with what limits were applied.
comment = 'Limits Applied'
commentStr = '['+newData.metadata['dataSetName']+'] '+comment+': '+'; '.join(commentList)
key = max(newData.history.keys())
newData.history[key] = commentStr
logging.debug(commentStr)
newData.setActive()
return newData
except:
if hasattr(dataObj,newDataSetName): delattr(dataObj,newDataSetName)
logging.warning('Limits not applied; returning the original data set.')
return currentData
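# Illustrative usage sketch -- `dataObj` is a placeholder musicArray instance.
# Limits are first recorded in the metadata with defineLimits() and then
# enforced with applyLimits(), which returns the trimmed data set.
#
# defineLimits(dataObj, gateLimits=[10, 40])
# trimmed = applyLimits(dataObj, newDataSetName='limitsApplied')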
def determineRelativePosition(dataObj,dataSet='active',altitude=250.):
"""Finds the center cell of the field-of-view of a musicArray data object.
The range, azimuth, x-range, and y-range from the center to each cell in the FOV
is calculated and saved to the FOV object. The following objects are added to
dataObj.dataSet:
fov.relative_centerInx: [beam, gate] index of the center cell
fov.relative_azm: Azimuth relative to center cell [deg]
fov.relative_range: Range relative to center cell [km]
fov.relative_x: X-range relative to center cell [km]
fov.relative_y: Y-range relative to center cell [km]
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
altitude : Optional[float]
altitude added to Re = 6378.1 km [km]
Returns
-------
None
Written by Nathaniel A. Frissell, Fall 2013
"""
from davitpy import utils
#Get the chosen dataset.
currentData = getDataSet(dataObj,dataSet)
#Determine center beam.
ctrBeamInx = len(currentData.fov.beams)/2
ctrGateInx = len(currentData.fov.gates)/2
currentData.fov.relative_centerInx = [ctrBeamInx, ctrGateInx]
#Set arrays of lat1/lon1 to the center cell value. Use this to calculate all other positions
#with numpy array math.
lat1 = np.zeros_like(currentData.fov.latCenter)
lon1 = np.zeros_like(currentData.fov.latCenter)
lat1[:] = currentData.fov.latCenter[ctrBeamInx,ctrGateInx]
lon1[:] = currentData.fov.lonCenter[ctrBeamInx,ctrGateInx]
#Make lat2/lon2 the center position array of the dataset.
lat2 = currentData.fov.latCenter
lon2 = currentData.fov.lonCenter
#Calculate the azimuth and distance from the centerpoint to the endpoint.
azm = utils.greatCircleAzm(lat1,lon1,lat2,lon2)
dist = (Re + altitude)*utils.greatCircleDist(lat1,lon1,lat2,lon2)
#Save calculated values to the current data object, as well as calculate the
#X and Y relatvie positions of each cell.
currentData.fov.relative_azm = azm
currentData.fov.relative_range = dist
currentData.fov.relative_x = dist * np.sin(np.radians(azm))
currentData.fov.relative_y = dist * np.cos(np.radians(azm))
return None
def timeInterpolation(dataObj,dataSet='active',newDataSetName='timeInterpolated',comment='Time Linear Interpolation',timeRes=10,newTimeVec=None):
"""Interpolates the data in a musicArray object to a regular time grid.
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
newDataSetName : Optional[str]
Name of the new musicDataObj to be created in the current musicArray object as a result of this processing.
comment : Optional[str]
String to be appended to the history of this object.
timeRes : Optional[float]
time resolution of new time vector [seconds]
newTimeVec : Optional[list of datetime.datetime]
Sequence of datetime.datetime objects that data will be interpolated to. This overides timeRes.
Written by Nathaniel A. Frissell, Fall 2013
"""
from scipy.interpolate import interp1d
from davitpy import utils
currentData = getDataSet(dataObj,dataSet)
sTime = currentData.time[0]
sTime = datetime.datetime(sTime.year,sTime.month,sTime.day,sTime.hour,sTime.minute) #Make start time a round time.
fTime = currentData.time[-1]
#Create new time vector.
if newTimeVec == None:
newTimeVec = [sTime]
while newTimeVec[-1] < fTime:
newTimeVec.append(newTimeVec[-1] + datetime.timedelta(seconds=timeRes))
#Ensure that the new time vector is within the bounds of the actual data set.
newTimeVec = np.array(newTimeVec)
good = np.where(np.logical_and(newTimeVec > min(currentData.time),newTimeVec < max(currentData.time)))
newTimeVec = newTimeVec[good]
newEpochVec = utils.datetimeToEpoch(newTimeVec)
#Initialize interpolated data.
nrTimes = len(newTimeVec)
nrBeams = len(currentData.fov.beams)
nrGates = len(currentData.fov.gates)
interpArr = np.zeros([nrTimes,nrBeams,nrGates])
for rg in range(nrGates):
for bb in range(nrBeams):
input_x = currentData.time[:]
input_y = currentData.data[:,bb,rg]
good = np.where(np.isfinite(input_y))[0]
if len(good) < 2: continue
input_x = input_x[good]
input_y = input_y[good]
input_x = utils.datetimeToEpoch(input_x)
intFn = interp1d(input_x,input_y,bounds_error=False)#,fill_value=0)
interpArr[:,bb,rg] = intFn(newEpochVec)
newDataSet = currentData.copy(newDataSetName,comment)
newDataSet.time = newTimeVec
newDataSet.data = interpArr
newDataSet.setActive()
def filterTimes(sTime,eTime,timeRes,numTaps):
"""The linear filter is going to cause a delay in the signal and also won't get to the end of the signal.
This function will calcuate the full time period of data that needs to be loaded in order to provide filtered data
for the event requested.
Parameters
----------
sTime : datetime.datetime
Start time of event.
eTime : datetime.datetime
End time of event.
timeRes : float
Time resolution in seconds of data to be sent to filter.
numTaps : int
Length of the filter
Returns
-------
newSTime, newETime : datetime.datetime, datetime.datetime
Start and end times of data that needs to be fed into the filter.
Written by Nathaniel A. Frissell, Fall 2013
"""
td = datetime.timedelta(seconds=(numTaps*timeRes/2.))
newSTime = sTime - td
newETime = eTime + td
return (newSTime, newETime)
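# Worked example -- with a 120 s sampling period and a 101-tap filter, the pad
# is 101*120/2 = 6060 s (1 h 41 min) on each side of the requested event:
#
# import datetime
# sTime = datetime.datetime(2011, 5, 9, 8, 0)
# eTime = datetime.datetime(2011, 5, 9, 12, 0)
# loadSTime, loadETime = filterTimes(sTime, eTime, timeRes=120, numTaps=101)
# # loadSTime -> 2011-05-09 06:19:00, loadETime -> 2011-05-09 13:41:00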
class filter(object):
"""Filter a VT sig/sigStruct object and define a FIR filter object.
If only cutoff_low is defined, this is a high pass filter.
If only cutoff_high is defined, this is a low pass filter.
If both cutoff_low and cutoff_high are defined, this is a band pass filter.
Uses scipy.signal.firwin()
High pass and band pass filters inspired by Matti Pastell's page:
http://mpastell.com/2010/01/18/fir-with-scipy/
Metadata keys:
'filter_cutoff_low' --> cutoff_low
'filter_cutoff_high' --> cutoff_high
'filter_numtaps' --> numtaps
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
numtaps : Optional[int]
Length of the filter (number of coefficients, i.e. the filter
order + 1). `numtaps` must be even if a passband includes the
Nyquist frequency.
If dataObj.dataSet.metadata['filter_numtaps'] is set and this keyword is None,
the metadata value will be used.
cutoff_low : Optional[float, 1D array_like or None]
High pass cutoff frequency of filter (expressed in the same units as `nyq`)
OR an array of cutoff frequencies (that is, band edges). In the
latter case, the frequencies in `cutoff` should be positive and
monotonically increasing between 0 and `nyq`. The values 0 and
`nyq` must not be included in `cutoff`. If None, a low-pass filter will not
be applied.
If dataObj.dataSet.metadata['filter_cutoff_low'] is set and this keyword is None,
the metadata value will be used.
cutoff_high : Optional[float, 1D array_like, or None]
Like cutoff_low, but this is the low pass cutoff frequency of the filter.
If dataObj.dataSet.metadata['filter_cutoff_high'] is set and this keyword is None,
the metadata value will be used.
width : Optional[float]
If `width` is not None, then assume it is the approximate width
of the transition region (expressed in the same units as `nyq`)
for use in Kaiser FIR filter design. In this case, the `window`
argument is ignored.
window : Optional[string or tuple of string and parameter values]
Desired window to use. See `scipy.signal.get_window` for a list
of windows and required parameters.
pass_zero : Optional[bool]
If True, the gain at the frequency 0 (i.e. the "DC gain") is 1.
Otherwise the DC gain is 0.
scale : Optional[bool]
Set to True to scale the coefficients so that the frequency
response is exactly unity at a certain frequency.
That frequency is either:
0 (DC) if the first passband starts at 0 (i.e. pass_zero is True);
`nyq` (the Nyquist rate) if the first passband ends at
`nyq` (i.e. the filter is a single band highpass filter);
center of first passband otherwise.
Attributes
----------
comment : str
cutoff_low : float, 1D array_like or None
High pass cutoff frequency of filter (expressed in the same units as `nyq`)
OR an array of cutoff frequencies (that is, band edges).
cutoff_high : float, 1D array_like, or None
Like cutoff_low, but this is the low pass cutoff frequency of the filter.
nyq : float
the Nyquist rate
ir : 1D numpy.array
Impulse response (FIR filter coefficients) computed from scipy.signal.firwin.
Methods
-------
plotTransferFunction
plotImpulseResponse
filter
Written by Nathaniel A. Frissell, Fall 2013
"""
def __init__(self, dataObj, dataSet='active', numtaps=None, cutoff_low=None, cutoff_high=None, width=None, window='blackman', pass_zero=True, scale=True,newDataSetName='filtered'):
import scipy as sp
sigObj = getattr(dataObj,dataSet)
nyq = sigObj.nyquistFrequency()
#Get metadata for cutoffs and numtaps.
md = sigObj.metadata
if cutoff_high == None:
if md.has_key('filter_cutoff_high'):
cutoff_high = md['filter_cutoff_high']
if cutoff_low == None:
if md.has_key('filter_cutoff_low'):
cutoff_low = md['filter_cutoff_low']
if numtaps == None:
if md.has_key('filter_numtaps'):
numtaps = md['filter_numtaps']
else:
logging.warning('You must provide numtaps.')
return
if cutoff_high != None: #Low pass
lp = sp.signal.firwin(numtaps=numtaps, cutoff=cutoff_high, width=width, window=window, pass_zero=pass_zero, scale=scale, nyq=nyq)
d = lp
if cutoff_low != None: #High pass
hp = -sp.signal.firwin(numtaps=numtaps, cutoff=cutoff_low, width=width, window=window, pass_zero=pass_zero, scale=scale, nyq=nyq)
hp[numtaps/2] = hp[numtaps/2] + 1
d = hp
if cutoff_high != None and cutoff_low != None:
d = -(lp+hp)
d[numtaps/2] = d[numtaps/2] + 1
d = -1.*d #Needed to correct 180 deg phase shift.
if cutoff_high == None and cutoff_low == None:
logging.warning("You must define cutoff frequencies!")
return
self.comment = ' '.join(['Filter:',window+',','Nyquist:',str(nyq),'Hz,','Cutoff:','['+str(cutoff_low)+', '+str(cutoff_high)+']','Hz,','Numtaps:',str(numtaps)])
self.cutoff_low = cutoff_low
self.cutoff_high = cutoff_high
self.nyq = nyq
self.ir = d
self.filter(dataObj,dataSet=dataSet,newDataSetName=newDataSetName)
def __str__(self):
return self.comment
def plotTransferFunction(self,xmin=0,xmax=None,ymin_mag=-150,ymax_mag=5,ymin_phase=None,ymax_phase=None,worN=None,fig=None):
import scipy as sp
"""Plot the frequency and phase response of the filter object.
Parameters
----------
xmin : Optional[float]
Minimum value for x-axis.
xmax : Optional[float]
Maximum value for x-axis.
ymin_mag : Optional[float]
Minimum value for y-axis for the frequency response plot.
ymax_mag : Optional[float]
Maximum value for y-axis for the frequency response plot.
ymin_phase : Optional[float]
Minimum value for y-axis for the phase response plot.
ymax_phase : Optional[float]
Maximum value for y-axis for the phase response plot.
worN : Optional[int]
passed to scipy.signal.freqz()
If None, then compute at 512 frequencies around the unit circle.
If the len(filter) > 512, then compute at len(filter) frequencies around the unit circle.
If a single integer, then compute at that many frequencies.
Otherwise, compute the response at frequencies given in worN
fig : Optional[matplotlib.Figure]
Figure object on which to plot. If None, a figure will be created.
Returns
-------
fig : matplotlib.Figure
Figure object containing the plot.
Written by Nathaniel A. Frissell, Fall 2013
"""
if fig == None:
from matplotlib import pyplot as plt
fig = plt.figure(figsize=(20,10))
if worN == None:
if len(self.ir) > 512: worN = len(self.ir)
else: worN = None
else: pass
w,h = sp.signal.freqz(self.ir,1,worN=worN)
h_dB = 20 * np.log10(abs(h))
axis = fig.add_subplot(211)
#Compute frequency vector.
w = w/max(w) * self.nyq
axis.plot(w,h_dB,'.-')
#mp.axvline(x=self.fMax,color='r',ls='--',lw=2)
if xmin is not None: axis.set_xlim(xmin=xmin)
if xmax is not None: axis.set_xlim(xmax=xmax)
if ymin_mag is not None: axis.set_ylim(ymin=ymin_mag)
if ymax_mag is not None: axis.set_ylim(ymax=ymax_mag)
axis.set_xlabel(r'Frequency (Hz)')
axis.set_ylabel('Magnitude (db)')
axis.set_title(r'Frequency response')
axis = fig.add_subplot(212)
h_Phase = np.unwrap(np.arctan2(np.imag(h),np.real(h)))
axis.plot(w,h_Phase,'.-')
if xmin is not None: axis.set_xlim(xmin=xmin)
if xmax is not None: axis.set_xlim(xmax=xmax)
if ymin_phase is not None: axis.set_ylim(ymin=ymin_phase)
if ymax_phase is not None: axis.set_ylim(ymax=ymax_phase)
axis.set_ylabel('Phase (radians)')
axis.set_xlabel(r'Frequency (Hz)')
axis.set_title(r'Phase response')
fig.suptitle(self.comment)
fig.subplots_adjust(hspace=0.5)
return fig
def plotImpulseResponse(self,xmin=None,xmax=None,ymin_imp=None,ymax_imp=None,ymin_step=None,ymax_step=None,fig=None):
import scipy as sp
"""Plot the frequency and phase response of the filter object.
Parameters
----------
xmin : Optional[float]
Minimum value for x-axis.
xmax : Optional[float]
Maximum value for x-axis.
ymin_imp : Optional[float]
Minimum value for y-axis for the impulse response plot.
ymax_imp : Optional[float]
Maximum value for y-axis for the impulse response plot.
ymin_step : Optional[float]
Minimum value for y-axis for the step response plot.
ymax_step : Optional[float]
Maximum value for y-axis for the step response plot.
fig : Optional[matplotlib.Figure]
Figure object on which to plot. If None, a figure will be created.
Returns
-------
fig : matplotlib.Figure
Figure object containing the plot.
Written by Nathaniel A. Frissell, Fall 2013
"""
if fig == None:
from matplotlib import pyplot as plt
fig = plt.figure(figsize=(20,10))
l = len(self.ir)
impulse = np.repeat(0.,l); impulse[0] =1.
x = np.arange(0,l)
response = sp.signal.lfilter(self.ir,1,impulse)
axis = fig.add_subplot(211)
axis.stem(x, response)
axis.set_ylabel('Amplitude')
axis.set_xlabel(r'n (samples)')
axis.set_title(r'Impulse response')
axis = fig.add_subplot(212)
step = np.cumsum(response)
axis.stem(x, step)
axis.set_ylabel('Amplitude')
axis.set_xlabel(r'n (samples)')
axis.set_title(r'Step response')
fig.suptitle(self.comment)
fig.subplots_adjust(hspace=0.5)
return fig
def filter(self,dataObj,dataSet='active',newDataSetName='filtered'):
"""Apply the filter to a vtsig object.
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
newDataSetName : Optional[str]
Name of the new musicDataObj to be created in the current musicArray object as a result of this processing.
Written by Nathaniel A. Frissell, Fall 2013
"""
import scipy as sp
sigobj = getattr(dataObj,dataSet)
vtsig = sigobj.parent
nrTimes,nrBeams,nrGates = np.shape(sigobj.data)
#Filter causes a delay in the signal and also doesn't get the tail end of the signal... Shift signal around, provide info about where the signal is valid.
shift = np.int32(-np.floor(len(self.ir)/2.))
start_line = np.zeros(nrTimes)
start_line[0] = 1
start_line = np.roll(start_line,shift)
tinx0 = abs(shift)
tinx1 = np.where(start_line == 1)[0][0]
val_tm0 = sigobj.time[tinx0]
val_tm1 = sigobj.time[tinx1]
filteredData = np.zeros_like(sigobj.data)
#Apply filter
for bm in range(nrBeams):
for rg in range(nrGates):
tmp = sp.signal.lfilter(self.ir,[1.0],sigobj.data[:,bm,rg])
tmp = np.roll(tmp,shift)
filteredData[:,bm,rg] = tmp[:]
#Create new signal object.
newsigobj = sigobj.copy(newDataSetName,self.comment)
#Put in the filtered data.
newsigobj.data = copy.copy(filteredData)
newsigobj.time = copy.copy(sigobj.time)
#Clear out ymin and ymax from metadata; make sure meta data block exists.
#If not, create it.
if hasattr(newsigobj,'metadata'):
delMeta = ['ymin','ymax','ylim']
for key in delMeta:
if newsigobj.metadata.has_key(key):
del newsigobj.metadata[key]
else:
newsigobj.metadata = {}
newsigobj.metadata['timeLimits'] = (val_tm0,val_tm1)
key = 'title'
if newsigobj.metadata.has_key(key):
newsigobj.metadata[key] = ' '.join(['Filtered',newsigobj.metadata[key]])
else:
newsigobj.metadata[key] = 'Filtered'
newsigobj.metadata['fir_filter'] = (self.cutoff_low,self.cutoff_high)
newsigobj.setActive()
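# Illustrative usage sketch -- band-pass filter the active data set of a
# placeholder musicArray `dataObj`. The constructor designs the FIR filter with
# scipy.signal.firwin and immediately applies it; cutoffs are in Hz and must
# lie below the Nyquist frequency of the (time-interpolated) data.
#
# filt = filter(dataObj, dataSet='active', numtaps=101,
#               cutoff_low=0.0003, cutoff_high=0.0012)
# fig = filt.plotTransferFunction(xmax=0.004)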
def detrend(dataObj,dataSet='active',newDataSetName='detrended',comment=None,type='linear'):
"""Linearly detrend a data in a musicArray/musicDataObj object.
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
newDataSetName : Optional[str]
Name of the new musicDataObj to be created in the current musicArray object as a result of this processing.
comment : Optional[str]
String to be appended to the history of this object. Set to None for the Default comment (recommended).
type : Optional[str]
The type of detrending. If type == 'linear' (default), the result of a linear least-squares fit to data
is subtracted from data. If type == 'constant', only the mean of data is subtracted.
Written by Nathaniel A. Frissell, Fall 2013
"""
import scipy as sp
currentData = getDataSet(dataObj,dataSet)
currentData = currentData.applyLimits()
nrTimes, nrBeams, nrGates = np.shape(currentData.data)
newDataArr= np.zeros_like(currentData.data)
for bm in range(nrBeams):
for rg in range(nrGates):
try:
newDataArr[:,bm,rg] = sp.signal.detrend(currentData.data[:,bm,rg],type=type)
except:
newDataArr[:,bm,rg] = np.nan
if comment == None:
comment = type.capitalize() + ' detrend (scipy.signal.detrend)'
newDataSet = currentData.copy(newDataSetName,comment)
newDataSet.data = newDataArr
newDataSet.setActive()
def nan_to_num(dataObj,dataSet='active',newDataSetName='nan_to_num',comment=None):
"""Convert all NANs and INFs to finite numbers using numpy.nan_to_num().
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
newDataSetName : Optional[str]
Name of the new musicDataObj to be created in the current musicArray object as a result of this processing.
comment : Optional[str]
String to be appended to the history of this object. Set to None for the Default comment (recommended).
Written by Nathaniel A. Frissell, Fall 2013
"""
currentData = getDataSet(dataObj,dataSet)
currentData = currentData.applyLimits()
if comment == None:
comment = 'numpy.nan_to_num'
newDataSet = currentData.copy(newDataSetName,comment)
newDataSet.data = np.nan_to_num(currentData.data)
newDataSet.setActive()
def windowData(dataObj,dataSet='active',newDataSetName='windowed',comment=None,window='hann'):
"""Apply a window to a musicArray object. The window is calculated using scipy.signal.get_window().
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
newDataSetName : Optional[str]
Name of the new musicDataObj to be created in the current musicArray object as a result of this processing.
comment : Optional[str]
String to be appended to the history of this object. Set to None for the Default comment (recommended).
window : Optional[str]
boxcar, triang, blackman, hamming, hann, bartlett, flattop, parzen, bohman, blackmanharris, nuttall,
barthann, kaiser (needs beta), gaussian (needs std), general_gaussian (needs power, width),
slepian (needs width), chebwin (needs attenuation)
Written by Nathaniel A. Frissell, Fall 2013
"""
import scipy as sp
currentData = getDataSet(dataObj,dataSet)
currentData = currentData.applyLimits()
nrTimes, nrBeams, nrGates = np.shape(currentData.data)
win = sp.signal.get_window(window,nrTimes,fftbins=False)
newDataArr= np.zeros_like(currentData.data)
for bm in range(nrBeams):
for rg in range(nrGates):
newDataArr[:,bm,rg] = currentData.data[:,bm,rg] * win
if comment == None:
comment = window.capitalize() + ' window applied (scipy.signal.get_window)'
newDataSet = currentData.copy(newDataSetName,comment)
newDataSet.data = newDataArr
newDataSet.setActive()
def calculateFFT(dataObj,dataSet='active',comment=None):
"""Calculate the spectrum of an object.
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
comment : Optional[str]
String to be appended to the history of this object. Set to None for the Default comment (recommended).
Written by Nathaniel A. Frissell, Fall 2013
"""
import scipy as sp
currentData = getDataSet(dataObj,dataSet)
currentData = currentData.applyLimits()
nrTimes, nrBeams, nrGates = np.shape(currentData.data)
#Determine frequency axis.
nyq = currentData.nyquistFrequency()
freq_ax = np.arange(nrTimes,dtype='f8')
freq_ax = (freq_ax / max(freq_ax)) - 0.5
freq_ax = freq_ax * 2. * nyq
#Use complex64, not complex128! If you use complex128, too much numerical noise will accumulate and the final plot will be bad!
newDataArr= np.zeros((nrTimes,nrBeams,nrGates),dtype=np.complex64)
for bm in range(nrBeams):
for rg in range(nrGates):
newDataArr[:,bm,rg] = sp.fftpack.fftshift(sp.fftpack.fft(currentData.data[:,bm,rg])) / np.size(currentData.data[:,bm,rg])
currentData.freqVec = freq_ax
currentData.spectrum = newDataArr
# Calculate the dominant frequency #############################################
posFreqInx = np.where(currentData.freqVec >= 0)[0]
posFreqVec = currentData.freqVec[posFreqInx]
npf = len(posFreqVec) #Number of positive frequencies
data = np.abs(currentData.spectrum[posFreqInx,:,:]) #Use the magnitude of the positive frequency data.
#Average Power Spectral Density
avg_psd = np.zeros(npf)
for x in range(npf): avg_psd[x] = np.mean(data[x,:,:])
currentData.dominantFreq = posFreqVec[np.argmax(avg_psd)]
currentData.appendHistory('Calculated FFT')
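# Illustrative usage sketch -- after calculateFFT() the spectrum, frequency
# axis, and dominant frequency hang off the active data set of the placeholder
# musicArray `dataObj`:
#
# calculateFFT(dataObj)
# currentData = getDataSet(dataObj, 'active')
# print currentData.dominantFreq
# print currentData.freqVec.shape, currentData.spectrum.shape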
def calculateDlm(dataObj,dataSet='active',comment=None):
"""Calculate the cross-spectral matrix of a musicaArray object. FFT must already have been calculated.
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
comment : Optional[str]
String to be appended to the history of this object. Set to None for the Default comment (recommended).
Written by Nathaniel A. Frissell, Fall 2013
"""
currentData = getDataSet(dataObj,dataSet)
nrTimes, nrBeams, nrGates = np.shape(currentData.data)
nCells = nrBeams * nrGates
currentData.llLookupTable = np.zeros([5,nCells])
currentData.Dlm = np.zeros([nCells,nCells],dtype=np.complex128)
#Only use positive frequencies...
posInx = np.where(currentData.freqVec > 0)[0]
#Explicitly write out gate/range indices...
llList = []
for gg in xrange(nrGates):
for bb in xrange(nrBeams):
llList.append((bb,gg))
for ll in range(nCells):
llAI = llList[ll]
ew_dist = currentData.fov.relative_x[llAI]
ns_dist = currentData.fov.relative_y[llAI]
currentData.llLookupTable[:,ll] = [ll, currentData.fov.beams[llAI[0]], currentData.fov.gates[llAI[1]],ns_dist,ew_dist]
spectL = currentData.spectrum[posInx,llAI[0],llAI[1]]
for mm in range(nCells):
mmAI = llList[mm]
spectM = currentData.spectrum[posInx,mmAI[0],mmAI[1]]
currentData.Dlm[ll,mm] = np.sum(spectL * np.conj(spectM))
currentData.appendHistory('Calculated Cross-Spectral Matrix Dlm')
def calculateKarr(dataObj,dataSet='active',kxMax=0.05,kyMax=0.05,dkx=0.001,dky=0.001,threshold=0.15):
"""Calculate the two-dimensional horizontal wavenumber array of a musicArray/musicDataObj object.
Cross-spectrum array Dlm must already have been calculated.
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
kxMax : Optional[float]
Maximum kx (East-West) wavenumber to calculate [rad/km]
kyMax : Optional[float]
Maximum ky (North-South) wavenumber to calculate [rad/km]
dkx : Optional[float]
kx resolution [rad/km]
dky : Optional[float]
ky resolution [rad/km]
threshold : Optional[float]
threshold of signals to detect as a fraction of the maximum eigenvalue
Written by Nathaniel A. Frissell, Fall 2013
"""
currentData = getDataSet(dataObj,dataSet)
nrTimes, nrBeams, nrGates = np.shape(currentData.data)
#Calculate eigenvalues, eigenvectors
eVals,eVecs = np.linalg.eig(np.transpose(currentData.Dlm))
nkx = np.ceil(2*kxMax/dkx)
if (nkx % 2) == 0: nkx = nkx+1
kxVec = kxMax * (2*np.arange(nkx)/(nkx-1) - 1)
nky = np.ceil(2*kyMax/dky)
if (nky % 2) == 0: nky = nky+1
kyVec = kyMax * (2*np.arange(nky)/(nky-1) - 1)
nkx = int(nkx)
nky = int(nky)
xm = currentData.llLookupTable[4,:] #x is in the E-W direction.
ym = currentData.llLookupTable[3,:] #y is in the N-S direction.
maxEval = np.max(np.abs(eVals))
minEvalsInx = np.where(eVals <= threshold*maxEval)[0]
cnt = np.size(minEvalsInx)
maxEvalsInx = np.where(eVals > threshold*maxEval)[0]
nSigs = np.size(maxEvalsInx)
if cnt < 3:
logging.warning('Not enough small eigenvalues!')
import ipdb; ipdb.set_trace()
logging.info('K-Array: ' + str(nkx) + ' x ' + str(nky))
logging.info('Kx Max: ' + str(kxMax))
logging.info('Kx Res: ' + str(dkx))
logging.info('Ky Max: ' + str(kyMax))
logging.info('Ky Res: ' + str(dky))
logging.info('')
logging.info('Signal Threshold: ' + str(threshold))
logging.info('Number of Det Signals: ' + str(nSigs))
logging.info('Number of Noise Evals: ' + str(cnt))
logging.info('Starting kArr Calculation...')
t0 = datetime.datetime.now()
def vCalc(um,v):
return np.dot( np.conj(um), v) * np.dot( np.conj(v), um)
vList = [eVecs[:,minEvalsInx[ee]] for ee in xrange(cnt)]
kArr = np.zeros((nkx,nky),dtype=np.complex64)
for kk_kx in xrange(nkx):
kx = kxVec[kk_kx]
for kk_ky in xrange(nky):
ky = kyVec[kk_ky]
um = np.exp(1j*(kx*xm + ky*ym))
kArr[kk_kx,kk_ky]= 1. / np.sum(map(lambda v: vCalc(um,v), vList))
t1 = datetime.datetime.now()
logging.info('Finished kArr Calculation. Total time: ' + str(t1-t0))
currentData.karr = kArr
currentData.kxVec = kxVec
currentData.kyVec = kyVec
currentData.appendHistory('Calculated kArr')
def simulator(dataObj, dataSet='active',newDataSetName='simulated',comment=None,keepLocalRange=True,sigs=None,noiseFactor=0):
"""Replace SuperDARN Data with simulated MSTID(s). This is useful for understanding how the signal processing
routines of this module affect ideal data.
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
newDataSetName : Optional[str]
Name of the new musicDataObj to be created in the current musicArray object as a result of this processing.
comment : Optional[str]
String to be appended to the history of this object. Set to None for the Default comment (recommended).
keepLocalRange : Optional[bool]
If true, the locations calculated for the actual radar field of view will be used. If false,
a linearly-spaced grid will replace the true grid.
sigs : Optional[list of tuples]
A list of tuples defining the characteristics of the simulated signal. Sample list is as follows.
If this keyword is None, the values in this sample list are used as the default values.::
sigs = []
# (amp, kx, ky, f, phi, dcOffset)
sigs.append(( 5, 0.01, -0.010, 0.0004, 0, 5.))
sigs.append(( 5, 0.022, -0.023, 0.0004, 0, 5.))
Each signal is evaluated as a cosine and then summed together. The cosine evaluated is::
sig = amp * np.cos(kx*xgrid + ky*ygrid - 2.*np.pi*f*t + phi) + dc
noiseFactor : Optional[float]
Add white gaussian noise to the simulated signal. noiseFactor is a scalar such that:
noise = noiseFactor*np.random.standard_normal(nSteps)
Written by Nathaniel A. Frissell, Fall 2013
"""
from davitpy import utils
currentData = getDataSet(dataObj,dataSet)
#Typical TID Parameters:
# Frequency: 0.0003 Hz
# Period: 55.5 min
# H. Wavelength: 314 km
# k: 0.02 /km
if keepLocalRange == True:
nx, ny = np.shape(currentData.fov.relative_x)
xRange = np.max(currentData.fov.relative_x) - np.min(currentData.fov.relative_x)
yRange = np.max(currentData.fov.relative_y) - np.min(currentData.fov.relative_y)
xgrid = currentData.fov.relative_x
ygrid = currentData.fov.relative_y
else:
nx = 16
xRange = 800.
ny = 25
yRange = 600.
xvec = np.linspace(-xRange/2.,xRange/2.,nx)
yvec = np.linspace(-yRange/2.,yRange/2.,ny)
dx = np.diff(xvec)[0]
dy = np.diff(yvec)[0]
xaxis = np.append(xvec,xvec[-1]+dx)
yaxis = np.append(yvec,yvec[-1]+dy)
xgrid = np.zeros((nx,ny))
ygrid = np.zeros((nx,ny))
for kk in xrange(nx): ygrid[kk,:] = yvec[:]
for kk in xrange(ny): xgrid[:,kk] = xvec[:]
if sigs == None:
#Set some default signals.
sigs = []
# (amp, kx, ky, f, phi, dcOffset)
sigs.append(( 5, 0.01, -0.010, 0.0004, 0, 5.))
sigs.append(( 5, 0.022, -0.023, 0.0004, 0, 5.))
secVec = np.array(utils.datetimeToEpoch(currentData.time))
secVec = secVec - secVec[0]
nSteps = len(secVec)
dt = currentData.samplePeriod()
dataArr = np.zeros((nSteps,nx,ny))
for step in xrange(nSteps):
t = secVec[step]
for kk in xrange(len(sigs)):
amp = sigs[kk][0]
kx = sigs[kk][1]
ky = sigs[kk][2]
f = sigs[kk][3]
phi = sigs[kk][4]
dc = sigs[kk][5]
if 1./dt <= 2.*f:
logging.warning('Nyquist Violation in f.')
logging.warning('Signal #: %i' % kk)
# if 1./dx <= 2.*kx/(2.*np.pi):
# print 'WARNING: Nyquist Violation in kx.'
# print 'Signal #: %i' % kk
#
# if 1./dy <= 2.*ky/(2.*np.pi):
# print 'WARNING: Nyquist Violation in ky.'
# print 'Signal #: %i' % kk
temp = amp * np.cos(kx*xgrid + ky*ygrid - 2.*np.pi*f*t + phi) + dc
dataArr[step,:,:] = dataArr[step,:,:] + temp
#Signal RMS
sig_rms = np.zeros((nx,ny))
for xx in xrange(nx):
for yy in xrange(ny):
sig_rms[xx,yy] = np.sqrt(np.mean((dataArr[:,xx,yy])**2.))
noise_rms = np.zeros((nx,ny))
if noiseFactor > 0:
nf = noiseFactor
#Temporal White Noise
for xx in xrange(nx):
for yy in xrange(ny):
noise = nf*np.random.standard_normal(nSteps)
noise_rms[xx,yy] = np.sqrt(np.mean(noise**2))
dataArr[:,xx,yy] = dataArr[:,xx,yy] + noise
xx = np.arange(ny)
mu = (ny-1.)/2.
sigma2 = 10.0
sigma = np.sqrt(sigma2)
rgDist = 1./(sigma*np.sqrt(2.*np.pi)) * np.exp(-0.5 * ((xx-mu)/sigma)**2)
rgDist = rgDist / np.max(rgDist)
mask = np.zeros((nx,ny))
for nn in xrange(nx): mask[nn,:] = rgDist[:]
mask3d = np.zeros((nSteps,nx,ny))
for nn in xrange(nSteps): mask3d[nn,:,:] = mask[:]
#Apply Range Gate Dependence
dataArr = dataArr * mask3d
snr = (sig_rms/noise_rms)**2
snr_db = 10.*np.log10(snr)
if comment == None:
comment = 'Simulated data injected.'
newDataSet = currentData.copy(newDataSetName,comment)
newDataSet.data = dataArr
newDataSet.setActive()
#OPENW,unit,'simstats.txt',/GET_LUN,WIDTH=300
#stats$ = ' Mean: ' + NUMSTR(MEAN(sig_rms),3) $
# + ' STDDEV: ' + NUMSTR(STDDEV(sig_rms),3) $
# + ' Var: ' + NUMSTR(STDDEV(sig_rms)^2,3)
#PRINTF,unit,'SIG_RMS'
#PRINTF,unit,stats$
#PRINTF,unit,sig_rms
#
#PRINTF,unit,''
#PRINTF,unit,'NOISE_RMS'
#stats$ = ' Mean: ' + NUMSTR(MEAN(noise_rms),3) $
# + ' STDDEV: ' + NUMSTR(STDDEV(noise_rms),3) $
# + ' Var: ' + NUMSTR(STDDEV(noise_rms)^2,3)
#PRINTF,unit,stats$
#PRINTF,unit,noise_rms
#
#PRINTF,unit,''
#PRINTF,unit,'SNR_DB'
#stats$ = ' Mean: ' + NUMSTR(MEAN(snr_db),3) $
# + ' STDDEV: ' + NUMSTR(STDDEV(snr_db),3) $
# + ' Var: ' + NUMSTR(STDDEV(snr_db)^2,3)
#PRINTF,unit,stats$
#PRINTF,unit,snr_db
#CLOSE,unit
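# Illustrative usage sketch -- inject two synthetic MSTIDs plus white noise into
# the placeholder musicArray `dataObj`; each tuple is (amp, kx, ky, f, phi,
# dcOffset) as described in the docstring above.
#
# sigs = []
# sigs.append((10, 0.025,  0.000, 0.0005, 0, 5.))
# sigs.append(( 5, 0.010, -0.015, 0.0003, 0, 5.))
# simulator(dataObj, sigs=sigs, noiseFactor=0.5)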
def scale_karr(kArr):
from scipy import stats
"""Scale/normalize kArr for plotting and signal detection.
Parameters
----------
kArr : 2D numpy.array
Two-dimensional horizontal wavenumber array of a musicArray/musicDataObj object.
Returns
-------
data : 2D numpy.array
Scaled and normalized version of kArr.
Written by Nathaniel A. Frissell, Fall 2013
"""
data = np.abs(kArr) - np.min(np.abs(kArr))
#Determine scale for colorbar.
scale = [0.,1.]
sd = stats.nanstd(data,axis=None)
mean = stats.nanmean(data,axis=None)
scMax = mean + 6.5*sd
data = data / scMax
return data
def detectSignals(dataObj,dataSet='active',threshold=0.35,neighborhood=(10,10)):
"""Automatically detects local maxima/signals in a calculated kArr. This routine uses the watershed
algorithm from the skimage image processing library. Results are automatically stored in
dataObj.dataSet.sigDetect.
Parameters
----------
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
threshold : Optional[float]
Scaled input data must be above this value to be detected. A higher number
will reduce the number of signals detected.
neighborhood : Optional[tuple]
Local region in which to search for peaks at every point in the image/array.
(10,10) will search a 10x10 pixel area.
Returns
-------
currentData : musicDataObj
object
Written by Nathaniel A. Frissell, Fall 2013
"""
currentData = getDataSet(dataObj,dataSet)
################################################################################
#Feature detection...
#Now lets do a little image processing...
from scipy import ndimage
from skimage.morphology import watershed
from skimage.feature import peak_local_max
#sudo pip install cython
#sudo pip install scikit-image
data = scale_karr(currentData.karr)
mask = data > threshold
labels, nb = ndimage.label(mask)
distance = ndimage.distance_transform_edt(mask)
local_maxi = peak_local_max(distance,footprint=np.ones(neighborhood),indices=False)
markers,nb = ndimage.label(local_maxi)
labels = watershed(-distance,markers,mask=mask)
areas = ndimage.sum(mask,labels,xrange(1,labels.max()+1))
maxima = ndimage.maximum(data,labels,xrange(1, labels.max()+1))
order = np.argsort(maxima)[::-1] + 1
maxpos = ndimage.maximum_position(data,labels,xrange(1, labels.max()+1))
sigDetect = SigDetect()
sigDetect.mask = mask
sigDetect.labels = labels
sigDetect.nrSigs = nb
sigDetect.info = []
for x in xrange(labels.max()):
info = {}
info['labelInx'] = x+1
info['order'] = order[x]
info['area'] = areas[x]
info['max'] = maxima[x]
info['maxpos'] = maxpos[x]
info['kx'] = currentData.kxVec[info['maxpos'][0]]
info['ky'] = currentData.kyVec[info['maxpos'][1]]
info['k'] = np.sqrt( info['kx']**2 + info['ky']**2 )
info['lambda_x'] = 2*np.pi / info['kx']
info['lambda_y'] = 2*np.pi / info['ky']
info['lambda'] = 2*np.pi / info['k']
info['azm'] = np.degrees(np.arctan2(info['kx'],info['ky']))
info['freq'] = currentData.dominantFreq
info['period'] = 1./currentData.dominantFreq
info['vel'] = (2.*np.pi/info['k']) * info['freq'] * 1000.
sigDetect.info.append(info)
currentData.appendHistory('Detected KArr Signals')
currentData.sigDetect = sigDetect
return currentData
def add_signal(kx,ky,dataObj,dataSet='active',frequency=None):
"""Manually add a signal to the detected signal list. All signals will be re-ordered according to value in the
scaled kArr. Added signals can be distinguished from autodetected signals because
'labelInx' and 'area' will both be set to -1.
Parameters
----------
kx : float
Value of kx of new signal.
ky : float
Value of ky of new signal.
dataObj : musicArray
musicArray object
dataSet : Optional[str]
which dataSet in the musicArray object to process
frequency : Optional[float]
Frequency to use to calculate period, phase velocity, etc. If None,
the calculated dominant frequency will be used.
Returns
-------
currentData : musicDataObj
object
Written by Nathaniel A. Frissell, Fall 2013
"""
currentData = getDataSet(dataObj,dataSet)
data = scale_karr(currentData.karr)
def find_nearest_inx(array,value):
return (np.abs(array-value)).argmin()
kx_inx = find_nearest_inx(currentData.kxVec,kx)
ky_inx = find_nearest_inx(currentData.kyVec,ky)
maxpos = (kx_inx,ky_inx)
value = data[kx_inx,ky_inx]
true_value = currentData.karr[kx_inx,ky_inx] #Get the unscaled kArr value.
if frequency == None:
freq = currentData.dominantFreq
else:
freq = frequency
info = {}
info['labelInx'] = -1
info['area'] = -1
info['order'] = -1
info['max'] = value
info['true_max'] = true_value #Unscaled kArr value
info['maxpos'] = maxpos
info['kx'] = currentData.kxVec[info['maxpos'][0]]
info['ky'] = currentData.kyVec[info['maxpos'][1]]
info['k'] = np.sqrt( info['kx']**2 + info['ky']**2 )
info['lambda_x'] = 2*np.pi / info['kx']
info['lambda_y'] = 2*np.pi / info['ky']
info['lambda'] = 2*np.pi / info['k']
info['azm'] = np.degrees(np.arctan2(info['kx'],info['ky']))
info['freq'] = freq
info['period'] = 1./freq
info['vel'] = (2.*np.pi/info['k']) * info['freq'] * 1000.
currentData.sigDetect.info.append(info)
currentData.sigDetect.reorder()
currentData.appendHistory('Appended Signal to sigDetect List')
return currentData
def del_signal(order,dataObj,dataSet='active'):
"""Remove a signal to the detected signal list.
Parameters
----------
order : int or list of int
Single value or list of signal orders (IDs) to be removed from the list.
dataObj : musicArray
object
dataSet : Optional[str]
which dataSet in the musicArray object to process
Returns
-------
currentData : musicDataObj
object
Written by Nathaniel A. Frissell, Fall 2013
"""
currentData = getDataSet(dataObj,dataSet)
data = scale_karr(currentData.karr)
orderArr = np.array(order)
for item in list(currentData.sigDetect.info):
if item['order'] in orderArr:
currentData.sigDetect.info.remove(item)
currentData.sigDetect.reorder()
currentData.appendHistory('Deleted Signals from sigDetect List')
return currentData
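# Illustrative end-to-end sketch -- one possible MUSIC processing chain built
# from the functions in this module, assuming `dataObj` is a musicArray loaded
# from fitted radar data:
#
# beamInterpolation(dataObj)
# timeInterpolation(dataObj, timeRes=120)
# filter(dataObj, numtaps=101, cutoff_low=0.0003, cutoff_high=0.0012)
# detrend(dataObj)
# windowData(dataObj)
# calculateFFT(dataObj)
# calculateDlm(dataObj)
# calculateKarr(dataObj)
# detectSignals(dataObj, threshold=0.35)
# for sig in getDataSet(dataObj, 'active').sigDetect.info:
#     print sig['order'], sig['lambda'], sig['azm'], sig['vel']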
| gpl-3.0 |
pmediano/ComputationalNeurodynamics | Fall2016/Exercise_1/Solutions/IzNeuronRK4.py | 1 | 1897 | """
Computational Neurodynamics
Exercise 1
Simulates Izhikevich's neuron model using the Runge-Kutta 4 method.
Parameters for regular spiking, fast spiking and bursting
neurons extracted from:
http://www.izhikevich.org/publications/spikes.htm
(C) Murray Shanahan et al, 2016
"""
import numpy as np
import matplotlib.pyplot as plt
# Create time points
Tmin = 0
Tmax = 200 # Simulation time
dt = 0.01 # Step size
T = np.arange(Tmin, Tmax+dt, dt)
# Base current
I = 10
## Parameters of Izhikevich's model (regular spiking)
a = 0.02
b = 0.2
c = -65
d = 8
## Parameters of Izhikevich's model (fast spiking)
# a = 0.02
# b = 0.25
# c = -65
# d = 2
## Parameters of Izhikevich's model (bursting)
# a = 0.02
# b = 0.2
# c = -50
# d = 2
## Make a state vector that has a (v, u) pair for each timestep
s = np.zeros((len(T), 2))
## Initial values
s[0, 0] = -65
s[0, 1] = -1
# Note that s1[0] is v, s1[1] is u. This is the Izhikevich equation in vector form
def s_dt(s1, I):
v_dt = 0.04*(s1[0]**2) + 5*s1[0] + 140 - s1[1] + I
u_dt = a*(b*s1[0] - s1[1])
return np.array([v_dt, u_dt])
## SIMULATE
for t in range(len(T)-1):
# Calculate the four constants of Runge-Kutta method
k_1 = s_dt(s[t], I)
k_2 = s_dt(s[t] + 0.5*dt*k_1, I)
k_3 = s_dt(s[t] + 0.5*dt*k_2, I)
k_4 = s_dt(s[t] + dt*k_3, I)
s[t+1] = s[t] + (1.0/6)*dt*(k_1 + 2*k_2 + 2*k_3 + k_4)
# Reset the neuron if it has spiked
if s[t+1, 0] >= 30:
s[t, 0] = 30 # Add a Dirac pulse for visualisation
s[t+1, 0] = c # Reset to resting potential
s[t+1, 1] += d # Update recovery variable
v = s[:, 0]
u = s[:, 1]
## Plot the membrane potential
plt.subplot(211)
plt.plot(T, v)
plt.xlabel('Time (ms)')
plt.ylabel('Membrane potential v (mV)')
plt.title('Izhikevich Neuron')
# Plot the reset variable
plt.subplot(212)
plt.plot(T, u)
plt.xlabel('Time (ms)')
plt.ylabel('Reset variable u')
plt.show()
| gpl-3.0 |
nvoron23/scikit-learn | sklearn/linear_model/tests/test_theil_sen.py | 234 | 9928 | """
Testing for Theil-Sen module (sklearn.linear_model.theil_sen)
"""
# Author: Florian Wilhelm <[email protected]>
# License: BSD 3 clause
from __future__ import division, print_function, absolute_import
import os
import sys
from contextlib import contextmanager
import numpy as np
from numpy.testing import assert_array_equal, assert_array_less
from numpy.testing import assert_array_almost_equal, assert_warns
from scipy.linalg import norm
from scipy.optimize import fmin_bfgs
from nose.tools import raises, assert_almost_equal
from sklearn.utils import ConvergenceWarning
from sklearn.linear_model import LinearRegression, TheilSenRegressor
from sklearn.linear_model.theil_sen import _spatial_median, _breakdown_point
from sklearn.linear_model.theil_sen import _modified_weiszfeld_step
from sklearn.utils.testing import assert_greater, assert_less
@contextmanager
def no_stdout_stderr():
old_stdout = sys.stdout
old_stderr = sys.stderr
sys.stdout = open(os.devnull, 'w')
sys.stderr = open(os.devnull, 'w')
yield
sys.stdout.flush()
sys.stderr.flush()
sys.stdout = old_stdout
sys.stderr = old_stderr
def gen_toy_problem_1d(intercept=True):
random_state = np.random.RandomState(0)
# Linear model y = 3*x + N(2, 0.1**2)
w = 3.
if intercept:
c = 2.
n_samples = 50
else:
c = 0.1
n_samples = 100
x = random_state.normal(size=n_samples)
noise = 0.1 * random_state.normal(size=n_samples)
y = w * x + c + noise
# Add some outliers
if intercept:
x[42], y[42] = (-2, 4)
x[43], y[43] = (-2.5, 8)
x[33], y[33] = (2.5, 1)
x[49], y[49] = (2.1, 2)
else:
x[42], y[42] = (-2, 4)
x[43], y[43] = (-2.5, 8)
x[53], y[53] = (2.5, 1)
x[60], y[60] = (2.1, 2)
x[72], y[72] = (1.8, -7)
return x[:, np.newaxis], y, w, c
def gen_toy_problem_2d():
random_state = np.random.RandomState(0)
n_samples = 100
# Linear model y = 5*x_1 + 10*x_2 + N(1, 0.1**2)
X = random_state.normal(size=(n_samples, 2))
w = np.array([5., 10.])
c = 1.
noise = 0.1 * random_state.normal(size=n_samples)
y = np.dot(X, w) + c + noise
# Add some outliers
n_outliers = n_samples // 10
ix = random_state.randint(0, n_samples, size=n_outliers)
y[ix] = 50 * random_state.normal(size=n_outliers)
return X, y, w, c
def gen_toy_problem_4d():
random_state = np.random.RandomState(0)
n_samples = 10000
# Linear model y = 5*x_1 + 10*x_2 + 42*x_3 + 7*x_4 + N(1, 0.1**2)
X = random_state.normal(size=(n_samples, 4))
w = np.array([5., 10., 42., 7.])
c = 1.
noise = 0.1 * random_state.normal(size=n_samples)
y = np.dot(X, w) + c + noise
# Add some outliers
n_outliers = n_samples // 10
ix = random_state.randint(0, n_samples, size=n_outliers)
y[ix] = 50 * random_state.normal(size=n_outliers)
return X, y, w, c
def test_modweiszfeld_step_1d():
X = np.array([1., 2., 3.]).reshape(3, 1)
# Check startvalue is element of X and solution
median = 2.
new_y = _modified_weiszfeld_step(X, median)
assert_array_almost_equal(new_y, median)
# Check startvalue is not the solution
y = 2.5
new_y = _modified_weiszfeld_step(X, y)
assert_array_less(median, new_y)
assert_array_less(new_y, y)
# Check startvalue is not the solution but element of X
y = 3.
new_y = _modified_weiszfeld_step(X, y)
assert_array_less(median, new_y)
assert_array_less(new_y, y)
# Check that a single vector is identity
X = np.array([1., 2., 3.]).reshape(1, 3)
y = X[0, ]
new_y = _modified_weiszfeld_step(X, y)
assert_array_equal(y, new_y)
def test_modweiszfeld_step_2d():
X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2)
y = np.array([0.5, 0.5])
# Check first two iterations
new_y = _modified_weiszfeld_step(X, y)
assert_array_almost_equal(new_y, np.array([1 / 3, 2 / 3]))
new_y = _modified_weiszfeld_step(X, new_y)
assert_array_almost_equal(new_y, np.array([0.2792408, 0.7207592]))
# Check fix point
y = np.array([0.21132505, 0.78867497])
new_y = _modified_weiszfeld_step(X, y)
assert_array_almost_equal(new_y, y)
def test_spatial_median_1d():
X = np.array([1., 2., 3.]).reshape(3, 1)
true_median = 2.
_, median = _spatial_median(X)
assert_array_almost_equal(median, true_median)
# Test larger problem and for exact solution in 1d case
random_state = np.random.RandomState(0)
X = random_state.randint(100, size=(1000, 1))
true_median = np.median(X.ravel())
_, median = _spatial_median(X)
assert_array_equal(median, true_median)
def test_spatial_median_2d():
X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2)
_, median = _spatial_median(X, max_iter=100, tol=1.e-6)
def cost_func(y):
dists = np.array([norm(x - y) for x in X])
return np.sum(dists)
# Check if median is solution of the Fermat-Weber location problem
fermat_weber = fmin_bfgs(cost_func, median, disp=False)
assert_array_almost_equal(median, fermat_weber)
# Check when maximum iteration is exceeded a warning is emitted
assert_warns(ConvergenceWarning, _spatial_median, X, max_iter=30, tol=0.)
def test_theil_sen_1d():
X, y, w, c = gen_toy_problem_1d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(np.abs(lstq.coef_ - w), 0.9)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_theil_sen_1d_no_intercept():
X, y, w, c = gen_toy_problem_1d(intercept=False)
# Check that Least Squares fails
lstq = LinearRegression(fit_intercept=False).fit(X, y)
assert_greater(np.abs(lstq.coef_ - w - c), 0.5)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(fit_intercept=False,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w + c, 1)
assert_almost_equal(theil_sen.intercept_, 0.)
def test_theil_sen_2d():
X, y, w, c = gen_toy_problem_2d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(norm(lstq.coef_ - w), 1.0)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(max_subpopulation=1e3,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_calc_breakdown_point():
bp = _breakdown_point(1e10, 2)
assert_less(np.abs(bp - 1 + 1/(np.sqrt(2))), 1.e-6)
@raises(ValueError)
def test_checksubparams_negative_subpopulation():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(max_subpopulation=-1, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_too_few_subsamples():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(n_subsamples=1, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_too_many_subsamples():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(n_subsamples=101, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_n_subsamples_if_less_samples_than_features():
random_state = np.random.RandomState(0)
n_samples, n_features = 10, 20
X = random_state.normal(size=(n_samples, n_features))
y = random_state.normal(size=n_samples)
TheilSenRegressor(n_subsamples=9, random_state=0).fit(X, y)
def test_subpopulation():
X, y, w, c = gen_toy_problem_4d()
theil_sen = TheilSenRegressor(max_subpopulation=250,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_subsamples():
X, y, w, c = gen_toy_problem_4d()
theil_sen = TheilSenRegressor(n_subsamples=X.shape[0],
random_state=0).fit(X, y)
lstq = LinearRegression().fit(X, y)
    # Check for exactly the same results as Least Squares
assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 9)
def test_verbosity():
X, y, w, c = gen_toy_problem_1d()
# Check that Theil-Sen can be verbose
with no_stdout_stderr():
TheilSenRegressor(verbose=True, random_state=0).fit(X, y)
TheilSenRegressor(verbose=True,
max_subpopulation=10,
random_state=0).fit(X, y)
def test_theil_sen_parallel():
X, y, w, c = gen_toy_problem_2d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(norm(lstq.coef_ - w), 1.0)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(n_jobs=-1,
random_state=0,
max_subpopulation=2e3).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_less_samples_than_features():
random_state = np.random.RandomState(0)
n_samples, n_features = 10, 20
X = random_state.normal(size=(n_samples, n_features))
y = random_state.normal(size=n_samples)
# Check that Theil-Sen falls back to Least Squares if fit_intercept=False
theil_sen = TheilSenRegressor(fit_intercept=False,
random_state=0).fit(X, y)
lstq = LinearRegression(fit_intercept=False).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 12)
# Check fit_intercept=True case. This will not be equal to the Least
# Squares solution since the intercept is calculated differently.
theil_sen = TheilSenRegressor(fit_intercept=True, random_state=0).fit(X, y)
y_pred = theil_sen.predict(X)
assert_array_almost_equal(y_pred, y, 12)
| bsd-3-clause |
reuk/wayverb | scripts/python/dispersion.py | 2 | 6340 | from math import e, pi
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import colors, ticker, cm
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import operator
def get_base_vectors(flip):
ret = [
np.array([0.0, 2.0 * np.sqrt(2.0) / 3.0, 1.0 / 3.0]),
np.array([ np.sqrt(2.0 / 3.0), -np.sqrt(2.0) / 3.0, 1.0 / 3.0]),
np.array([0.0, 0.0, -1.0]),
np.array([-np.sqrt(2.0 / 3.0), -np.sqrt(2.0) / 3.0, 1.0 / 3.0]),
]
if flip:
ret = [np.array([1, -1, -1]) * i for i in ret]
return ret
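# Illustrative check (not part of the original script): each tetrahedral base
# vector has unit length, e.g.
#
#   >>> [round(np.linalg.norm(v), 6) for v in get_base_vectors(False)]
#   [1.0, 1.0, 1.0, 1.0]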
def get_vectors():
ret = [i + j for i in get_base_vectors(False) for j in get_base_vectors(True)]
ret = filter(lambda x: np.any(x != np.array([0, 0, 0])), ret)
return ret
# DUYNE METHOD
def get_speed(arr):
"""
The diagrams in the paper appear to be continuous outside of the range
-1.5, 1.5.
However, this function has a strange discontinuity at a radius of 1.4
"""
def get_b(arr):
summed = sum([pow(e, 1j * np.dot(arr, i)) for i in get_vectors()])
return 1.0 - 0.25 * summed.real
def get_ang_g(arr):
b = get_b(arr)
return 0.5 * np.arctan(np.sqrt(4 - b * b) / abs(b))
c = np.sqrt(1.0 / 3.0)
norm = np.linalg.norm(arr)
# this analysis is only valid for frequencies below pi / 2
# (spectrum is mirrored above this limit)
# simulated frequency is equal to magnitude of wave vector (arr)
if norm < pi / 2:
return get_ang_g(arr) / (norm * c)
else:
return None
# CAMPOS METHOD
def get_speed_campos(arr):
def get_b(arr):
x, y, z = arr
a = np.cos(2.0 * x / np.sqrt(3.0)) * np.cos(2.0 * y / np.sqrt(3.0))
b = np.cos(2.0 * x / np.sqrt(3.0)) * np.cos(2.0 * z / np.sqrt(3.0))
c = np.cos(2.0 * y / np.sqrt(3.0)) * np.cos(2.0 * z / np.sqrt(3.0))
return a + b + c - 1
def get_kd(arr):
return np.sqrt(3.0) * np.arccos(get_b(arr) / 2.0) / (2.0 * np.linalg.norm(arr))
return get_kd(arr)
# direction error analysis from @hacihabiboglu
# p(x) = pressure field in spatial(?) domain
# P(w) = pressure field in frequency domain
def get_U():
v = get_base_vectors(True)
U = np.vstack(v)
return U
def eq_21(u, w):
return pow(e, -1j * np.dot(u, w)) - 1
def eq_22(w):
return np.array([eq_21(i, w) for i in get_base_vectors(True)])
def eq_23(w):
return np.dot(np.linalg.pinv(get_U()), eq_22(w))
def hermitian_angle(a, b):
prod = np.dot(a, np.conj(b)).real
mag_a = np.sqrt(np.dot(a, np.conj(a)))
mag_b = np.sqrt(np.dot(b, np.conj(b)))
return (prod / (mag_a * mag_b)).real
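# Illustrative sketch (not part of the original script): the Hermitian angle
# measure of a vector with itself is 1, and of orthogonal vectors is 0, e.g.
#
#   >>> hermitian_angle(np.array([1.0, 0.0]), np.array([1.0, 0.0]))
#   1.0
#   >>> hermitian_angle(np.array([1.0, 0.0]), np.array([0.0, 1.0]))
#   0.0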
def direction_difference(arr):
def get_term_1():
return eq_23(arr)
def get_term_2():
return 1j * arr
return hermitian_angle(get_term_1(), get_term_2())
# monte carlo bandwidth estimation
def random_three_vector():
phi = np.random.uniform(0, pi * 2)
costheta = np.random.uniform(-1, 1)
theta = np.arccos(costheta)
x = np.sin(theta) * np.cos(phi)
y = np.sin(theta) * np.sin(phi)
z = np.cos(theta)
return np.array([x, y, z])
def get_max_valid_frequency(func, accuracy, starting_freq, increments, samples):
last = starting_freq + increments
ret = starting_freq
while True:
sample_points = [random_three_vector() * last for i in range(samples)]
sampled = [func(i) for i in sample_points]
if not all(map(lambda x: x > accuracy, sampled)):
return ret
else:
ret = last
last += increments
def main():
"""
This program duplicates the tetrahedral dispersion diagrams from the paper
    'The Tetrahedral Digital Waveguide Mesh' by Duyne and Smith.
I wrote it to try to understand how to do dispersion analysis - the
analysis here is of the difference of the actual wavefront speed to the
ideal speed.
"""
w = np.array([0, 1, 0])
w /= np.linalg.norm(w)
print "w", w
for i in get_base_vectors(True):
print "u", i
print "21", eq_21(i, w)
print "22", eq_22(w)
print "23", eq_23(w)
print
print direction_difference(w)
func = direction_difference
vfunc = np.vectorize(lambda x, y, z: func(np.array([x, y, z])))
max_val = np.pi / 4
phi, theta = np.mgrid[0:pi:50j, 0:2*pi:50j]
XX = max_val * np.sin(phi) * np.cos(theta)
YY = max_val * np.sin(phi) * np.sin(theta)
ZZ = max_val * np.cos(phi)
zz = vfunc(XX, YY, ZZ)
zzmin, zzmax = zz.min(), zz.max()
print "dispersion error range:", zzmin, "to", zzmax
zz = (zz - zzmin) / (zzmax - zzmin)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(
XX, YY, ZZ, rstride=1, cstride=1, facecolors=cm.jet(zz))
plt.show()
# func = get_speed_campos
# vfunc = np.vectorize(lambda x, y, z: func(np.array([x, y, z])))
#
# min_accuracy = 0.99
# max_val = get_max_valid_frequency(func, min_accuracy, 0.1, 0.001, 20)
# print "maximum radius (frequency): ", max_val / (pi / 2)
# phi, theta = np.mgrid[0:pi:50j, 0:2*pi:50j]
# XX = max_val * np.sin(phi) * np.cos(theta)
# YY = max_val * np.sin(phi) * np.sin(theta)
# ZZ = max_val * np.cos(phi)
# zz = vfunc(XX, YY, ZZ)
# zzmin, zzmax = zz.min(), zz.max()
# print "dispersion error range:", zzmin, "to", zzmax
# zz = (zz - zzmin) / (zzmax - zzmin)
#
# fig = plt.figure()
#
# bounds = pi / 2
# N = 100
# x = np.linspace(-bounds, bounds, N)
# y = np.linspace(-bounds, bounds, N)
# X, Y = np.meshgrid(x, y)
# Z = np.zeros(X.shape)
# depth = np.linspace(0.9, 1, 11)
#
# ### plot 1
# ax = fig.add_subplot(221 + 0)
# z = vfunc(Z, X, Y)
# plt.contourf(X, Y, z, depth)
# cbar = plt.colorbar()
#
# ### plot 2
# ax = fig.add_subplot(221 + 1)
# z = vfunc(X, Z, Y)
# plt.contourf(X, Y, z, depth)
# cbar = plt.colorbar()
#
# ### plot 3
# ax = fig.add_subplot(221 + 2)
# z = vfunc(X, Y, Z)
# plt.contourf(X, Y, z, depth)
# cbar = plt.colorbar()
#
# ax = fig.add_subplot(224, projection='3d')
# ax.plot_surface(
# XX, YY, ZZ, rstride=1, cstride=1, facecolors=cm.jet(zz))
#
# plt.show()
if __name__ == "__main__":
main()
| gpl-2.0 |
pythonvietnam/scikit-learn | sklearn/utils/tests/test_random.py | 230 | 7344 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from scipy.misc import comb as combinations
from numpy.testing import assert_array_almost_equal
from sklearn.utils.random import sample_without_replacement
from sklearn.utils.random import random_choice_csc
from sklearn.utils.testing import (
assert_raises,
assert_equal,
assert_true)
###############################################################################
# test custom sampling without replacement algorithm
###############################################################################
def test_invalid_sample_without_replacement_algorithm():
assert_raises(ValueError, sample_without_replacement, 5, 4, "unknown")
def test_sample_without_replacement_algorithms():
methods = ("auto", "tracking_selection", "reservoir_sampling", "pool")
for m in methods:
def sample_without_replacement_method(n_population, n_samples,
random_state=None):
return sample_without_replacement(n_population, n_samples,
method=m,
random_state=random_state)
check_edge_case_of_sample_int(sample_without_replacement_method)
check_sample_int(sample_without_replacement_method)
check_sample_int_distribution(sample_without_replacement_method)
def check_edge_case_of_sample_int(sample_without_replacement):
    # n_population < n_samples
assert_raises(ValueError, sample_without_replacement, 0, 1)
assert_raises(ValueError, sample_without_replacement, 1, 2)
# n_population == n_samples
assert_equal(sample_without_replacement(0, 0).shape, (0, ))
assert_equal(sample_without_replacement(1, 1).shape, (1, ))
# n_population >= n_samples
assert_equal(sample_without_replacement(5, 0).shape, (0, ))
assert_equal(sample_without_replacement(5, 1).shape, (1, ))
# n_population < 0 or n_samples < 0
assert_raises(ValueError, sample_without_replacement, -1, 5)
assert_raises(ValueError, sample_without_replacement, 5, -1)
def check_sample_int(sample_without_replacement):
# This test is heavily inspired from test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# the sample is of the correct length and contains only unique items
n_population = 100
for n_samples in range(n_population + 1):
s = sample_without_replacement(n_population, n_samples)
assert_equal(len(s), n_samples)
unique = np.unique(s)
assert_equal(np.size(unique), n_samples)
assert_true(np.all(unique < n_population))
# test edge case n_population == n_samples == 0
assert_equal(np.size(sample_without_replacement(0, 0)), 0)
def check_sample_int_distribution(sample_without_replacement):
# This test is heavily inspired from test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# sample generates all possible permutations
n_population = 10
# a large number of trials prevents false negatives without slowing normal
# case
n_trials = 10000
for n_samples in range(n_population):
        # Counting the number of combinations is not as good as counting
        # the number of permutations. However, it works with a sampling
        # algorithm that does not provide a random permutation of the subset
        # of integers.
n_expected = combinations(n_population, n_samples, exact=True)
output = {}
for i in range(n_trials):
output[frozenset(sample_without_replacement(n_population,
n_samples))] = None
if len(output) == n_expected:
break
else:
raise AssertionError(
"number of combinations != number of expected (%s != %s)" %
(len(output), n_expected))
def test_random_choice_csc(n_samples=10000, random_state=24):
# Explicit class probabilities
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# Implicit class probabilities
classes = [[0, 1], [1, 2]] # test for array-like support
class_probabilites = [np.array([0.5, 0.5]), np.array([0, 1/2, 1/2])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
    # Edge case probabilities 1.0 and 0.0
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([1.0, 0.0]), np.array([0.0, 1.0, 0.0])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel(),
minlength=len(class_probabilites[k])) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# One class target data
classes = [[1], [0]] # test for array-like support
class_probabilites = [np.array([0.0, 1.0]), np.array([1.0])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
def test_random_choice_csc_errors():
# the length of an array in classes and class_probabilites is mismatched
classes = [np.array([0, 1]), np.array([0, 1, 2, 3])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array(["a", "1"]), np.array(["z", "1", "2"])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array([4.2, 0.1]), np.array([0.1, 0.2, 9.4])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
    # Given probabilities don't sum to 1
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.6]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
| bsd-3-clause |
boomsbloom/dtm-fmri | DTM/for_gensim/lib/python2.7/site-packages/pandas/computation/ops.py | 7 | 15881 | """Operator classes for eval.
"""
import operator as op
from functools import partial
from datetime import datetime
import numpy as np
from pandas.types.common import is_list_like, is_scalar
import pandas as pd
from pandas.compat import PY3, string_types, text_type
import pandas.core.common as com
from pandas.formats.printing import pprint_thing, pprint_thing_encoded
from pandas.core.base import StringMixin
from pandas.computation.common import _ensure_decoded, _result_type_many
from pandas.computation.scope import _DEFAULT_GLOBALS
_reductions = 'sum', 'prod'
_unary_math_ops = ('sin', 'cos', 'exp', 'log', 'expm1', 'log1p',
'sqrt', 'sinh', 'cosh', 'tanh', 'arcsin', 'arccos',
'arctan', 'arccosh', 'arcsinh', 'arctanh', 'abs')
_binary_math_ops = ('arctan2',)
_mathops = _unary_math_ops + _binary_math_ops
_LOCAL_TAG = '__pd_eval_local_'
class UndefinedVariableError(NameError):
"""NameError subclass for local variables."""
def __init__(self, name, is_local):
if is_local:
msg = 'local variable {0!r} is not defined'
else:
msg = 'name {0!r} is not defined'
super(UndefinedVariableError, self).__init__(msg.format(name))
class Term(StringMixin):
def __new__(cls, name, env, side=None, encoding=None):
klass = Constant if not isinstance(name, string_types) else cls
supr_new = super(Term, klass).__new__
return supr_new(klass)
def __init__(self, name, env, side=None, encoding=None):
self._name = name
self.env = env
self.side = side
tname = text_type(name)
self.is_local = (tname.startswith(_LOCAL_TAG) or
tname in _DEFAULT_GLOBALS)
self._value = self._resolve_name()
self.encoding = encoding
@property
def local_name(self):
return self.name.replace(_LOCAL_TAG, '')
def __unicode__(self):
return pprint_thing(self.name)
def __call__(self, *args, **kwargs):
return self.value
def evaluate(self, *args, **kwargs):
return self
def _resolve_name(self):
res = self.env.resolve(self.local_name, is_local=self.is_local)
self.update(res)
if hasattr(res, 'ndim') and res.ndim > 2:
raise NotImplementedError("N-dimensional objects, where N > 2,"
" are not supported with eval")
return res
def update(self, value):
"""
search order for local (i.e., @variable) variables:
scope, key_variable
[('locals', 'local_name'),
('globals', 'local_name'),
('locals', 'key'),
('globals', 'key')]
"""
key = self.name
# if it's a variable name (otherwise a constant)
if isinstance(key, string_types):
self.env.swapkey(self.local_name, key, new_value=value)
self.value = value
@property
def isscalar(self):
return is_scalar(self._value)
@property
def type(self):
try:
# potentially very slow for large, mixed dtype frames
return self._value.values.dtype
except AttributeError:
try:
# ndarray
return self._value.dtype
except AttributeError:
# scalar
return type(self._value)
return_type = type
@property
def raw(self):
return pprint_thing('{0}(name={1!r}, type={2})'
''.format(self.__class__.__name__, self.name,
self.type))
@property
def is_datetime(self):
try:
t = self.type.type
except AttributeError:
t = self.type
return issubclass(t, (datetime, np.datetime64))
@property
def value(self):
return self._value
@value.setter
def value(self, new_value):
self._value = new_value
@property
def name(self):
return self._name
@name.setter
def name(self, new_name):
self._name = new_name
@property
def ndim(self):
return self._value.ndim
class Constant(Term):
def __init__(self, value, env, side=None, encoding=None):
super(Constant, self).__init__(value, env, side=side,
encoding=encoding)
def _resolve_name(self):
return self._name
@property
def name(self):
return self.value
def __unicode__(self):
        # In Python 2, str() of a float can be truncated to fewer
        # significant digits than repr(), so use repr() here
return repr(self.name)
_bool_op_map = {'not': '~', 'and': '&', 'or': '|'}
class Op(StringMixin):
"""Hold an operator of arbitrary arity
"""
def __init__(self, op, operands, *args, **kwargs):
self.op = _bool_op_map.get(op, op)
self.operands = operands
self.encoding = kwargs.get('encoding', None)
def __iter__(self):
return iter(self.operands)
def __unicode__(self):
"""Print a generic n-ary operator and its operands using infix
notation"""
# recurse over the operands
parened = ('({0})'.format(pprint_thing(opr))
for opr in self.operands)
return pprint_thing(' {0} '.format(self.op).join(parened))
@property
def return_type(self):
# clobber types to bool if the op is a boolean operator
if self.op in (_cmp_ops_syms + _bool_ops_syms):
return np.bool_
return _result_type_many(*(term.type for term in com.flatten(self)))
@property
def has_invalid_return_type(self):
types = self.operand_types
obj_dtype_set = frozenset([np.dtype('object')])
return self.return_type == object and types - obj_dtype_set
@property
def operand_types(self):
return frozenset(term.type for term in com.flatten(self))
@property
def isscalar(self):
return all(operand.isscalar for operand in self.operands)
@property
def is_datetime(self):
try:
t = self.return_type.type
except AttributeError:
t = self.return_type
return issubclass(t, (datetime, np.datetime64))
def _in(x, y):
"""Compute the vectorized membership of ``x in y`` if possible, otherwise
use Python.
"""
try:
return x.isin(y)
except AttributeError:
if is_list_like(x):
try:
return y.isin(x)
except AttributeError:
pass
return x in y
def _not_in(x, y):
"""Compute the vectorized membership of ``x not in y`` if possible,
otherwise use Python.
"""
try:
return ~x.isin(y)
except AttributeError:
if is_list_like(x):
try:
return ~y.isin(x)
except AttributeError:
pass
return x not in y
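# Illustrative sketch (not part of the original module): the helpers prefer the
# vectorized ``isin`` when either operand provides it, and fall back to the
# plain Python membership operators otherwise, e.g.
#
#   >>> _in(pd.Series([1, 2, 3]), [2, 3]).tolist()
#   [False, True, True]
#   >>> _not_in(2, [1, 2, 3])
#   False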
_cmp_ops_syms = '>', '<', '>=', '<=', '==', '!=', 'in', 'not in'
_cmp_ops_funcs = op.gt, op.lt, op.ge, op.le, op.eq, op.ne, _in, _not_in
_cmp_ops_dict = dict(zip(_cmp_ops_syms, _cmp_ops_funcs))
_bool_ops_syms = '&', '|', 'and', 'or'
_bool_ops_funcs = op.and_, op.or_, op.and_, op.or_
_bool_ops_dict = dict(zip(_bool_ops_syms, _bool_ops_funcs))
_arith_ops_syms = '+', '-', '*', '/', '**', '//', '%'
_arith_ops_funcs = (op.add, op.sub, op.mul, op.truediv if PY3 else op.div,
op.pow, op.floordiv, op.mod)
_arith_ops_dict = dict(zip(_arith_ops_syms, _arith_ops_funcs))
_special_case_arith_ops_syms = '**', '//', '%'
_special_case_arith_ops_funcs = op.pow, op.floordiv, op.mod
_special_case_arith_ops_dict = dict(zip(_special_case_arith_ops_syms,
_special_case_arith_ops_funcs))
_binary_ops_dict = {}
for d in (_cmp_ops_dict, _bool_ops_dict, _arith_ops_dict):
_binary_ops_dict.update(d)
def _cast_inplace(terms, acceptable_dtypes, dtype):
"""Cast an expression inplace.
Parameters
----------
terms : Op
        The expression that should be cast.
acceptable_dtypes : list of acceptable numpy.dtype
Will not cast if term's dtype in this list.
.. versionadded:: 0.19.0
dtype : str or numpy.dtype
The dtype to cast to.
"""
dt = np.dtype(dtype)
for term in terms:
if term.type in acceptable_dtypes:
continue
try:
new_value = term.value.astype(dt)
except AttributeError:
new_value = dt.type(term.value)
term.update(new_value)
def is_term(obj):
return isinstance(obj, Term)
class BinOp(Op):
"""Hold a binary operator and its operands
Parameters
----------
op : str
left : Term or Op
right : Term or Op
"""
def __init__(self, op, lhs, rhs, **kwargs):
super(BinOp, self).__init__(op, (lhs, rhs))
self.lhs = lhs
self.rhs = rhs
self._disallow_scalar_only_bool_ops()
self.convert_values()
try:
self.func = _binary_ops_dict[op]
except KeyError:
# has to be made a list for python3
keys = list(_binary_ops_dict.keys())
raise ValueError('Invalid binary operator {0!r}, valid'
' operators are {1}'.format(op, keys))
def __call__(self, env):
"""Recursively evaluate an expression in Python space.
Parameters
----------
env : Scope
Returns
-------
object
The result of an evaluated expression.
"""
# handle truediv
if self.op == '/' and env.scope['truediv']:
self.func = op.truediv
# recurse over the left/right nodes
left = self.lhs(env)
right = self.rhs(env)
return self.func(left, right)
def evaluate(self, env, engine, parser, term_type, eval_in_python):
"""Evaluate a binary operation *before* being passed to the engine.
Parameters
----------
env : Scope
engine : str
parser : str
term_type : type
eval_in_python : list
Returns
-------
term_type
The "pre-evaluated" expression as an instance of ``term_type``
"""
if engine == 'python':
res = self(env)
else:
# recurse over the left/right nodes
left = self.lhs.evaluate(env, engine=engine, parser=parser,
term_type=term_type,
eval_in_python=eval_in_python)
right = self.rhs.evaluate(env, engine=engine, parser=parser,
term_type=term_type,
eval_in_python=eval_in_python)
# base cases
if self.op in eval_in_python:
res = self.func(left.value, right.value)
else:
res = pd.eval(self, local_dict=env, engine=engine,
parser=parser)
name = env.add_tmp(res)
return term_type(name, env=env)
def convert_values(self):
"""Convert datetimes to a comparable value in an expression.
"""
def stringify(value):
if self.encoding is not None:
encoder = partial(pprint_thing_encoded,
encoding=self.encoding)
else:
encoder = pprint_thing
return encoder(value)
lhs, rhs = self.lhs, self.rhs
if is_term(lhs) and lhs.is_datetime and is_term(rhs) and rhs.isscalar:
v = rhs.value
if isinstance(v, (int, float)):
v = stringify(v)
v = pd.Timestamp(_ensure_decoded(v))
if v.tz is not None:
v = v.tz_convert('UTC')
self.rhs.update(v)
if is_term(rhs) and rhs.is_datetime and is_term(lhs) and lhs.isscalar:
v = lhs.value
if isinstance(v, (int, float)):
v = stringify(v)
v = pd.Timestamp(_ensure_decoded(v))
if v.tz is not None:
v = v.tz_convert('UTC')
self.lhs.update(v)
def _disallow_scalar_only_bool_ops(self):
if ((self.lhs.isscalar or self.rhs.isscalar) and
self.op in _bool_ops_dict and
(not (issubclass(self.rhs.return_type, (bool, np.bool_)) and
issubclass(self.lhs.return_type, (bool, np.bool_))))):
raise NotImplementedError("cannot evaluate scalar only bool ops")
def isnumeric(dtype):
return issubclass(np.dtype(dtype).type, np.number)
class Div(BinOp):
"""Div operator to special case casting.
Parameters
----------
lhs, rhs : Term or Op
The Terms or Ops in the ``/`` expression.
truediv : bool
Whether or not to use true division. With Python 3 this happens
regardless of the value of ``truediv``.
"""
def __init__(self, lhs, rhs, truediv, *args, **kwargs):
super(Div, self).__init__('/', lhs, rhs, *args, **kwargs)
if not isnumeric(lhs.return_type) or not isnumeric(rhs.return_type):
raise TypeError("unsupported operand type(s) for {0}:"
" '{1}' and '{2}'".format(self.op,
lhs.return_type,
rhs.return_type))
if truediv or PY3:
# do not upcast float32s to float64 un-necessarily
acceptable_dtypes = [np.float32, np.float_]
_cast_inplace(com.flatten(self), acceptable_dtypes, np.float_)
_unary_ops_syms = '+', '-', '~', 'not'
_unary_ops_funcs = op.pos, op.neg, op.invert, op.invert
_unary_ops_dict = dict(zip(_unary_ops_syms, _unary_ops_funcs))
class UnaryOp(Op):
"""Hold a unary operator and its operands
Parameters
----------
op : str
The token used to represent the operator.
operand : Term or Op
The Term or Op operand to the operator.
Raises
------
ValueError
* If no function associated with the passed operator token is found.
"""
def __init__(self, op, operand):
super(UnaryOp, self).__init__(op, (operand,))
self.operand = operand
try:
self.func = _unary_ops_dict[op]
except KeyError:
raise ValueError('Invalid unary operator {0!r}, valid operators '
'are {1}'.format(op, _unary_ops_syms))
def __call__(self, env):
operand = self.operand(env)
return self.func(operand)
def __unicode__(self):
return pprint_thing('{0}({1})'.format(self.op, self.operand))
@property
def return_type(self):
operand = self.operand
if operand.return_type == np.dtype('bool'):
return np.dtype('bool')
if (isinstance(operand, Op) and
(operand.op in _cmp_ops_dict or operand.op in _bool_ops_dict)):
return np.dtype('bool')
return np.dtype('int')
class MathCall(Op):
def __init__(self, func, args):
super(MathCall, self).__init__(func.name, args)
self.func = func
def __call__(self, env):
operands = [op(env) for op in self.operands]
with np.errstate(all='ignore'):
return self.func.func(*operands)
def __unicode__(self):
operands = map(str, self.operands)
return pprint_thing('{0}({1})'.format(self.op, ','.join(operands)))
class FuncNode(object):
def __init__(self, name):
if name not in _mathops:
raise ValueError(
"\"{0}\" is not a supported function".format(name))
self.name = name
self.func = getattr(np, name)
def __call__(self, *args):
return MathCall(self, args)
| mit |
NZRS/content-analysis | netflix.py | 2 | 3126 | from bs4 import BeautifulSoup
from urllib2 import quote
import unicodedata
import requests
import json
import glob
import pandas as pd
from collections import Counter
movie_list = []
for page in glob.glob('*.html'):
with open(page, 'r+') as f:
my_page = f.read()
my_soup = BeautifulSoup(my_page)
for div in my_soup.find_all('div', class_='lockup'):
try:
movie_list.append(div.img.get('alt'))
except:
movie_list.append('movie could not be extracted from page')
movie_list = ['movie could not be extracted from page' if movie is None else movie
              for movie in movie_list]
movie_list2 = []
for movie in movie_list:
try:
movie = quote(movie)
movie_list2.append(movie)
except:
try:
movie = unicodedata.normalize('NFKC', movie).encode('ascii','ignore')
movie = quote(movie)
movie_list2.append(movie)
except:
print movie
movie_list2.append('movie could not be processed')
all_movies_us = {}
for movie in movie_list2:
try:
query_url = 'http://www.omdbapi.com/?t=' + movie + '&y=&plot=full&r=json'
response = requests.get(query_url)
my_dict = json.loads(response.text)
all_movies_us[movie] = my_dict
except:
all_movies_us[movie] = 'No response'
print movie
# The analysis below iterates over ``all_movies``; alias the dictionary built
# above so the script runs end-to-end (assumed intent).
all_movies = all_movies_us
# movies/single year shows
years_dict = {}
counter = 0
for k,v in all_movies.iteritems():
try:
if len(v['Year']) == 4:
try:
years_dict[v['Year']] += 1
except:
years_dict[v['Year']] = 1
continue
except:
counter += 1
continue
print counter
my_frame = pd.DataFrame.from_dict(years_dict, orient = 'index')
my_frame.to_csv('single_years.csv')
counter=0
score_dict = {}
for k,v in all_movies.iteritems():
try:
if v['imdbRating'] != 'N/A':
score_dict[v['Title']] = v['imdbRating']
except:
counter +=1
continue
print counter
score_dict2 ={}
for title, score in score_dict.iteritems():
try:
score_dict2[title] = float(score)
except:
print score
score_dict = score_dict2
average_score = sum(score_dict.values()) / len(score_dict)
# The original line stopped at ``top_25 =``; a reasonable completion is the 25
# highest-rated titles.
top_25 = sorted(score_dict.items(), key=lambda kv: kv[1], reverse=True)[:25]
print average_score
years = []
country = []
language =[]
actors = []
for movie, results in all_movies.iteritems():
try:
years.append(results['Year'])
except:
continue
try:
country.append(results['Country'])
except:
continue
try:
language.append(results['Language'])
except:
continue
try:
for actor in results['Actors'].split(','):
actors.append(actor)
except:
continue
# Ongoing shows
years_dict = {}
counter = 0
for k,v in all_movies.iteritems():
try:
print v['Year'][4]
except:
continue
# Languages
lang_list = []
for lang in language:
for x in lang.split(','):
lang_list.append(x)
lang_list
Counter(lang_list)
Counter(lang_list).most_common(10)
| agpl-3.0 |
chrisburr/scikit-learn | sklearn/metrics/ranking.py | 17 | 26927 | """Metrics to assess performance on classification task given scores
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Arnaud Joly <[email protected]>
# Jochen Wersdorfer <[email protected]>
# Lars Buitinck <[email protected]>
# Joel Nothman <[email protected]>
# Noel Dawe <[email protected]>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import csr_matrix
from ..utils import check_consistent_length
from ..utils import column_or_1d, check_array
from ..utils.multiclass import type_of_target
from ..utils.fixes import isclose
from ..utils.fixes import bincount
from ..utils.fixes import array_equal
from ..utils.stats import rankdata
from ..utils.sparsefuncs import count_nonzero
from ..exceptions import UndefinedMetricWarning
from .base import _average_binary_score
def auc(x, y, reorder=False):
"""Compute Area Under the Curve (AUC) using the trapezoidal rule
This is a general function, given points on a curve. For computing the
area under the ROC-curve, see :func:`roc_auc_score`.
Parameters
----------
x : array, shape = [n]
x coordinates.
y : array, shape = [n]
y coordinates.
reorder : boolean, optional (default=False)
If True, assume that the curve is ascending in the case of ties, as for
an ROC curve. If the curve is non-ascending, the result will be wrong.
Returns
-------
auc : float
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> pred = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, pred, pos_label=2)
>>> metrics.auc(fpr, tpr)
0.75
See also
--------
roc_auc_score : Computes the area under the ROC curve
precision_recall_curve :
Compute precision-recall pairs for different probability thresholds
"""
check_consistent_length(x, y)
x = column_or_1d(x)
y = column_or_1d(y)
if x.shape[0] < 2:
raise ValueError('At least 2 points are needed to compute'
' area under curve, but x.shape = %s' % x.shape)
direction = 1
if reorder:
# reorder the data points according to the x axis and using y to
# break ties
order = np.lexsort((y, x))
x, y = x[order], y[order]
else:
dx = np.diff(x)
if np.any(dx < 0):
if np.all(dx <= 0):
direction = -1
else:
raise ValueError("Reordering is not turned on, and "
"the x array is not increasing: %s" % x)
area = direction * np.trapz(y, x)
return area
def average_precision_score(y_true, y_score, average="macro",
sample_weight=None):
"""Compute average precision (AP) from prediction scores
This score corresponds to the area under the precision-recall curve.
Note: this implementation is restricted to the binary classification task
or multilabel classification task.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples] or [n_samples, n_classes]
True binary labels in binary label indicators.
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
If ``None``, the scores for each class are returned. Otherwise,
this determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by considering each element of the label
indicator matrix as a label.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label).
``'samples'``:
Calculate metrics for each instance, and find their average.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
average_precision : float
References
----------
.. [1] `Wikipedia entry for the Average precision
<http://en.wikipedia.org/wiki/Average_precision>`_
See also
--------
roc_auc_score : Area under the ROC curve
precision_recall_curve :
Compute precision-recall pairs for different probability thresholds
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import average_precision_score
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> average_precision_score(y_true, y_scores) # doctest: +ELLIPSIS
0.79...
"""
def _binary_average_precision(y_true, y_score, sample_weight=None):
precision, recall, thresholds = precision_recall_curve(
y_true, y_score, sample_weight=sample_weight)
return auc(recall, precision)
return _average_binary_score(_binary_average_precision, y_true, y_score,
average, sample_weight=sample_weight)
def roc_auc_score(y_true, y_score, average="macro", sample_weight=None):
"""Compute Area Under the Curve (AUC) from prediction scores
Note: this implementation is restricted to the binary classification task
or multilabel classification task in label indicator format.
Read more in the :ref:`User Guide <roc_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples] or [n_samples, n_classes]
True binary labels in binary label indicators.
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
If ``None``, the scores for each class are returned. Otherwise,
this determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by considering each element of the label
indicator matrix as a label.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label).
``'samples'``:
Calculate metrics for each instance, and find their average.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
auc : float
References
----------
.. [1] `Wikipedia entry for the Receiver operating characteristic
<http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
See also
--------
average_precision_score : Area under the precision-recall curve
roc_curve : Compute Receiver operating characteristic (ROC)
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import roc_auc_score
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> roc_auc_score(y_true, y_scores)
0.75
"""
def _binary_roc_auc_score(y_true, y_score, sample_weight=None):
if len(np.unique(y_true)) != 2:
raise ValueError("Only one class present in y_true. ROC AUC score "
"is not defined in that case.")
fpr, tpr, tresholds = roc_curve(y_true, y_score,
sample_weight=sample_weight)
return auc(fpr, tpr, reorder=True)
return _average_binary_score(
_binary_roc_auc_score, y_true, y_score, average,
sample_weight=sample_weight)
def _binary_clf_curve(y_true, y_score, pos_label=None, sample_weight=None):
"""Calculate true and false positives per binary classification threshold.
Parameters
----------
y_true : array, shape = [n_samples]
True targets of binary classification
y_score : array, shape = [n_samples]
Estimated probabilities or decision function
pos_label : int, optional (default=None)
The label of the positive class
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
fps : array, shape = [n_thresholds]
A count of false positives, at index i being the number of negative
samples assigned a score >= thresholds[i]. The total number of
negative samples is equal to fps[-1] (thus true negatives are given by
fps[-1] - fps).
tps : array, shape = [n_thresholds <= len(np.unique(y_score))]
An increasing count of true positives, at index i being the number
of positive samples assigned a score >= thresholds[i]. The total
number of positive samples is equal to tps[-1] (thus false negatives
are given by tps[-1] - tps).
thresholds : array, shape = [n_thresholds]
Decreasing score values.
"""
check_consistent_length(y_true, y_score)
y_true = column_or_1d(y_true)
y_score = column_or_1d(y_score)
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
# ensure binary classification if pos_label is not specified
classes = np.unique(y_true)
if (pos_label is None and
not (array_equal(classes, [0, 1]) or
array_equal(classes, [-1, 1]) or
array_equal(classes, [0]) or
array_equal(classes, [-1]) or
array_equal(classes, [1]))):
raise ValueError("Data is not binary and pos_label is not specified")
elif pos_label is None:
pos_label = 1.
# make y_true a boolean vector
y_true = (y_true == pos_label)
# sort scores and corresponding truth values
desc_score_indices = np.argsort(y_score, kind="mergesort")[::-1]
y_score = y_score[desc_score_indices]
y_true = y_true[desc_score_indices]
if sample_weight is not None:
weight = sample_weight[desc_score_indices]
else:
weight = 1.
# y_score typically has many tied values. Here we extract
# the indices associated with the distinct values. We also
# concatenate a value for the end of the curve.
# We need to use isclose to avoid spurious repeated thresholds
# stemming from floating point roundoff errors.
distinct_value_indices = np.where(np.logical_not(isclose(
np.diff(y_score), 0)))[0]
threshold_idxs = np.r_[distinct_value_indices, y_true.size - 1]
# accumulate the true positives with decreasing threshold
tps = (y_true * weight).cumsum()[threshold_idxs]
if sample_weight is not None:
fps = weight.cumsum()[threshold_idxs] - tps
else:
fps = 1 + threshold_idxs - tps
return fps, tps, y_score[threshold_idxs]
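# Illustrative sketch (not part of the original module): for a tiny input the
# cumulative counts per distinct threshold look roughly like this.
#
#   >>> y_true = np.array([0, 0, 1, 1])
#   >>> y_score = np.array([0.1, 0.4, 0.35, 0.8])
#   >>> fps, tps, thresholds = _binary_clf_curve(y_true, y_score)
#   >>> fps.tolist(), tps.tolist(), thresholds.tolist()
#   ([0.0, 1.0, 1.0, 2.0], [1.0, 1.0, 2.0, 2.0], [0.8, 0.4, 0.35, 0.1])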
def precision_recall_curve(y_true, probas_pred, pos_label=None,
sample_weight=None):
"""Compute precision-recall pairs for different probability thresholds
Note: this implementation is restricted to the binary classification task.
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The last precision and recall values are 1. and 0. respectively and do not
have a corresponding threshold. This ensures that the graph starts on the
x axis.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples]
True targets of binary classification in range {-1, 1} or {0, 1}.
probas_pred : array, shape = [n_samples]
Estimated probabilities or decision function.
pos_label : int, optional (default=None)
The label of the positive class
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision : array, shape = [n_thresholds + 1]
Precision values such that element i is the precision of
predictions with score >= thresholds[i] and the last element is 1.
recall : array, shape = [n_thresholds + 1]
Decreasing recall values such that element i is the recall of
predictions with score >= thresholds[i] and the last element is 0.
thresholds : array, shape = [n_thresholds <= len(np.unique(probas_pred))]
Increasing thresholds on the decision function used to compute
precision and recall.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import precision_recall_curve
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> precision, recall, thresholds = precision_recall_curve(
... y_true, y_scores)
>>> precision # doctest: +ELLIPSIS
array([ 0.66..., 0.5 , 1. , 1. ])
>>> recall
array([ 1. , 0.5, 0.5, 0. ])
>>> thresholds
array([ 0.35, 0.4 , 0.8 ])
"""
fps, tps, thresholds = _binary_clf_curve(y_true, probas_pred,
pos_label=pos_label,
sample_weight=sample_weight)
precision = tps / (tps + fps)
recall = tps / tps[-1]
# stop when full recall attained
# and reverse the outputs so recall is decreasing
last_ind = tps.searchsorted(tps[-1])
sl = slice(last_ind, None, -1)
return np.r_[precision[sl], 1], np.r_[recall[sl], 0], thresholds[sl]
def roc_curve(y_true, y_score, pos_label=None, sample_weight=None,
drop_intermediate=True):
"""Compute Receiver operating characteristic (ROC)
Note: this implementation is restricted to the binary classification task.
Read more in the :ref:`User Guide <roc_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples]
True binary labels in range {0, 1} or {-1, 1}. If labels are not
binary, pos_label should be explicitly given.
y_score : array, shape = [n_samples]
Target scores, can either be probability estimates of the positive
class or confidence values.
pos_label : int
Label considered as positive and others are considered negative.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
drop_intermediate : boolean, optional (default=True)
Whether to drop some suboptimal thresholds which would not appear
on a plotted ROC curve. This is useful in order to create lighter
ROC curves.
.. versionadded:: 0.17
parameter *drop_intermediate*.
Returns
-------
fpr : array, shape = [>2]
Increasing false positive rates such that element i is the false
positive rate of predictions with score >= thresholds[i].
tpr : array, shape = [>2]
Increasing true positive rates such that element i is the true
positive rate of predictions with score >= thresholds[i].
thresholds : array, shape = [n_thresholds]
Decreasing thresholds on the decision function used to compute
fpr and tpr. `thresholds[0]` represents no instances being predicted
and is arbitrarily set to `max(y_score) + 1`.
See also
--------
roc_auc_score : Compute Area Under the Curve (AUC) from prediction scores
Notes
-----
Since the thresholds are sorted from low to high values, they
are reversed upon returning them to ensure they correspond to both ``fpr``
and ``tpr``, which are sorted in reversed order during their calculation.
References
----------
.. [1] `Wikipedia entry for the Receiver operating characteristic
<http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, scores, pos_label=2)
>>> fpr
array([ 0. , 0.5, 0.5, 1. ])
>>> tpr
array([ 0.5, 0.5, 1. , 1. ])
>>> thresholds
array([ 0.8 , 0.4 , 0.35, 0.1 ])
"""
fps, tps, thresholds = _binary_clf_curve(
y_true, y_score, pos_label=pos_label, sample_weight=sample_weight)
# Attempt to drop thresholds corresponding to points in between and
# collinear with other points. These are always suboptimal and do not
# appear on a plotted ROC curve (and thus do not affect the AUC).
# Here np.diff(_, 2) is used as a "second derivative" to tell if there
# is a corner at the point. Both fps and tps must be tested to handle
# thresholds with multiple data points (which are combined in
# _binary_clf_curve). This keeps all cases where the point should be kept,
# but does not drop more complicated cases like fps = [1, 3, 7],
# tps = [1, 2, 4]; there is no harm in keeping too many thresholds.
if drop_intermediate and len(fps) > 2:
optimal_idxs = np.where(np.r_[True,
np.logical_or(np.diff(fps, 2),
np.diff(tps, 2)),
True])[0]
fps = fps[optimal_idxs]
tps = tps[optimal_idxs]
thresholds = thresholds[optimal_idxs]
if tps.size == 0 or fps[0] != 0:
# Add an extra threshold position if necessary
tps = np.r_[0, tps]
fps = np.r_[0, fps]
thresholds = np.r_[thresholds[0] + 1, thresholds]
if fps[-1] <= 0:
warnings.warn("No negative samples in y_true, "
"false positive value should be meaningless",
UndefinedMetricWarning)
fpr = np.repeat(np.nan, fps.shape)
else:
fpr = fps / fps[-1]
if tps[-1] <= 0:
warnings.warn("No positive samples in y_true, "
"true positive value should be meaningless",
UndefinedMetricWarning)
tpr = np.repeat(np.nan, tps.shape)
else:
tpr = tps / tps[-1]
return fpr, tpr, thresholds
def label_ranking_average_precision_score(y_true, y_score):
"""Compute ranking-based average precision
Label ranking average precision (LRAP) is the average over each ground
truth label assigned to each sample, of the ratio of true vs. total
labels with lower score.
This metric is used in multilabel ranking problem, where the goal
is to give better rank to the labels associated to each sample.
The obtained score is always strictly greater than 0 and
the best value is 1.
Read more in the :ref:`User Guide <label_ranking_average_precision>`.
Parameters
----------
y_true : array or sparse matrix, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
Returns
-------
score : float
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import label_ranking_average_precision_score
>>> y_true = np.array([[1, 0, 0], [0, 0, 1]])
>>> y_score = np.array([[0.75, 0.5, 1], [1, 0.2, 0.1]])
>>> label_ranking_average_precision_score(y_true, y_score) \
# doctest: +ELLIPSIS
0.416...
"""
check_consistent_length(y_true, y_score)
y_true = check_array(y_true, ensure_2d=False)
y_score = check_array(y_score, ensure_2d=False)
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
    # Handle badly formatted array and the degenerate case with one label
y_type = type_of_target(y_true)
if (y_type != "multilabel-indicator" and
not (y_type == "binary" and y_true.ndim == 2)):
raise ValueError("{0} format is not supported".format(y_type))
y_true = csr_matrix(y_true)
y_score = -y_score
n_samples, n_labels = y_true.shape
out = 0.
for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):
relevant = y_true.indices[start:stop]
if (relevant.size == 0 or relevant.size == n_labels):
            # If all labels are relevant or irrelevant, the score is also
            # equal to 1. The label ranking has no meaning.
out += 1.
continue
scores_i = y_score[i]
rank = rankdata(scores_i, 'max')[relevant]
L = rankdata(scores_i[relevant], 'max')
out += (L / rank).mean()
return out / n_samples
def coverage_error(y_true, y_score, sample_weight=None):
"""Coverage error measure
Compute how far we need to go through the ranked scores to cover all
true labels. The best value is equal to the average number
of labels in ``y_true`` per sample.
Ties in ``y_scores`` are broken by giving maximal rank that would have
been assigned to all tied values.
Read more in the :ref:`User Guide <coverage_error>`.
Parameters
----------
y_true : array, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
coverage_error : float
References
----------
.. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).
Mining multi-label data. In Data mining and knowledge discovery
handbook (pp. 667-685). Springer US.
"""
y_true = check_array(y_true, ensure_2d=False)
y_score = check_array(y_score, ensure_2d=False)
check_consistent_length(y_true, y_score, sample_weight)
y_type = type_of_target(y_true)
if y_type != "multilabel-indicator":
raise ValueError("{0} format is not supported".format(y_type))
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
y_score_mask = np.ma.masked_array(y_score, mask=np.logical_not(y_true))
y_min_relevant = y_score_mask.min(axis=1).reshape((-1, 1))
coverage = (y_score >= y_min_relevant).sum(axis=1)
coverage = coverage.filled(0)
return np.average(coverage, weights=sample_weight)
def label_ranking_loss(y_true, y_score, sample_weight=None):
"""Compute Ranking loss measure
Compute the average number of label pairs that are incorrectly ordered
given y_score weighted by the size of the label set and the number of
labels not in the label set.
This is similar to the error set size, but weighted by the number of
relevant and irrelevant labels. The best performance is achieved with
a ranking loss of zero.
Read more in the :ref:`User Guide <label_ranking_loss>`.
.. versionadded:: 0.17
A function *label_ranking_loss*
Parameters
----------
y_true : array or sparse matrix, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
References
----------
.. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).
Mining multi-label data. In Data mining and knowledge discovery
handbook (pp. 667-685). Springer US.
"""
y_true = check_array(y_true, ensure_2d=False, accept_sparse='csr')
y_score = check_array(y_score, ensure_2d=False)
check_consistent_length(y_true, y_score, sample_weight)
y_type = type_of_target(y_true)
if y_type not in ("multilabel-indicator",):
raise ValueError("{0} format is not supported".format(y_type))
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
n_samples, n_labels = y_true.shape
y_true = csr_matrix(y_true)
loss = np.zeros(n_samples)
for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):
# Sort and bin the label scores
unique_scores, unique_inverse = np.unique(y_score[i],
return_inverse=True)
true_at_reversed_rank = bincount(
unique_inverse[y_true.indices[start:stop]],
minlength=len(unique_scores))
all_at_reversed_rank = bincount(unique_inverse,
minlength=len(unique_scores))
false_at_reversed_rank = all_at_reversed_rank - true_at_reversed_rank
        # If the scores are ordered, it's possible to count the number of
        # incorrectly ordered pairs in linear time by cumulatively counting
# how many false labels of a given score have a score higher than the
# accumulated true labels with lower score.
loss[i] = np.dot(true_at_reversed_rank.cumsum(),
false_at_reversed_rank)
n_positives = count_nonzero(y_true, axis=1)
with np.errstate(divide="ignore", invalid="ignore"):
loss /= ((n_labels - n_positives) * n_positives)
    # When there are no positive or no negative labels, those values should
    # be considered correct, i.e. the ranking doesn't matter.
loss[np.logical_or(n_positives == 0, n_positives == n_labels)] = 0.
return np.average(loss, weights=sample_weight)
| bsd-3-clause |
rboyes/KerasScripts | CSVTrainer.py | 1 | 5321 | import os
import datetime
import sys
import time
import string
import random
import pandas as pd
import numpy as np
import gc
if(len(sys.argv) < 2):
print('Usage: CSVTrainer.py train.csv validation.csv model.h5 log.txt')
sys.exit(1)
trainingName = sys.argv[1]
validationName = sys.argv[2]
modelName = sys.argv[3]
logName = sys.argv[4]
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import *
import keras.preprocessing.image as image
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ModelCheckpoint, CSVLogger
from keras.layers import Input, merge, Dropout, Dense, Flatten, Activation
from keras.layers.convolutional import MaxPooling2D, Convolution2D, AveragePooling2D
from keras.layers.normalization import BatchNormalization
from keras.optimizers import Adam, SGD
from keras.models import Model, load_model
from keras import regularizers
from keras import backend as K
from keras.utils.data_utils import get_file
from sklearn.metrics import accuracy_score
from keras.applications import resnet50
def readCSV(fileList):
namesDataFrame = pd.read_csv(fileList)
flatten = lambda l: [item for sublist in l for item in sublist]
labels = sorted(list(set(flatten([l.split(' ') for l in namesDataFrame['tags'].values]))))
labelMap = {l: i for i, l in enumerate(labels)}
numberOfLabels = len(labels)
numberOfImages = len(namesDataFrame)
fileNames = []
y = np.zeros((numberOfImages, numberOfLabels), np.float32)
for index in range(0, numberOfImages):
inputImage = image.img_to_array(image.load_img(namesDataFrame.iloc[index][0]))
fileNames.append(namesDataFrame.iloc[index][0])
tags = namesDataFrame.iloc[index][1]
for t in tags.split(' '):
y[index, labelMap[t]] = 1.0
return (fileNames, y, labelMap)
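# Illustrative note (assumed input format, inferred from the code above): each
# CSV row holds an image path in its first column and space-separated tags in
# a 'tags' column, e.g.
#
#   image_name,tags
#   images/img_0.jpg,clear primary
#   images/img_1.jpg,haze water
#
# so that readCSV returns the file names, a binary indicator matrix y of shape
# (numberOfImages, numberOfLabels) and the tag -> column-index mapping:
#
#   fileNames, y, labelMap = readCSV('train.csv')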
print('Loading images..........', end = '',flush = True)
(trainingFileNames, trainY, trainingLabelMap) = readCSV(trainingName)
(validationFileNames, validationY, validationLabelMap) = readCSV(validationName)
print('done.', flush = True)
if len(trainingLabelMap) != len(validationLabelMap):
print("Label maps for training and validation are not equal")
sys.exit(1)
numberOfTrainingImages = len(trainingFileNames)
numberOfValidationImages = len(validationFileNames)
numberOfChannels = 3
nx = 256
ny = 256
batchSize = 25
lossName = 'binary_crossentropy'
activationName = 'sigmoid'
resnetModel = resnet50.ResNet50(include_top=False, weights='imagenet', input_shape=(numberOfChannels, nx, ny))
print('The number of layers in the resnet model = %d' % (len(resnetModel.layers)))
bottleneckTrainingDataGenerator = ImageDataGenerator(rescale = 1.0/255.0)
bottleneckValidationDataGenerator = ImageDataGenerator(rescale = 1.0/255.0)
bottleneckTrainingGenerator = bottleneckTrainingDataGenerator.flow_from_filenames(trainingFileNames, target_size = (nx, ny), batch_size = batchSize, shuffle = False)
bottleneckValidationGenerator = bottleneckTrainingDataGenerator.flow_from_filenames(validationFileNames, target_size = (nx, ny), batch_size = batchSize, shuffle = False)
bottleneckTrainingFeatures = resnetModel.predict_generator(bottleneckTrainingGenerator, numberOfTrainingImages)
bottleneckValidationFeatures = resnetModel.predict_generator(bottleneckValidationGenerator, numberOfValidationImages)
newTop = Sequential()
newTop.add(Flatten(input_shape = bottleneckTrainingFeatures.shape[1:]))
newTop.add(Dense(512, activation='relu'))
newTop.add(Dropout(0.5))
newTop.add(Dense(len(trainingLabelMap), activation=activationName, name='predictions'))
newTop.compile(loss=lossName, optimizer=Adam(lr=1.0E-3))
print('Fitting predicted features...', flush = True)
newTop.fit(bottleneckTrainingFeatures, trainY, validation_data = (bottleneckValidationFeatures, validationY), verbose = 1, batch_size = batchSize, nb_epoch = 25)
print('Done.', flush = True)
finalModel = Model(input = resnetModel.input, output = newTop(resnetModel.output))
print('The number of layers in the final model = %d' % (len(finalModel.layers)))
for layer in finalModel.layers[:(len(resnetModel.layers) - 21)]:
layer.trainable = False
finalModel.compile(loss=lossName,optimizer=SGD(lr=1e-4, momentum=0.9))
print(finalModel.summary())
# Could add vertical_flip = True
trainingDataGenerator = ImageDataGenerator(rescale = 1.0/255.0, rotation_range = 40, zoom_range = 0.15, horizontal_flip = True,
width_shift_range = 0.1, height_shift_range = 0.1, shear_range = 0.1)
validationDataGenerator = ImageDataGenerator(rescale = 1.0/255.0)
trainingGenerator = trainingDataGenerator.flow_from_filenames(trainingFileNames, trainY, batch_size = batchSize, target_size = (nx, ny))
validationGenerator = validationDataGenerator.flow_from_filenames(validationFileNames, validationY, batch_size = batchSize, target_size = (nx, ny))
csvLogger = CSVLogger(logName, append=True)
checkPointer = ModelCheckpoint(filepath=modelName, verbose = 1, save_best_only = True)
finalModel.fit_generator(trainingGenerator, numberOfTrainingImages, 50, validation_data = validationGenerator,
nb_val_samples = numberOfValidationImages, callbacks = [checkPointer, csvLogger])
| apache-2.0 |
francisco-dlp/hyperspy | hyperspy/drawing/utils.py | 1 | 57321 | # -*- coding: utf-8 -*-
# Copyright 2007-2016 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import copy
import itertools
import textwrap
from traits import trait_base
import matplotlib.pyplot as plt
import matplotlib as mpl
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.backend_bases import key_press_handler
import warnings
import numpy as np
from distutils.version import LooseVersion
import logging
import hyperspy as hs
_logger = logging.getLogger(__name__)
def contrast_stretching(data, saturated_pixels):
"""Calculate bounds that leaves out a given percentage of the data.
Parameters
----------
data: numpy array
saturated_pixels: scalar, None
The percentage of pixels that are left out of the bounds. For example,
the low and high bounds of a value of 1 are the 0.5% and 99.5%
        percentiles. It must be in the [0, 100] range. If None, the value is
        set to 0.
Returns
-------
vmin, vmax: scalar
The low and high bounds
Raises
------
ValueError if the value of `saturated_pixels` is out of the valid range.
"""
# Sanity check
if saturated_pixels is None:
saturated_pixels = 0
if not 0 <= saturated_pixels <= 100:
raise ValueError(
"saturated_pixels must be a scalar in the range[0, 100]")
vmin = np.nanpercentile(data, saturated_pixels / 2.)
vmax = np.nanpercentile(data, 100 - saturated_pixels / 2.)
return vmin, vmax
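# Illustrative usage sketch (not part of the original module): for a 1% cut,
# half a percent of the values fall below vmin and half a percent above vmax.
#
#     >>> data = np.arange(1000)
#     >>> vmin, vmax = contrast_stretching(data, saturated_pixels=1)
#     >>> vmin, vmax   # the 0.5th and 99.5th percentiles
#     (4.995, 994.005)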
MPL_DIVERGING_COLORMAPS = [
"BrBG",
"bwr",
"coolwarm",
"PiYG",
"PRGn",
"PuOr",
"RdBu",
"RdGy",
"RdYIBu",
"RdYIGn",
"seismic",
"Spectral", ]
# Add reversed colormaps
MPL_DIVERGING_COLORMAPS += [cmap + "_r" for cmap in MPL_DIVERGING_COLORMAPS]
def centre_colormap_values(vmin, vmax):
"""Calculate vmin and vmax to set the colormap midpoint to zero.
Parameters
----------
vmin, vmax : scalar
The range of data to display.
Returns
-------
cvmin, cvmax : scalar
The values to obtain a centre colormap.
"""
absmax = max(abs(vmin), abs(vmax))
return -absmax, absmax
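# Illustrative sketch (not part of the original module): the returned range is
# symmetric about zero, so a diverging colormap gets its midpoint at 0.
#
#     >>> centre_colormap_values(-1, 3)
#     (-3, 3)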
def create_figure(window_title=None,
_on_figure_window_close=None,
disable_xyscale_keys=False,
**kwargs):
"""Create a matplotlib figure.
This function adds the possibility to execute another function
when the figure is closed and to easily set the window title. Any
keyword argument is passed to the plt.figure function
Parameters
----------
window_title : string
_on_figure_window_close : function
disable_xyscale_keys : bool, disable the `k`, `l` and `L` shortcuts which
toggle the x or y axis between linear and log scale.
Returns
-------
fig : plt.figure
"""
fig = plt.figure(**kwargs)
if window_title is not None:
# remove non-alphanumeric characters to prevent file saving problems
# This is a workaround for:
# https://github.com/matplotlib/matplotlib/issues/9056
reserved_characters = r'<>"/\|?*'
for c in reserved_characters:
window_title = window_title.replace(c, '')
window_title = window_title.replace('\n', ' ')
window_title = window_title.replace(':', ' -')
fig.canvas.set_window_title(window_title)
if disable_xyscale_keys and hasattr(fig.canvas, 'toolbar'):
# hack the `key_press_handler` to disable the `k`, `l`, `L` shortcuts
manager = fig.canvas.manager
fig.canvas.mpl_disconnect(manager.key_press_handler_id)
manager.key_press_handler_id = manager.canvas.mpl_connect(
'key_press_event',
lambda event: key_press_handler_custom(event, manager.canvas))
if _on_figure_window_close is not None:
on_figure_window_close(fig, _on_figure_window_close)
return fig
def key_press_handler_custom(event, canvas):
if event.key not in ['k', 'l', 'L']:
key_press_handler(event, canvas, canvas.manager.toolbar)
def on_figure_window_close(figure, function):
"""Connects a close figure signal to a given function.
Parameters
----------
figure : mpl figure instance
function : function
"""
def function_wrapper(evt):
function()
figure.canvas.mpl_connect('close_event', function_wrapper)
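# Illustrative sketch (not part of the original module): run a cleanup callback
# when a figure window is closed.
#
#     >>> fig = plt.figure()
#     >>> on_figure_window_close(fig, lambda: print("figure closed"))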
def plot_RGB_map(im_list, normalization='single', dont_plot=False):
"""Plot 2 or 3 maps in RGB.
Parameters
----------
im_list : list of Signal2D instances
normalization : {'single', 'global'}
dont_plot : bool
Returns
-------
array: RGB matrix
"""
# from widgets import cursors
height, width = im_list[0].data.shape[:2]
rgb = np.zeros((height, width, 3))
rgb[:, :, 0] = im_list[0].data.squeeze()
rgb[:, :, 1] = im_list[1].data.squeeze()
if len(im_list) == 3:
rgb[:, :, 2] = im_list[2].data.squeeze()
if normalization == 'single':
for i in range(len(im_list)):
rgb[:, :, i] /= rgb[:, :, i].max()
elif normalization == 'global':
rgb /= rgb.max()
rgb = rgb.clip(0, rgb.max())
if not dont_plot:
figure = plt.figure()
ax = figure.add_subplot(111)
ax.frameon = False
ax.set_axis_off()
ax.imshow(rgb, interpolation='nearest')
# cursors.set_mpl_ax(ax)
figure.canvas.draw_idle()
else:
return rgb
def subplot_parameters(fig):
"""Returns a list of the subplot parameters of a mpl figure.
Parameters
----------
fig : mpl figure
Returns
-------
tuple : (left, bottom, right, top, wspace, hspace)
"""
wspace = fig.subplotpars.wspace
hspace = fig.subplotpars.hspace
left = fig.subplotpars.left
right = fig.subplotpars.right
top = fig.subplotpars.top
bottom = fig.subplotpars.bottom
return left, bottom, right, top, wspace, hspace
class ColorCycle:
_color_cycle = [mpl.colors.colorConverter.to_rgba(color) for color
in ('b', 'g', 'r', 'c', 'm', 'y', 'k')]
def __init__(self):
self.color_cycle = copy.copy(self._color_cycle)
def __call__(self):
if not self.color_cycle:
self.color_cycle = copy.copy(self._color_cycle)
return self.color_cycle.pop(0)
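# Illustrative sketch (not part of the original module): each call returns the
# next RGBA tuple, and the cycle restarts automatically once exhausted.
#
#     >>> cycle = ColorCycle()
#     >>> first = cycle()                           # RGBA for 'b'
#     >>> colors = [cycle() for _ in range(10)]     # keeps cycling past 7 colors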
def plot_signals(signal_list, sync=True, navigator="auto",
navigator_list=None, **kwargs):
"""Plot several signals at the same time.
Parameters
----------
signal_list : list of BaseSignal instances
If sync is set to True, the signals must have the
same navigation shape, but not necessarily the same signal shape.
sync : True or False, default "True"
If True: the signals will share navigation, all the signals
must have the same navigation shape for this to work, but not
necessarily the same signal shape.
navigator : {"auto", None, "spectrum", "slider", BaseSignal}, default "auto"
See signal.plot docstring for full description
navigator_list : {List of navigator arguments, None}, default None
Set different navigator options for the signals. Must use valid
navigator arguments: "auto", None, "spectrum", "slider", or a
hyperspy Signal. The list must have the same size as signal_list.
If None, the argument specified in navigator will be used.
**kwargs
Any extra keyword arguments are passed to each signal `plot` method.
Example
-------
>>> s_cl = hs.load("coreloss.dm3")
>>> s_ll = hs.load("lowloss.dm3")
>>> hs.plot.plot_signals([s_cl, s_ll])
Specifying the navigator:
>>> s_cl = hs.load("coreloss.dm3")
>>> s_ll = hs.load("lowloss.dm3")
>>> hs.plot.plot_signals([s_cl, s_ll], navigator="slider")
Specifying the navigator for each signal:
>>> s_cl = hs.load("coreloss.dm3")
>>> s_ll = hs.load("lowloss.dm3")
>>> s_edx = hs.load("edx.dm3")
>>> s_adf = hs.load("adf.dm3")
>>> hs.plot.plot_signals(
[s_cl, s_ll, s_edx], navigator_list=["slider",None,s_adf])
"""
import hyperspy.signal
if navigator_list:
if not (len(signal_list) == len(navigator_list)):
raise ValueError(
"signal_list and navigator_list must"
" have the same size")
if sync:
axes_manager_list = []
for signal in signal_list:
axes_manager_list.append(signal.axes_manager)
if not navigator_list:
navigator_list = []
if navigator is None:
navigator_list.extend([None] * len(signal_list))
elif isinstance(navigator, hyperspy.signal.BaseSignal):
navigator_list.append(navigator)
navigator_list.extend([None] * (len(signal_list) - 1))
elif navigator == "slider":
navigator_list.append("slider")
navigator_list.extend([None] * (len(signal_list) - 1))
elif navigator == "spectrum":
navigator_list.extend(["spectrum"] * len(signal_list))
elif navigator == "auto":
navigator_list.extend(["auto"] * len(signal_list))
else:
raise ValueError(
"navigator must be one of \"spectrum\",\"auto\","
" \"slider\", None, a Signal instance")
# Check to see if the spectra have the same navigational shapes
temp_shape_first = axes_manager_list[0].navigation_shape
for i, axes_manager in enumerate(axes_manager_list):
temp_shape = axes_manager.navigation_shape
if not (temp_shape_first == temp_shape):
raise ValueError(
"The spectra does not have the same navigation shape")
axes_manager_list[i] = axes_manager.deepcopy()
if i > 0:
for axis0, axisn in zip(axes_manager_list[0].navigation_axes,
axes_manager_list[i].navigation_axes):
axes_manager_list[i]._axes[axisn.index_in_array] = axis0
del axes_manager
for signal, navigator, axes_manager in zip(signal_list,
navigator_list,
axes_manager_list):
signal.plot(axes_manager=axes_manager,
navigator=navigator,
**kwargs)
# If sync is False
else:
if not navigator_list:
navigator_list = []
navigator_list.extend([navigator] * len(signal_list))
for signal, navigator in zip(signal_list, navigator_list):
signal.plot(navigator=navigator,
**kwargs)
def _make_heatmap_subplot(spectra):
from hyperspy._signals.signal2d import Signal2D
im = Signal2D(spectra.data, axes=spectra.axes_manager._get_axes_dicts())
im.metadata.General.title = spectra.metadata.General.title
im.plot()
return im._plot.signal_plot.ax
def set_xaxis_lims(mpl_ax, hs_axis):
"""
Set the matplotlib axis limits to match that of a HyperSpy axis
Parameters
----------
mpl_ax : :class:`matplotlib.axis.Axis`
The ``matplotlib`` axis to change
hs_axis : :class:`~hyperspy.axes.DataAxis`
The data axis that contains the values that control the scaling
"""
x_axis_lower_lim = hs_axis.axis[0]
x_axis_upper_lim = hs_axis.axis[-1]
mpl_ax.set_xlim(x_axis_lower_lim, x_axis_upper_lim)
def _make_overlap_plot(spectra, ax, color="blue", line_style='-'):
if isinstance(color, str):
color = [color] * len(spectra)
if isinstance(line_style, str):
line_style = [line_style] * len(spectra)
for spectrum_index, (spectrum, color, line_style) in enumerate(
zip(spectra, color, line_style)):
x_axis = spectrum.axes_manager.signal_axes[0]
spectrum = _transpose_if_required(spectrum, 1)
ax.plot(x_axis.axis, spectrum.data, color=color, ls=line_style)
set_xaxis_lims(ax, x_axis)
_set_spectrum_xlabel(spectra if isinstance(spectra, hs.signals.BaseSignal)
else spectra[-1], ax)
ax.set_ylabel('Intensity')
ax.autoscale(tight=True)
def _make_cascade_subplot(
spectra, ax, color="blue", line_style='-', padding=1):
max_value = 0
for spectrum in spectra:
spectrum_yrange = (np.nanmax(spectrum.data) -
np.nanmin(spectrum.data))
if spectrum_yrange > max_value:
max_value = spectrum_yrange
if isinstance(color, str):
color = [color] * len(spectra)
if isinstance(line_style, str):
line_style = [line_style] * len(spectra)
for spectrum_index, (spectrum, color, line_style) in enumerate(
zip(spectra, color, line_style)):
x_axis = spectrum.axes_manager.signal_axes[0]
spectrum = _transpose_if_required(spectrum, 1)
data_to_plot = ((spectrum.data - spectrum.data.min()) /
float(max_value) + spectrum_index * padding)
ax.plot(x_axis.axis, data_to_plot, color=color, ls=line_style)
set_xaxis_lims(ax, x_axis)
_set_spectrum_xlabel(spectra if isinstance(spectra, hs.signals.BaseSignal)
else spectra[-1], ax)
ax.set_yticks([])
ax.autoscale(tight=True)
def _plot_spectrum(spectrum, ax, color="blue", line_style='-'):
x_axis = spectrum.axes_manager.signal_axes[0]
ax.plot(x_axis.axis, spectrum.data, color=color, ls=line_style)
set_xaxis_lims(ax, x_axis)
def _set_spectrum_xlabel(spectrum, ax):
x_axis = spectrum.axes_manager.signal_axes[0]
ax.set_xlabel("%s (%s)" % (x_axis.name, x_axis.units))
def _transpose_if_required(signal, expected_dimension):
# EDS profiles or maps have signal dimension = 0 and navigation dimension
# 1 or 2. For convenience transpose the signal if possible
if (signal.axes_manager.signal_dimension == 0 and
signal.axes_manager.navigation_dimension == expected_dimension):
return signal.T
else:
return signal
def plot_images(images,
cmap=None,
no_nans=False,
per_row=3,
label='auto',
labelwrap=30,
suptitle=None,
suptitle_fontsize=18,
colorbar='multi',
centre_colormap="auto",
saturated_pixels=0,
scalebar=None,
scalebar_color='white',
axes_decor='all',
padding=None,
tight_layout=False,
aspect='auto',
min_asp=0.1,
namefrac_thresh=0.4,
fig=None,
vmin=None,
vmax=None,
*args,
**kwargs):
"""Plot multiple images as sub-images in one figure.
Extra keyword arguments are passed to `matplotlib.figure`.
Parameters
----------
images : list of Signal2D or BaseSignal
`images` should be a list of Signals to plot. For `BaseSignal` with
navigation dimensions 2 and signal dimension 0, the signal will be
tranposed to form a `Signal2D`.
Multi-dimensional images will have each plane plotted as a separate
image.
If any signal shape is not suitable, a ValueError will be raised.
cmap : matplotlib colormap, list, or ``'mpl_colors'``, *optional*
The colormap used for the images, by default read from ``pyplot``.
A list of colormaps can also be provided, and the images will
cycle through them. Optionally, the value ``'mpl_colors'`` will
cause the cmap to loop through the default ``matplotlib``
colors (to match with the default output of the
        :py:func:`~.drawing.utils.plot_spectra` method).
Note: if using more than one colormap, using the ``'single'``
option for ``colorbar`` is disallowed.
no_nans : bool, optional
If True, set nans to zero for plotting.
per_row : int, optional
The number of plots in each row
label : None, str, or list of str, optional
Control the title labeling of the plotted images.
If None, no titles will be shown.
If 'auto' (default), function will try to determine suitable titles
using Signal2D titles, falling back to the 'titles' option if no good
short titles are detected.
Works best if all images to be plotted have the same beginning
to their titles.
If 'titles', the title from each image's metadata.General.title
will be used.
If any other single str, images will be labeled in sequence using
that str as a prefix.
If a list of str, the list elements will be used to determine the
labels (repeated, if necessary).
labelwrap : int, optional
integer specifying the number of characters that will be used on
one line
If the function returns an unexpected blank figure, lower this
value to reduce overlap of the labels between each figure
suptitle : str, optional
Title to use at the top of the figure. If called with label='auto',
this parameter will override the automatically determined title.
suptitle_fontsize : int, optional
Font size to use for super title at top of figure
colorbar : {'multi', None, 'single'}
Controls the type of colorbars that are plotted.
If None, no colorbar is plotted.
If 'multi' (default), individual colorbars are plotted for each
(non-RGB) image
If 'single', all (non-RGB) images are plotted on the same scale,
and one colorbar is shown for all
centre_colormap : {"auto", True, False}
If True the centre of the color scheme is set to zero. This is
specially useful when using diverging color schemes. If "auto"
(default), diverging color schemes are automatically centred.
saturated_pixels: None, scalar or list of scalar, optional, default: 0
        If a list of scalar, the length should match the number of images to
        show. Any None entry in the list is treated as 0.
The percentage of pixels that are left out of the bounds. For
example, the low and high bounds of a value of 1 are the 0.5% and
99.5% percentiles. It must be in the [0, 100] range.
scalebar : {None, 'all', list of ints}, optional
If None (or False), no scalebars will be added to the images.
If 'all', scalebars will be added to all images.
If list of ints, scalebars will be added to each image specified.
scalebar_color : str, optional
A valid MPL color string; will be used as the scalebar color
axes_decor : {'all', 'ticks', 'off', None}, optional
Controls how the axes are displayed on each image; default is 'all'
If 'all', both ticks and axis labels will be shown
If 'ticks', no axis labels will be shown, but ticks/labels will
If 'off', all decorations and frame will be disabled
If None, no axis decorations will be shown, but ticks/frame will
padding : None or dict, optional
This parameter controls the spacing between images.
If None, default options will be used
Otherwise, supply a dictionary with the spacing options as
keywords and desired values as values
Values should be supplied as used in pyplot.subplots_adjust(),
and can be:
'left', 'bottom', 'right', 'top', 'wspace' (width),
and 'hspace' (height)
tight_layout : bool, optional
If true, hyperspy will attempt to improve image placement in
figure using matplotlib's tight_layout
If false, repositioning images inside the figure will be left as
an exercise for the user.
aspect : str or numeric, optional
If 'auto', aspect ratio is auto determined, subject to min_asp.
If 'square', image will be forced onto square display.
If 'equal', aspect ratio of 1 will be enforced.
If float (or int/long), given value will be used.
min_asp : float, optional
Minimum aspect ratio to be used when plotting images
namefrac_thresh : float, optional
Threshold to use for auto-labeling. This parameter controls how
much of the titles must be the same for the auto-shortening of
labels to activate. Can vary from 0 to 1. Smaller values
encourage shortening of titles by auto-labeling, while larger
        values will require more overlap in titles before activating the
auto-label code.
fig : mpl figure, optional
If set, the images will be plotted to an existing MPL figure
vmin, vmax : scalar or list of scalar, optional, default: None
If list of scalar, the length should match the number of images to
show.
A list of scalar is not compatible with a single colorbar.
See vmin, vmax of matplotlib.imshow() for more details.
*args, **kwargs, optional
Additional arguments passed to matplotlib.imshow()
Returns
-------
axes_list : list
a list of subplot axes that hold the images
See Also
--------
plot_spectra : Plotting of multiple spectra
plot_signals : Plotting of multiple signals
plot_histograms : Compare signal histograms
Notes
-----
`interpolation` is a useful parameter to provide as a keyword
argument to control how the space between pixels is interpolated. A
value of ``'nearest'`` will cause no interpolation between pixels.
`tight_layout` is known to be quite brittle, so an option is provided
to disable it. Turn this option off if output is not as expected,
or try adjusting `label`, `labelwrap`, or `per_row`
"""
def __check_single_colorbar(cbar):
if cbar == 'single':
raise ValueError('Cannot use a single colorbar with multiple '
'colormaps. Please check for compatible '
'arguments.')
from hyperspy.drawing.widgets import ScaleBar
from hyperspy.misc import rgb_tools
from hyperspy.signal import BaseSignal
# Check that we have a hyperspy signal
im = [images] if not isinstance(images, (list, tuple)) else images
for image in im:
if not isinstance(image, BaseSignal):
raise ValueError("`images` must be a list of image signals or a "
"multi-dimensional signal."
" " + repr(type(images)) + " was given.")
# For list of EDS maps, transpose the BaseSignal
if isinstance(images, (list, tuple)):
images = [_transpose_if_required(image, 2) for image in images]
# If input is >= 1D signal (e.g. for multi-dimensional plotting),
# copy it and put it in a list so labeling works out as (x,y) when plotting
if isinstance(images,
BaseSignal) and images.axes_manager.navigation_dimension > 0:
images = [images._deepcopy_with_new_data(images.data)]
n = 0
for i, sig in enumerate(images):
if sig.axes_manager.signal_dimension != 2:
raise ValueError("This method only plots signals that are images. "
"The signal dimension must be equal to 2. "
"The signal at position " + repr(i) +
" was " + repr(sig) + ".")
# increment n by the navigation size, or by 1 if the navigation size is
# <= 0
n += (sig.axes_manager.navigation_size
if sig.axes_manager.navigation_size > 0
else 1)
# If no cmap given, get default colormap from pyplot:
if cmap is None:
cmap = [plt.get_cmap().name]
elif cmap == 'mpl_colors':
for n_color, c in enumerate(mpl.rcParams['axes.prop_cycle']):
make_cmap(colors=['#000000', c['color']],
name='mpl{}'.format(n_color))
cmap = ['mpl{}'.format(i) for i in
range(len(mpl.rcParams['axes.prop_cycle']))]
__check_single_colorbar(colorbar)
# cmap is list, tuple, or something else iterable (but not string):
elif hasattr(cmap, '__iter__') and not isinstance(cmap, str):
try:
cmap = [c.name for c in cmap] # convert colormap to string
except AttributeError:
cmap = [c for c in cmap] # c should be string if not colormap
__check_single_colorbar(colorbar)
elif isinstance(cmap, mpl.colors.Colormap):
cmap = [cmap.name] # convert single colormap to list with string
elif isinstance(cmap, str):
cmap = [cmap] # cmap is single string, so make it a list
else:
# Didn't understand cmap input, so raise error
raise ValueError('The provided cmap value was not understood. Please '
'check input values.')
# If any of the cmaps given are diverging, and auto-centering, set the
# appropriate flag:
if centre_colormap == "auto":
centre_colormaps = []
for c in cmap:
if c in MPL_DIVERGING_COLORMAPS:
centre_colormaps.append(True)
else:
centre_colormaps.append(False)
# if it was True, just convert to list
elif centre_colormap:
centre_colormaps = [True]
# likewise for false
elif not centre_colormap:
centre_colormaps = [False]
# finally, convert lists to cycle generators for adaptive length:
centre_colormaps = itertools.cycle(centre_colormaps)
cmap = itertools.cycle(cmap)
def _check_arg(arg, default_value, arg_name):
if isinstance(arg, list):
if len(arg) != n:
_logger.warning('The provided {} values are ignored because the '
'length of the list does not match the number of '
'images'.format(arg_name))
arg = [default_value] * n
else:
arg = [arg] * n
return arg
vmin = _check_arg(vmin, None, 'vmin')
vmax = _check_arg(vmax, None, 'vmax')
saturated_pixels = _check_arg(saturated_pixels, 0, 'saturated_pixels')
# Sort out the labeling:
div_num = 0
all_match = False
shared_titles = False
user_labels = False
if label is None:
pass
elif label == 'auto':
# Use some heuristics to try to get base string of similar titles
label_list = [x.metadata.General.title for x in images]
# Find the shortest common string between the image titles
# and pull that out as the base title for the sequence of images
# array in which to store arrays
res = np.zeros((len(label_list), len(label_list[0]) + 1))
res[:, 0] = 1
# j iterates the strings
for j in range(len(label_list)):
# i iterates length of substring test
for i in range(1, len(label_list[0]) + 1):
# stores whether or not characters in title match
res[j, i] = label_list[0][:i] in label_list[j]
# sum up the results (1 is True, 0 is False) and create
# a substring based on the minimum value (this will be
# the "smallest common string" between all the titles
if res.all():
basename = label_list[0]
div_num = len(label_list[0])
all_match = True
else:
div_num = int(min(np.sum(res, 1)))
basename = label_list[0][:div_num - 1]
all_match = False
# trim off any '(' or ' ' characters at end of basename
if div_num > 1:
while True:
if basename[len(basename) - 1] == '(':
basename = basename[:-1]
elif basename[len(basename) - 1] == ' ':
basename = basename[:-1]
else:
break
# namefrac is ratio of length of basename to the image name
# if it is high (e.g. over 0.5), we can assume that all images
# share the same base
if len(label_list[0]) > 0:
namefrac = float(len(basename)) / len(label_list[0])
else:
# If label_list[0] is empty, it means there was probably no
# title set originally, so nothing to share
namefrac = 0
if namefrac > namefrac_thresh:
# there was a significant overlap of label beginnings
shared_titles = True
# only use new suptitle if one isn't specified already
if suptitle is None:
suptitle = basename
else:
# there was not much overlap, so default back to 'titles' mode
shared_titles = False
label = 'titles'
div_num = 0
elif label == 'titles':
# Set label_list to each image's pre-defined title
label_list = [x.metadata.General.title for x in images]
elif isinstance(label, str):
# Set label_list to an indexed list, based off of label
label_list = [label + " " + repr(num) for num in range(n)]
elif isinstance(label, list) and all(
isinstance(x, str) for x in label):
label_list = label
user_labels = True
# If list of labels is longer than the number of images, just use the
# first n elements
if len(label_list) > n:
del label_list[n:]
if len(label_list) < n:
label_list *= (n // len(label_list)) + 1
del label_list[n:]
else:
raise ValueError("Did not understand input of labels.")
# Determine appropriate number of images per row
rows = int(np.ceil(n / float(per_row)))
if n < per_row:
per_row = n
# Set overall figure size and define figure (if not pre-existing)
if fig is None:
k = max(plt.rcParams['figure.figsize']) / max(per_row, rows)
f = plt.figure(figsize=(tuple(k * i for i in (per_row, rows))))
else:
f = fig
# Initialize list to hold subplot axes
axes_list = []
# Initialize list of rgb tags
isrgb = [False] * len(images)
# Check to see if there are any rgb images in list
# and tag them using the isrgb list
for i, img in enumerate(images):
if rgb_tools.is_rgbx(img.data):
isrgb[i] = True
# Determine how many non-rgb Images there are
non_rgb = list(itertools.compress(images, [not j for j in isrgb]))
if len(non_rgb) == 0 and colorbar is not None:
colorbar = None
warnings.warn("Sorry, colorbar is not implemented for RGB images.")
# Find global min and max values of all the non-rgb images for use with
# 'single' scalebar
if colorbar == 'single':
# get a g_saturated_pixels from saturated_pixels
if isinstance(saturated_pixels, list):
g_saturated_pixels = min(np.array([v for v in saturated_pixels]))
else:
g_saturated_pixels = saturated_pixels
# estimate a g_vmin and g_max from saturated_pixels
g_vmin, g_vmax = contrast_stretching(np.concatenate(
[i.data.flatten() for i in non_rgb]), g_saturated_pixels)
# if vmin and vmax are provided, override g_min and g_max
if isinstance(vmin, list):
            _logger.warning('vmin has to be a scalar to be compatible with a '
'single colorbar')
else:
g_vmin = vmin if vmin is not None else g_vmin
if isinstance(vmax, list):
            _logger.warning('vmax has to be a scalar to be compatible with a '
'single colorbar')
else:
g_vmax = vmax if vmax is not None else g_vmax
if next(centre_colormaps):
g_vmin, g_vmax = centre_colormap_values(g_vmin, g_vmax)
# Check if we need to add a scalebar for some of the images
if isinstance(scalebar, list) and all(isinstance(x, int)
for x in scalebar):
scalelist = True
else:
scalelist = False
idx = 0
ax_im_list = [0] * len(isrgb)
# Replot: create a list to store references to the images
replot_ims = []
# Loop through each image, adding subplot for each one
for i, ims in enumerate(images):
# Get handles for the signal axes and axes_manager
axes_manager = ims.axes_manager
if axes_manager.navigation_dimension > 0:
ims = ims._deepcopy_with_new_data(ims.data)
for j, im in enumerate(ims):
ax = f.add_subplot(rows, per_row, idx + 1)
axes_list.append(ax)
data = im.data
centre = next(centre_colormaps) # get next value for centreing
# Enable RGB plotting
if rgb_tools.is_rgbx(data):
data = rgb_tools.rgbx2regular_array(data, plot_friendly=True)
l_vmin, l_vmax = None, None
else:
data = im.data
# Find min and max for contrast
l_vmin, l_vmax = contrast_stretching(
data, saturated_pixels[idx])
l_vmin = vmin[idx] if vmin[idx] is not None else l_vmin
l_vmax = vmax[idx] if vmax[idx] is not None else l_vmax
if centre:
l_vmin, l_vmax = centre_colormap_values(l_vmin, l_vmax)
# Remove NaNs (if requested)
if no_nans:
data = np.nan_to_num(data)
# Get handles for the signal axes and axes_manager
axes_manager = im.axes_manager
axes = axes_manager.signal_axes
# Set dimensions of images
xaxis = axes[0]
yaxis = axes[1]
extent = (
xaxis.low_value,
xaxis.high_value,
yaxis.high_value,
yaxis.low_value,
)
if not isinstance(aspect, (int, float)) and aspect not in [
'auto', 'square', 'equal']:
_logger.warning("Did not understand aspect ratio input. "
"Using 'auto' as default.")
aspect = 'auto'
if aspect == 'auto':
if float(yaxis.size) / xaxis.size < min_asp:
factor = min_asp * float(xaxis.size) / yaxis.size
elif float(yaxis.size) / xaxis.size > min_asp ** -1:
factor = min_asp ** -1 * float(xaxis.size) / yaxis.size
else:
factor = 1
asp = np.abs(factor * float(xaxis.scale) / yaxis.scale)
elif aspect == 'square':
asp = abs(extent[1] - extent[0]) / abs(extent[3] - extent[2])
elif aspect == 'equal':
asp = 1
elif isinstance(aspect, (int, float)):
asp = aspect
if 'interpolation' not in kwargs.keys():
kwargs['interpolation'] = 'nearest'
# Get colormap for this image:
cm = next(cmap)
# Plot image data, using vmin and vmax to set bounds,
# or allowing them to be set automatically if using individual
# colorbars
if colorbar == 'single' and not isrgb[i]:
axes_im = ax.imshow(data,
cmap=cm,
extent=extent,
vmin=g_vmin, vmax=g_vmax,
aspect=asp,
*args, **kwargs)
ax_im_list[i] = axes_im
else:
axes_im = ax.imshow(data,
cmap=cm,
extent=extent,
vmin=l_vmin,
vmax=l_vmax,
aspect=asp,
*args, **kwargs)
ax_im_list[i] = axes_im
# If an axis trait is undefined, shut off :
if isinstance(xaxis.units, trait_base._Undefined) or \
isinstance(yaxis.units, trait_base._Undefined) or \
isinstance(xaxis.name, trait_base._Undefined) or \
isinstance(yaxis.name, trait_base._Undefined):
if axes_decor == 'all':
_logger.warning(
'Axes labels were requested, but one '
'or both of the '
'axes units and/or name are undefined. '
'Axes decorations have been set to '
'\'ticks\' instead.')
axes_decor = 'ticks'
# If all traits are defined, set labels as appropriate:
else:
ax.set_xlabel(axes[0].name + " axis (" + axes[0].units + ")")
ax.set_ylabel(axes[1].name + " axis (" + axes[1].units + ")")
if label:
if all_match:
title = ''
elif shared_titles:
title = label_list[i][div_num - 1:]
else:
if len(ims) == n:
# This is true if we are plotting just 1
# multi-dimensional Signal2D
title = label_list[idx]
elif user_labels:
title = label_list[idx]
else:
title = label_list[i]
if ims.axes_manager.navigation_size > 1 and not user_labels:
title += " %s" % str(ims.axes_manager.indices)
ax.set_title(textwrap.fill(title, labelwrap))
# Set axes decorations based on user input
set_axes_decor(ax, axes_decor)
# If using independent colorbars, add them
if colorbar == 'multi' and not isrgb[i]:
div = make_axes_locatable(ax)
cax = div.append_axes("right", size="5%", pad=0.05)
plt.colorbar(axes_im, cax=cax)
# Add scalebars as necessary
if (scalelist and idx in scalebar) or scalebar == 'all':
ax.scalebar = ScaleBar(
ax=ax,
units=axes[0].units,
color=scalebar_color,
)
# Replot: store references to the images
replot_ims.append(im)
idx += 1
# If using a single colorbar, add it, and do tight_layout, ensuring that
# a colorbar is only added based off of non-rgb Images:
if colorbar == 'single':
foundim = None
for i in range(len(isrgb)):
if (not isrgb[i]) and foundim is None:
foundim = i
if foundim is not None:
f.subplots_adjust(right=0.8)
cbar_ax = f.add_axes([0.9, 0.1, 0.03, 0.8])
f.colorbar(ax_im_list[foundim], cax=cbar_ax)
if tight_layout:
# tight_layout, leaving room for the colorbar
plt.tight_layout(rect=[0, 0, 0.9, 1])
elif tight_layout:
plt.tight_layout()
elif tight_layout:
plt.tight_layout()
# Set top bounds for shared titles and add suptitle
if suptitle:
f.subplots_adjust(top=0.85)
f.suptitle(suptitle, fontsize=suptitle_fontsize)
# If we want to plot scalebars, loop through the list of axes and add them
if scalebar is None or scalebar is False:
# Do nothing if no scalebars are called for
pass
elif scalebar == 'all':
# scalebars were taken care of in the plotting loop
pass
elif scalelist:
# scalebars were taken care of in the plotting loop
pass
else:
raise ValueError("Did not understand scalebar input. Must be None, "
"\'all\', or list of ints.")
# Adjust subplot spacing according to user's specification
if padding is not None:
plt.subplots_adjust(**padding)
# Replot: connect function
def on_dblclick(event):
# On the event of a double click, replot the selected subplot
if not event.inaxes:
return
if not event.dblclick:
return
subplots = [axi for axi in f.axes if isinstance(axi, mpl.axes.Subplot)]
inx = list(subplots).index(event.inaxes)
im = replot_ims[inx]
# Use some of the info in the subplot
cm = subplots[inx].images[0].get_cmap()
clim = subplots[inx].images[0].get_clim()
sbar = False
if (scalelist and inx in scalebar) or scalebar == 'all':
sbar = True
im.plot(colorbar=bool(colorbar),
vmin=clim[0],
vmax=clim[1],
no_nans=no_nans,
aspect=asp,
scalebar=sbar,
scalebar_color=scalebar_color,
cmap=cm)
f.canvas.mpl_connect('button_press_event', on_dblclick)
return axes_list
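# Illustrative usage sketch (assumes two existing Signal2D objects `im1` and
# `im2`; not part of the original module):
#
#     >>> import hyperspy.api as hs
#     >>> axes = hs.plot.plot_images([im1, im2], colorbar='single',
#     ...                            scalebar='all', axes_decor='off')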
def set_axes_decor(ax, axes_decor):
if axes_decor == 'off':
ax.axis('off')
elif axes_decor == 'ticks':
ax.set_xlabel('')
ax.set_ylabel('')
elif axes_decor == 'all':
pass
elif axes_decor is None:
ax.set_xlabel('')
ax.set_ylabel('')
ax.set_xticklabels([])
ax.set_yticklabels([])
def make_cmap(colors, name='my_colormap', position=None,
bit=False, register=True):
"""
Create a matplotlib colormap with customized colors, optionally registering
it with matplotlib for simplified use.
Adapted from Chris Slocum's code at:
https://github.com/CSlocumWX/custom_colormap/blob/master/custom_colormaps.py
and used under the terms of that code's BSD-3 license
Parameters
----------
colors : iterable
list of either tuples containing rgb values, or html strings
Colors should be arranged so that the first color is the lowest
value for the colorbar and the last is the highest.
name : str
name of colormap to use when registering with matplotlib
position : None or iterable
list containing the values (from [0,1]) that dictate the position
of each color within the colormap. If None (default), the colors
will be equally-spaced within the colorbar.
bit : boolean
True if RGB colors are given in 8-bit [0 to 255] or False if given
in arithmetic basis [0 to 1] (default)
register : boolean
switch to control whether or not to register the custom colormap
with matplotlib in order to enable use by just the name string
"""
def _html_color_to_rgb(color_string):
""" convert #RRGGBB to an (R, G, B) tuple """
color_string = color_string.strip()
if color_string[0] == '#':
color_string = color_string[1:]
if len(color_string) != 6:
raise ValueError(
"input #{} is not in #RRGGBB format".format(color_string))
r, g, b = color_string[:2], color_string[2:4], color_string[4:]
r, g, b = [int(n, 16) / 255 for n in (r, g, b)]
return r, g, b
bit_rgb = np.linspace(0, 1, 256)
if position is None:
position = np.linspace(0, 1, len(colors))
else:
if len(position) != len(colors):
raise ValueError("position length must be the same as colors")
elif position[0] != 0 or position[-1] != 1:
raise ValueError("position must start with 0 and end with 1")
cdict = {'red': [], 'green': [], 'blue': []}
for pos, color in zip(position, colors):
if isinstance(color, str):
color = _html_color_to_rgb(color)
elif bit:
color = (bit_rgb[color[0]],
bit_rgb[color[1]],
bit_rgb[color[2]])
cdict['red'].append((pos, color[0], color[0]))
cdict['green'].append((pos, color[1], color[1]))
cdict['blue'].append((pos, color[2], color[2]))
cmap = mpl.colors.LinearSegmentedColormap(name, cdict, 256)
if register:
mpl.cm.register_cmap(name, cmap)
return cmap
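# Illustrative sketch (not part of the original module): build and register a
# black-to-red colormap from HTML colors, then refer to it by name.
#
#     >>> cmap = make_cmap(['#000000', '#ff0000'], name='black_red')
#     >>> plt.imshow(np.random.rand(8, 8), cmap='black_red')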
def plot_spectra(
spectra,
style='overlap',
color=None,
line_style=None,
padding=1.,
legend=None,
legend_picking=True,
legend_loc='upper right',
fig=None,
ax=None,
**kwargs):
"""Plot several spectra in the same figure.
Extra keyword arguments are passed to `matplotlib.figure`.
Parameters
----------
spectra : list of Signal1D or BaseSignal
Ordered spectra list of signal to plot. If `style` is "cascade" or
"mosaic" the spectra can have different size and axes. For `BaseSignal`
with navigation dimensions 1 and signal dimension 0, the signal will be
tranposed to form a `Signal1D`.
style : {'overlap', 'cascade', 'mosaic', 'heatmap'}
The style of the plot.
color : matplotlib color or a list of them or `None`
Sets the color of the lines of the plots (no action on 'heatmap').
If a list, if its length is less than the number of spectra to plot,
the colors will be cycled. If `None`, use default matplotlib color
cycle.
line_style: matplotlib line style or a list of them or `None`
Sets the line style of the plots (no action on 'heatmap').
        The main line styles are '-','--','steps','-.',':'.
        If a list whose length is less than the number of
        spectra to plot, line_style will be cycled.
        If `None`, use continuous lines, eg: ('-','--','steps','-.',':')
    padding : float, optional, default 1.0
        Option for "cascade". 1 guarantees that there is no overlapping.
However, in many cases a value between 0 and 1 can produce a tighter
plot without overlapping. Negative values have the same effect but
reverse the order of the spectra without reversing the order of the
colors.
legend: None or list of str or 'auto'
If list of string, legend for "cascade" or title for "mosaic" is
        displayed. If 'auto', the title of each spectrum (metadata.General.title)
is used.
legend_picking: bool
        If true, a spectrum can be toggled on and off by clicking on
the legended line.
legend_loc : str or int
This parameter controls where the legend is placed on the figure;
see the pyplot.legend docstring for valid values
fig : matplotlib figure or None
If None, a default figure will be created. Specifying fig will
not work for the 'heatmap' style.
ax : matplotlib ax (subplot) or None
If None, a default ax will be created. Will not work for 'mosaic'
or 'heatmap' style.
**kwargs
remaining keyword arguments are passed to matplotlib.figure() or
matplotlib.subplots(). Has no effect on 'heatmap' style.
Example
-------
>>> s = hs.load("some_spectra")
>>> hs.plot.plot_spectra(s, style='cascade', color='red', padding=0.5)
To save the plot as a png-file
>>> hs.plot.plot_spectra(s).figure.savefig("test.png")
Returns
-------
ax: matplotlib axes or list of matplotlib axes
An array is returned when `style` is "mosaic".
"""
import hyperspy.signal
def _reverse_legend(ax_, legend_loc_):
"""
Reverse the ordering of a matplotlib legend (to be more consistent
with the default ordering of plots in the 'cascade' and 'overlap'
styles
Parameters
----------
ax_: matplotlib axes
legend_loc_: str or int
This parameter controls where the legend is placed on the
figure; see the pyplot.legend docstring for valid values
"""
l = ax_.get_legend()
labels = [lb.get_text() for lb in list(l.get_texts())]
handles = l.legendHandles
ax_.legend(handles[::-1], labels[::-1], loc=legend_loc_)
    # Before v1.3 the default would read the value from preferences.
if style == "default":
style = "overlap"
if color is not None:
if isinstance(color, str):
color = itertools.cycle([color])
elif hasattr(color, "__iter__"):
color = itertools.cycle(color)
else:
raise ValueError("Color must be None, a valid matplotlib color "
"string or a list of valid matplotlib colors.")
else:
if LooseVersion(mpl.__version__) >= "1.5.3":
color = itertools.cycle(
plt.rcParams['axes.prop_cycle'].by_key()["color"])
else:
color = itertools.cycle(plt.rcParams['axes.color_cycle'])
if line_style is not None:
if isinstance(line_style, str):
line_style = itertools.cycle([line_style])
elif hasattr(line_style, "__iter__"):
line_style = itertools.cycle(line_style)
else:
raise ValueError("line_style must be None, a valid matplotlib"
" line_style string or a list of valid matplotlib"
" line_style.")
else:
line_style = ['-'] * len(spectra)
if legend is not None:
if isinstance(legend, str):
if legend == 'auto':
legend = [spec.metadata.General.title for spec in spectra]
else:
raise ValueError("legend must be None, 'auto' or a list of"
" string")
elif hasattr(legend, "__iter__"):
legend = itertools.cycle(legend)
if style == 'overlap':
if fig is None:
fig = plt.figure(**kwargs)
if ax is None:
ax = fig.add_subplot(111)
_make_overlap_plot(spectra,
ax,
color=color,
line_style=line_style,)
if legend is not None:
ax.legend(legend, loc=legend_loc)
_reverse_legend(ax, legend_loc)
if legend_picking is True:
animate_legend(fig=fig, ax=ax)
elif style == 'cascade':
if fig is None:
fig = plt.figure(**kwargs)
if ax is None:
ax = fig.add_subplot(111)
_make_cascade_subplot(spectra,
ax,
color=color,
line_style=line_style,
padding=padding)
if legend is not None:
plt.legend(legend, loc=legend_loc)
_reverse_legend(ax, legend_loc)
elif style == 'mosaic':
default_fsize = plt.rcParams["figure.figsize"]
figsize = (default_fsize[0], default_fsize[1] * len(spectra))
fig, subplots = plt.subplots(
len(spectra), 1, figsize=figsize, **kwargs)
if legend is None:
legend = [legend] * len(spectra)
for spectrum, ax, color, line_style, legend in zip(
spectra, subplots, color, line_style, legend):
spectrum = _transpose_if_required(spectrum, 1)
_plot_spectrum(spectrum, ax, color=color, line_style=line_style)
ax.set_ylabel('Intensity')
if legend is not None:
ax.set_title(legend)
if not isinstance(spectra, hyperspy.signal.BaseSignal):
_set_spectrum_xlabel(spectrum, ax)
if isinstance(spectra, hyperspy.signal.BaseSignal):
_set_spectrum_xlabel(spectrum, ax)
fig.tight_layout()
elif style == 'heatmap':
if not isinstance(spectra, hyperspy.signal.BaseSignal):
import hyperspy.utils
spectra = [_transpose_if_required(spectrum, 1) for spectrum in
spectra]
spectra = hyperspy.utils.stack(spectra)
with spectra.unfolded():
ax = _make_heatmap_subplot(spectra)
ax.set_ylabel('Spectra')
ax = ax if style != "mosaic" else subplots
return ax
def animate_legend(fig=None, ax=None):
"""Animate the legend of a figure.
    A spectrum can be toggled on and off by clicking on the legended line.
Parameters
----------
fig: None | matplotlib.figure
If None pick the current figure using "plt.gcf"
ax: None | matplotlib.axes
If None pick the current axes using "plt.gca".
Note
----
Code inspired from legend_picking.py in the matplotlib gallery
"""
if fig is None:
fig = plt.gcf()
if ax is None:
ax = plt.gca()
lines = ax.lines[::-1]
lined = dict()
leg = ax.get_legend()
for legline, origline in zip(leg.get_lines(), lines):
legline.set_picker(5) # 5 pts tolerance
lined[legline] = origline
def onpick(event):
# on the pick event, find the orig line corresponding to the
# legend proxy line, and toggle the visibility
legline = event.artist
if legline.axes == ax:
origline = lined[legline]
vis = not origline.get_visible()
origline.set_visible(vis)
# Change the alpha on the line in the legend so we can see what lines
# have been toggled
if vis:
legline.set_alpha(1.0)
else:
legline.set_alpha(0.2)
fig.canvas.draw_idle()
fig.canvas.mpl_connect('pick_event', onpick)
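# Illustrative sketch (not part of the original module): after plotting several
# spectra with a legend, make the legend entries clickable.
#
#     >>> ax = plot_spectra(spectra_list, legend='auto', legend_picking=False)
#     >>> animate_legend(fig=ax.figure, ax=ax)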
def plot_histograms(signal_list,
bins='freedman',
range_bins=None,
color=None,
line_style=None,
legend='auto',
fig=None,
**kwargs):
"""Plot the histogram of every signal in the list in the same figure.
This function creates a histogram for each signal and plot the list with
the `utils.plot.plot_spectra` function.
Parameters
----------
signal_list : iterable
Ordered spectra list to plot. If `style` is "cascade" or "mosaic"
the spectra can have different size and axes.
bins : int or list or str, optional
If bins is a string, then it must be one of:
'knuth' : use Knuth's rule to determine bins
'scotts' : use Scott's rule to determine bins
        'freedman' : use the Freedman-Diaconis rule to determine bins
'blocks' : use bayesian blocks for dynamic bin widths
range_bins : tuple or None, optional.
the minimum and maximum range for the histogram. If not specified,
it will be (x.min(), x.max())
color : valid matplotlib color or a list of them or `None`, optional.
        Sets the color of the lines of the plots. If a list whose length is
        less than the number of spectra to plot, the colors will be cycled.
        If `None`, use the default matplotlib color cycle.
line_style: valid matplotlib line style or a list of them or `None`,
optional.
        The main line styles are '-','--','steps','-.',':'.
        If a list whose length is less than the number of
        spectra to plot, line_style will be cycled.
        If `None`, use continuous lines, eg: ('-','--','steps','-.',':')
legend: None or list of str or 'auto', optional.
        Display a legend. If 'auto', the title of each spectrum
(metadata.General.title) is used.
legend_picking: bool, optional.
        If true, a spectrum can be toggled on and off by clicking on
the legended line.
fig : matplotlib figure or None, optional.
If None, a default figure will be created.
**kwargs
other keyword arguments (weight and density) are described in
np.histogram().
Example
-------
Histograms of two random chi-square distributions
>>> img = hs.signals.Signal2D(np.random.chisquare(1,[10,10,100]))
>>> img2 = hs.signals.Signal2D(np.random.chisquare(2,[10,10,100]))
>>> hs.plot.plot_histograms([img,img2],legend=['hist1','hist2'])
Returns
-------
ax: matplotlib axes or list of matplotlib axes
An array is returned when `style` is "mosaic".
"""
hists = []
for obj in signal_list:
hists.append(obj.get_histogram(bins=bins,
range_bins=range_bins, **kwargs))
if line_style is None:
line_style = 'steps'
return plot_spectra(hists, style='overlap', color=color,
line_style=line_style, legend=legend, fig=fig)
| gpl-3.0 |
idlead/scikit-learn | examples/linear_model/plot_sgd_comparison.py | 112 | 1819 | """
==================================
Comparing various online solvers
==================================
An example showing how different online solvers perform
on the hand-written digits dataset.
"""
# Author: Rob Zinkov <rob at zinkov dot com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.linear_model import SGDClassifier, Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import LogisticRegression
heldout = [0.95, 0.90, 0.75, 0.50, 0.01]
rounds = 20
digits = datasets.load_digits()
X, y = digits.data, digits.target
classifiers = [
("SGD", SGDClassifier()),
("ASGD", SGDClassifier(average=True)),
("Perceptron", Perceptron()),
("Passive-Aggressive I", PassiveAggressiveClassifier(loss='hinge',
C=1.0)),
("Passive-Aggressive II", PassiveAggressiveClassifier(loss='squared_hinge',
C=1.0)),
("SAG", LogisticRegression(solver='sag', tol=1e-1, C=1.e4 / X.shape[0]))
]
xx = 1. - np.array(heldout)
for name, clf in classifiers:
print("training %s" % name)
rng = np.random.RandomState(42)
yy = []
for i in heldout:
yy_ = []
for r in range(rounds):
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=i, random_state=rng)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
yy_.append(1 - np.mean(y_pred == y_test))
yy.append(np.mean(yy_))
plt.plot(xx, yy, label=name)
plt.legend(loc="upper right")
plt.xlabel("Proportion train")
plt.ylabel("Test Error Rate")
plt.show()
| bsd-3-clause |
abimannans/scikit-learn | examples/linear_model/plot_logistic_path.py | 349 | 1195 | #!/usr/bin/env python
"""
=================================
Path with L1- Logistic Regression
=================================
Computes path on IRIS dataset.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn import datasets
from sklearn.svm import l1_min_c
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 2]
y = y[y != 2]
X -= np.mean(X, 0)
###############################################################################
# Demo path functions
cs = l1_min_c(X, y, loss='log') * np.logspace(0, 3)
print("Computing regularization path ...")
start = datetime.now()
clf = linear_model.LogisticRegression(C=1.0, penalty='l1', tol=1e-6)
coefs_ = []
for c in cs:
clf.set_params(C=c)
clf.fit(X, y)
coefs_.append(clf.coef_.ravel().copy())
print("This took ", datetime.now() - start)
coefs_ = np.array(coefs_)
plt.plot(np.log10(cs), coefs_)
ymin, ymax = plt.ylim()
plt.xlabel('log(C)')
plt.ylabel('Coefficients')
plt.title('Logistic Regression Path')
plt.axis('tight')
plt.show()
| bsd-3-clause |
ssaeger/scikit-learn | sklearn/feature_selection/tests/test_base.py | 143 | 3670 | import numpy as np
from scipy import sparse as sp
from nose.tools import assert_raises, assert_equal
from numpy.testing import assert_array_equal
from sklearn.base import BaseEstimator
from sklearn.feature_selection.base import SelectorMixin
from sklearn.utils import check_array
class StepSelector(SelectorMixin, BaseEstimator):
"""Retain every `step` features (beginning with 0)"""
def __init__(self, step=2):
self.step = step
def fit(self, X, y=None):
X = check_array(X, 'csc')
self.n_input_feats = X.shape[1]
return self
def _get_support_mask(self):
mask = np.zeros(self.n_input_feats, dtype=bool)
mask[::self.step] = True
return mask
support = [True, False] * 5
support_inds = [0, 2, 4, 6, 8]
X = np.arange(20).reshape(2, 10)
Xt = np.arange(0, 20, 2).reshape(2, 5)
Xinv = X.copy()
Xinv[:, 1::2] = 0
y = [0, 1]
feature_names = list('ABCDEFGHIJ')
feature_names_t = feature_names[::2]
feature_names_inv = np.array(feature_names)
feature_names_inv[1::2] = ''
def test_transform_dense():
sel = StepSelector()
Xt_actual = sel.fit(X, y).transform(X)
Xt_actual2 = StepSelector().fit_transform(X, y)
assert_array_equal(Xt, Xt_actual)
assert_array_equal(Xt, Xt_actual2)
# Check dtype matches
assert_equal(np.int32, sel.transform(X.astype(np.int32)).dtype)
assert_equal(np.float32, sel.transform(X.astype(np.float32)).dtype)
# Check 1d list and other dtype:
names_t_actual = sel.transform([feature_names])
assert_array_equal(feature_names_t, names_t_actual.ravel())
# Check wrong shape raises error
assert_raises(ValueError, sel.transform, np.array([[1], [2]]))
def test_transform_sparse():
sparse = sp.csc_matrix
sel = StepSelector()
Xt_actual = sel.fit(sparse(X)).transform(sparse(X))
Xt_actual2 = sel.fit_transform(sparse(X))
assert_array_equal(Xt, Xt_actual.toarray())
assert_array_equal(Xt, Xt_actual2.toarray())
# Check dtype matches
assert_equal(np.int32, sel.transform(sparse(X).astype(np.int32)).dtype)
assert_equal(np.float32, sel.transform(sparse(X).astype(np.float32)).dtype)
# Check wrong shape raises error
assert_raises(ValueError, sel.transform, np.array([[1], [2]]))
def test_inverse_transform_dense():
sel = StepSelector()
Xinv_actual = sel.fit(X, y).inverse_transform(Xt)
assert_array_equal(Xinv, Xinv_actual)
# Check dtype matches
assert_equal(np.int32,
sel.inverse_transform(Xt.astype(np.int32)).dtype)
assert_equal(np.float32,
sel.inverse_transform(Xt.astype(np.float32)).dtype)
# Check 1d list and other dtype:
names_inv_actual = sel.inverse_transform([feature_names_t])
assert_array_equal(feature_names_inv, names_inv_actual.ravel())
# Check wrong shape raises error
assert_raises(ValueError, sel.inverse_transform, np.array([[1], [2]]))
def test_inverse_transform_sparse():
sparse = sp.csc_matrix
sel = StepSelector()
Xinv_actual = sel.fit(sparse(X)).inverse_transform(sparse(Xt))
assert_array_equal(Xinv, Xinv_actual.toarray())
# Check dtype matches
assert_equal(np.int32,
sel.inverse_transform(sparse(Xt).astype(np.int32)).dtype)
assert_equal(np.float32,
sel.inverse_transform(sparse(Xt).astype(np.float32)).dtype)
# Check wrong shape raises error
assert_raises(ValueError, sel.inverse_transform, np.array([[1], [2]]))
def test_get_support():
sel = StepSelector()
sel.fit(X, y)
assert_array_equal(support, sel.get_support())
assert_array_equal(support_inds, sel.get_support(indices=True))
| bsd-3-clause |
tracierenea/gnuradio | gr-filter/examples/channelize.py | 58 | 7003 | #!/usr/bin/env python
#
# Copyright 2009,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import blocks
from gnuradio import filter
import sys, time
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
try:
import scipy
from scipy import fftpack
except ImportError:
sys.stderr.write("Error: Program requires scipy (see: www.scipy.org).\n")
sys.exit(1)
try:
import pylab
from pylab import mlab
except ImportError:
sys.stderr.write("Error: Program requires matplotlib (see: matplotlib.sourceforge.net).\n")
sys.exit(1)
class pfb_top_block(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
self._N = 2000000 # number of samples to use
self._fs = 1000 # initial sampling rate
self._M = M = 9 # Number of channels to channelize
self._ifs = M*self._fs # initial sampling rate
# Create a set of taps for the PFB channelizer
self._taps = filter.firdes.low_pass_2(1, self._ifs, 475.50, 50,
attenuation_dB=100,
window=filter.firdes.WIN_BLACKMAN_hARRIS)
# Calculate the number of taps per channel for our own information
tpc = scipy.ceil(float(len(self._taps)) / float(self._M))
print "Number of taps: ", len(self._taps)
print "Number of channels: ", self._M
print "Taps per channel: ", tpc
# Create a set of signals at different frequencies
# freqs lists the frequencies of the signals that get stored
# in the list "signals", which then get summed together
self.signals = list()
self.add = blocks.add_cc()
freqs = [-70, -50, -30, -10, 10, 20, 40, 60, 80]
for i in xrange(len(freqs)):
f = freqs[i] + (M/2-M+i+1)*self._fs
self.signals.append(analog.sig_source_c(self._ifs, analog.GR_SIN_WAVE, f, 1))
self.connect(self.signals[i], (self.add,i))
self.head = blocks.head(gr.sizeof_gr_complex, self._N)
# Construct the channelizer filter
self.pfb = filter.pfb.channelizer_ccf(self._M, self._taps, 1)
# Construct a vector sink for the input signal to the channelizer
self.snk_i = blocks.vector_sink_c()
# Connect the blocks
self.connect(self.add, self.head, self.pfb)
self.connect(self.add, self.snk_i)
# Use this to play with the channel mapping
#self.pfb.set_channel_map([5,6,7,8,0,1,2,3,4])
# Create a vector sink for each of M output channels of the filter and connect it
self.snks = list()
for i in xrange(self._M):
self.snks.append(blocks.vector_sink_c())
self.connect((self.pfb, i), self.snks[i])
def main():
tstart = time.time()
tb = pfb_top_block()
tb.run()
tend = time.time()
print "Run time: %f" % (tend - tstart)
if 1:
fig_in = pylab.figure(1, figsize=(16,9), facecolor="w")
fig1 = pylab.figure(2, figsize=(16,9), facecolor="w")
fig2 = pylab.figure(3, figsize=(16,9), facecolor="w")
Ns = 1000
Ne = 10000
fftlen = 8192
winfunc = scipy.blackman
fs = tb._ifs
# Plot the input signal on its own figure
d = tb.snk_i.data()[Ns:Ne]
spin_f = fig_in.add_subplot(2, 1, 1)
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_in = 10.0*scipy.log10(abs(X))
f_in = scipy.arange(-fs/2.0, fs/2.0, fs/float(X_in.size))
pin_f = spin_f.plot(f_in, X_in, "b")
spin_f.set_xlim([min(f_in), max(f_in)+1])
spin_f.set_ylim([-200.0, 50.0])
spin_f.set_title("Input Signal", weight="bold")
spin_f.set_xlabel("Frequency (Hz)")
spin_f.set_ylabel("Power (dBW)")
Ts = 1.0/fs
Tmax = len(d)*Ts
t_in = scipy.arange(0, Tmax, Ts)
x_in = scipy.array(d)
spin_t = fig_in.add_subplot(2, 1, 2)
pin_t = spin_t.plot(t_in, x_in.real, "b")
pin_t = spin_t.plot(t_in, x_in.imag, "r")
spin_t.set_xlabel("Time (s)")
spin_t.set_ylabel("Amplitude")
Ncols = int(scipy.floor(scipy.sqrt(tb._M)))
Nrows = int(scipy.floor(tb._M / Ncols))
if(tb._M % Ncols != 0):
Nrows += 1
        # Plot each of the channels' outputs. Frequencies on Figure 2 and
# time signals on Figure 3
fs_o = tb._fs
Ts_o = 1.0/fs_o
Tmax_o = len(d)*Ts_o
for i in xrange(len(tb.snks)):
# remove issues with the transients at the beginning
# also remove some corruption at the end of the stream
# this is a bug, probably due to the corner cases
d = tb.snks[i].data()[Ns:Ne]
sp1_f = fig1.add_subplot(Nrows, Ncols, 1+i)
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs_o,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_o = 10.0*scipy.log10(abs(X))
f_o = scipy.arange(-fs_o/2.0, fs_o/2.0, fs_o/float(X_o.size))
p2_f = sp1_f.plot(f_o, X_o, "b")
sp1_f.set_xlim([min(f_o), max(f_o)+1])
sp1_f.set_ylim([-200.0, 50.0])
sp1_f.set_title(("Channel %d" % i), weight="bold")
sp1_f.set_xlabel("Frequency (Hz)")
sp1_f.set_ylabel("Power (dBW)")
x_o = scipy.array(d)
t_o = scipy.arange(0, Tmax_o, Ts_o)
sp2_o = fig2.add_subplot(Nrows, Ncols, 1+i)
p2_o = sp2_o.plot(t_o, x_o.real, "b")
p2_o = sp2_o.plot(t_o, x_o.imag, "r")
sp2_o.set_xlim([min(t_o), max(t_o)+1])
sp2_o.set_ylim([-2, 2])
sp2_o.set_title(("Channel %d" % i), weight="bold")
sp2_o.set_xlabel("Time (s)")
sp2_o.set_ylabel("Amplitude")
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
mediaProduct2017/learn_NeuralNet | neural_network_design.py | 1 | 1568 | """
In order to decide how many hidden nodes the hidden layer should have,
split up the data set into training and testing data and create networks
with various hidden node counts (5, 10, 15, ... 45), testing the performance
for each.
The best-performing node count is used in the actual system. If multiple counts
perform similarly, choose the smallest count for a smaller network with fewer computations.
"""
import numpy as np
from ocr import OCRNeuralNetwork
from sklearn.cross_validation import train_test_split
def test(data_matrix, data_labels, test_indices, nn):
avg_sum = 0
for j in xrange(100):
correct_guess_count = 0
for i in test_indices:
test = data_matrix[i]
prediction = nn.predict(test)
if data_labels[i] == prediction:
correct_guess_count += 1
avg_sum += (correct_guess_count / float(len(test_indices)))
return avg_sum / 100
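# Editor's sketch (not part of the original script): one way to turn the
# accuracies printed below into a concrete choice, following the rule in the
# module docstring -- take the smallest hidden-node count whose accuracy is
# within a small tolerance of the best. `performances` is assumed to be a dict
# mapping node count -> accuracy; the helper name and tolerance are
# illustrative assumptions.
def pick_node_count(performances, tolerance=0.01):
    best = max(performances.values())
    # Walk the counts in increasing order and keep the first one close enough.
    for count in sorted(performances):
        if best - performances[count] <= tolerance:
            return count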
# Load data samples and labels into matrix
data_matrix = np.loadtxt(open('data.csv', 'rb'), delimiter = ',').tolist()
data_labels = np.loadtxt(open('dataLabels.csv', 'rb')).tolist()
# Create training and testing sets.
train_indices, test_indices = train_test_split(list(range(5000)))
print "PERFORMANCE"
print "-----------"
# Try various number of hidden nodes and see what performs best
for i in xrange(5, 50, 5):
nn = OCRNeuralNetwork(i, data_matrix, data_labels, train_indices, False)
performance = str(test(data_matrix, data_labels, test_indices, nn))
print "{i} Hidden Nodes: {val}".format(i=i, val=performance) | mit |
edxnercel/edx-platform | .pycharm_helpers/pydev/pydev_ipython/inputhook.py | 52 | 18411 | # coding: utf-8
"""
Inputhook management for GUI event loop integration.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import sys
import select
#-----------------------------------------------------------------------------
# Constants
#-----------------------------------------------------------------------------
# Constants for identifying the GUI toolkits.
GUI_WX = 'wx'
GUI_QT = 'qt'
GUI_QT4 = 'qt4'
GUI_GTK = 'gtk'
GUI_TK = 'tk'
GUI_OSX = 'osx'
GUI_GLUT = 'glut'
GUI_PYGLET = 'pyglet'
GUI_GTK3 = 'gtk3'
GUI_NONE = 'none' # i.e. disable
#-----------------------------------------------------------------------------
# Utilities
#-----------------------------------------------------------------------------
def ignore_CTRL_C():
"""Ignore CTRL+C (not implemented)."""
pass
def allow_CTRL_C():
"""Take CTRL+C into account (not implemented)."""
pass
#-----------------------------------------------------------------------------
# Main InputHookManager class
#-----------------------------------------------------------------------------
class InputHookManager(object):
"""Manage PyOS_InputHook for different GUI toolkits.
    This class installs various hooks under ``PyOS_InputHook`` to handle
GUI event loop integration.
"""
def __init__(self):
self._return_control_callback = None
self._apps = {}
self._reset()
self.pyplot_imported = False
def _reset(self):
self._callback_pyfunctype = None
self._callback = None
self._current_gui = None
def set_return_control_callback(self, return_control_callback):
self._return_control_callback = return_control_callback
def get_return_control_callback(self):
return self._return_control_callback
def return_control(self):
return self._return_control_callback()
def get_inputhook(self):
return self._callback
def set_inputhook(self, callback):
"""Set inputhook to callback."""
        # We don't (in the context of the PyDev console) actually set PyOS_InputHook;
        # instead, this callback is run while waiting for input over xmlrpc.
self._callback = callback
def clear_inputhook(self, app=None):
"""Clear input hook.
Parameters
----------
app : optional, ignored
This parameter is allowed only so that clear_inputhook() can be
called with a similar interface as all the ``enable_*`` methods. But
the actual value of the parameter is ignored. This uniform interface
makes it easier to have user-level entry points in the main IPython
app like :meth:`enable_gui`."""
self._reset()
def clear_app_refs(self, gui=None):
"""Clear IPython's internal reference to an application instance.
Whenever we create an app for a user on qt4 or wx, we hold a
reference to the app. This is needed because in some cases bad things
can happen if a user doesn't hold a reference themselves. This
method is provided to clear the references we are holding.
Parameters
----------
gui : None or str
If None, clear all app references. If ('wx', 'qt4') clear
the app for that toolkit. References are not held for gtk or tk
as those toolkits don't have the notion of an app.
"""
if gui is None:
self._apps = {}
elif gui in self._apps:
del self._apps[gui]
def enable_wx(self, app=None):
"""Enable event loop integration with wxPython.
Parameters
----------
app : WX Application, optional.
Running application to use. If not given, we probe WX for an
existing application object, and create a new one if none is found.
Notes
-----
        This method sets the ``PyOS_InputHook`` for wxPython, which allows
        wxPython to integrate with terminal based applications like
IPython.
If ``app`` is not given we probe for an existing one, and return it if
found. If no existing app is found, we create an :class:`wx.App` as
follows::
import wx
app = wx.App(redirect=False, clearSigInt=False)
"""
import wx
from distutils.version import LooseVersion as V
wx_version = V(wx.__version__).version
if wx_version < [2, 8]:
raise ValueError("requires wxPython >= 2.8, but you have %s" % wx.__version__)
from pydev_ipython.inputhookwx import inputhook_wx
self.set_inputhook(inputhook_wx)
self._current_gui = GUI_WX
if app is None:
app = wx.GetApp()
if app is None:
app = wx.App(redirect=False, clearSigInt=False)
app._in_event_loop = True
self._apps[GUI_WX] = app
return app
def disable_wx(self):
"""Disable event loop integration with wxPython.
This merely sets PyOS_InputHook to NULL.
"""
if GUI_WX in self._apps:
self._apps[GUI_WX]._in_event_loop = False
self.clear_inputhook()
def enable_qt4(self, app=None):
"""Enable event loop integration with PyQt4.
Parameters
----------
app : Qt Application, optional.
Running application to use. If not given, we probe Qt for an
existing application object, and create a new one if none is found.
Notes
-----
        This method sets the PyOS_InputHook for PyQt4, which allows
        PyQt4 to integrate with terminal based applications like
IPython.
If ``app`` is not given we probe for an existing one, and return it if
found. If no existing app is found, we create an :class:`QApplication`
as follows::
            from PyQt4 import QtGui
app = QtGui.QApplication(sys.argv)
"""
from pydev_ipython.inputhookqt4 import create_inputhook_qt4
app, inputhook_qt4 = create_inputhook_qt4(self, app)
self.set_inputhook(inputhook_qt4)
self._current_gui = GUI_QT4
app._in_event_loop = True
self._apps[GUI_QT4] = app
return app
def disable_qt4(self):
"""Disable event loop integration with PyQt4.
This merely sets PyOS_InputHook to NULL.
"""
if GUI_QT4 in self._apps:
self._apps[GUI_QT4]._in_event_loop = False
self.clear_inputhook()
def enable_gtk(self, app=None):
"""Enable event loop integration with PyGTK.
Parameters
----------
app : ignored
Ignored, it's only a placeholder to keep the call signature of all
gui activation methods consistent, which simplifies the logic of
supporting magics.
Notes
-----
        This method sets the PyOS_InputHook for PyGTK, which allows
        PyGTK to integrate with terminal based applications like
IPython.
"""
from pydev_ipython.inputhookgtk import create_inputhook_gtk
self.set_inputhook(create_inputhook_gtk(self._stdin_file))
self._current_gui = GUI_GTK
def disable_gtk(self):
"""Disable event loop integration with PyGTK.
This merely sets PyOS_InputHook to NULL.
"""
self.clear_inputhook()
def enable_tk(self, app=None):
"""Enable event loop integration with Tk.
Parameters
----------
app : toplevel :class:`Tkinter.Tk` widget, optional.
Running toplevel widget to use. If not given, we probe Tk for an
existing one, and create a new one if none is found.
Notes
-----
If you have already created a :class:`Tkinter.Tk` object, the only
thing done by this method is to register with the
:class:`InputHookManager`, since creating that object automatically
sets ``PyOS_InputHook``.
"""
self._current_gui = GUI_TK
if app is None:
try:
import Tkinter as _TK
except:
# Python 3
import tkinter as _TK
app = _TK.Tk()
app.withdraw()
self._apps[GUI_TK] = app
from pydev_ipython.inputhooktk import create_inputhook_tk
self.set_inputhook(create_inputhook_tk(app))
return app
def disable_tk(self):
"""Disable event loop integration with Tkinter.
This merely sets PyOS_InputHook to NULL.
"""
self.clear_inputhook()
def enable_glut(self, app=None):
""" Enable event loop integration with GLUT.
Parameters
----------
app : ignored
Ignored, it's only a placeholder to keep the call signature of all
gui activation methods consistent, which simplifies the logic of
supporting magics.
Notes
-----
        This method sets the PyOS_InputHook for GLUT, which allows GLUT to
        integrate with terminal based applications like IPython. Due to GLUT
        limitations, it is currently not possible to start the event loop
        without first creating a window. You should thus not create another
        window but instead use the created one. See 'gui-glut.py' in the
docs/examples/lib directory.
The default screen mode is set to:
glut.GLUT_DOUBLE | glut.GLUT_RGBA | glut.GLUT_DEPTH
"""
import OpenGL.GLUT as glut
from pydev_ipython.inputhookglut import glut_display_mode, \
glut_close, glut_display, \
glut_idle, inputhook_glut
if GUI_GLUT not in self._apps:
glut.glutInit(sys.argv)
glut.glutInitDisplayMode(glut_display_mode)
# This is specific to freeglut
if bool(glut.glutSetOption):
glut.glutSetOption(glut.GLUT_ACTION_ON_WINDOW_CLOSE,
glut.GLUT_ACTION_GLUTMAINLOOP_RETURNS)
glut.glutCreateWindow(sys.argv[0])
glut.glutReshapeWindow(1, 1)
glut.glutHideWindow()
glut.glutWMCloseFunc(glut_close)
glut.glutDisplayFunc(glut_display)
glut.glutIdleFunc(glut_idle)
else:
glut.glutWMCloseFunc(glut_close)
glut.glutDisplayFunc(glut_display)
glut.glutIdleFunc(glut_idle)
self.set_inputhook(inputhook_glut)
self._current_gui = GUI_GLUT
self._apps[GUI_GLUT] = True
def disable_glut(self):
"""Disable event loop integration with glut.
        This sets PyOS_InputHook to NULL, sets the display function to a
        dummy one, and sets the timer to a dummy timer that will be triggered
        very far in the future.
"""
import OpenGL.GLUT as glut
from glut_support import glutMainLoopEvent # @UnresolvedImport
glut.glutHideWindow() # This is an event to be processed below
glutMainLoopEvent()
self.clear_inputhook()
def enable_pyglet(self, app=None):
"""Enable event loop integration with pyglet.
Parameters
----------
app : ignored
Ignored, it's only a placeholder to keep the call signature of all
gui activation methods consistent, which simplifies the logic of
supporting magics.
Notes
-----
        This method sets the ``PyOS_InputHook`` for pyglet, which allows
        pyglet to integrate with terminal based applications like
IPython.
"""
from pydev_ipython.inputhookpyglet import inputhook_pyglet
self.set_inputhook(inputhook_pyglet)
self._current_gui = GUI_PYGLET
return app
def disable_pyglet(self):
"""Disable event loop integration with pyglet.
This merely sets PyOS_InputHook to NULL.
"""
self.clear_inputhook()
def enable_gtk3(self, app=None):
"""Enable event loop integration with Gtk3 (gir bindings).
Parameters
----------
app : ignored
Ignored, it's only a placeholder to keep the call signature of all
gui activation methods consistent, which simplifies the logic of
supporting magics.
Notes
-----
        This method sets the PyOS_InputHook for Gtk3, which allows
        Gtk3 to integrate with terminal based applications like
IPython.
"""
from pydev_ipython.inputhookgtk3 import create_inputhook_gtk3
self.set_inputhook(create_inputhook_gtk3(self._stdin_file))
self._current_gui = GUI_GTK
def disable_gtk3(self):
"""Disable event loop integration with PyGTK.
This merely sets PyOS_InputHook to NULL.
"""
self.clear_inputhook()
def enable_mac(self, app=None):
""" Enable event loop integration with MacOSX.
        We call the function pyplot.pause, which updates and displays the
        active figure during the pause. This is not MacOSX-specific, but it
        lets us avoid inputhooks in the native MacOSX backend.
        We also must not import pyplot until the user does, because the
        backend can only be chosen before pyplot is imported for the first
        time.
"""
def inputhook_mac(app=None):
if self.pyplot_imported:
pyplot = sys.modules['matplotlib.pyplot']
try:
pyplot.pause(0.01)
except:
pass
else:
if 'matplotlib.pyplot' in sys.modules:
self.pyplot_imported = True
self.set_inputhook(inputhook_mac)
self._current_gui = GUI_OSX
def disable_mac(self):
self.clear_inputhook()
def current_gui(self):
"""Return a string indicating the currently active GUI or None."""
return self._current_gui
inputhook_manager = InputHookManager()
enable_wx = inputhook_manager.enable_wx
disable_wx = inputhook_manager.disable_wx
enable_qt4 = inputhook_manager.enable_qt4
disable_qt4 = inputhook_manager.disable_qt4
enable_gtk = inputhook_manager.enable_gtk
disable_gtk = inputhook_manager.disable_gtk
enable_tk = inputhook_manager.enable_tk
disable_tk = inputhook_manager.disable_tk
enable_glut = inputhook_manager.enable_glut
disable_glut = inputhook_manager.disable_glut
enable_pyglet = inputhook_manager.enable_pyglet
disable_pyglet = inputhook_manager.disable_pyglet
enable_gtk3 = inputhook_manager.enable_gtk3
disable_gtk3 = inputhook_manager.disable_gtk3
enable_mac = inputhook_manager.enable_mac
disable_mac = inputhook_manager.disable_mac
clear_inputhook = inputhook_manager.clear_inputhook
set_inputhook = inputhook_manager.set_inputhook
current_gui = inputhook_manager.current_gui
clear_app_refs = inputhook_manager.clear_app_refs
# We maintain this as stdin_ready so that the individual inputhooks
# can diverge as little as possible from their IPython sources
stdin_ready = inputhook_manager.return_control
set_return_control_callback = inputhook_manager.set_return_control_callback
get_return_control_callback = inputhook_manager.get_return_control_callback
get_inputhook = inputhook_manager.get_inputhook
# Convenience function to switch amongst them
def enable_gui(gui=None, app=None):
"""Switch amongst GUI input hooks by name.
This is just a utility wrapper around the methods of the InputHookManager
object.
Parameters
----------
gui : optional, string or None
If None (or 'none'), clears input hook, otherwise it must be one
of the recognized GUI names (see ``GUI_*`` constants in module).
app : optional, existing application object.
For toolkits that have the concept of a global app, you can supply an
existing one. If not given, the toolkit will be probed for one, and if
none is found, a new one will be created. Note that GTK does not have
this concept, and passing an app if ``gui=="GTK"`` will raise an error.
Returns
-------
The output of the underlying gui switch routine, typically the actual
PyOS_InputHook wrapper object or the GUI toolkit app created, if there was
one.
"""
if get_return_control_callback() is None:
raise ValueError("A return_control_callback must be supplied as a reference before a gui can be enabled")
guis = {GUI_NONE: clear_inputhook,
GUI_OSX: enable_mac,
GUI_TK: enable_tk,
GUI_GTK: enable_gtk,
GUI_WX: enable_wx,
GUI_QT: enable_qt4, # qt3 not supported
GUI_QT4: enable_qt4,
GUI_GLUT: enable_glut,
GUI_PYGLET: enable_pyglet,
GUI_GTK3: enable_gtk3,
}
try:
gui_hook = guis[gui]
except KeyError:
if gui is None or gui == '':
gui_hook = clear_inputhook
else:
e = "Invalid GUI request %r, valid ones are:%s" % (gui, guis.keys())
raise ValueError(e)
return gui_hook(app)
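# Editor's note (illustrative usage sketch, not part of the original module):
# a return-control callback must be registered before any GUI hook can be
# enabled, e.g.
#
#     set_return_control_callback(lambda: True)   # hypothetical callback
#     enable_gui('tk')                             # installs the Tk inputhook
#
# The lambda above is an assumption for illustration; the real console wires in
# a callback that reports whether control should be returned to it.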
__all__ = [
"GUI_WX",
"GUI_QT",
"GUI_QT4",
"GUI_GTK",
"GUI_TK",
"GUI_OSX",
"GUI_GLUT",
"GUI_PYGLET",
"GUI_GTK3",
"GUI_NONE",
"ignore_CTRL_C",
"allow_CTRL_C",
"InputHookManager",
"inputhook_manager",
"enable_wx",
"disable_wx",
"enable_qt4",
"disable_qt4",
"enable_gtk",
"disable_gtk",
"enable_tk",
"disable_tk",
"enable_glut",
"disable_glut",
"enable_pyglet",
"disable_pyglet",
"enable_gtk3",
"disable_gtk3",
"enable_mac",
"disable_mac",
"clear_inputhook",
"set_inputhook",
"current_gui",
"clear_app_refs",
"stdin_ready",
"set_return_control_callback",
"get_return_control_callback",
"get_inputhook",
"enable_gui"]
| agpl-3.0 |
linebp/pandas | pandas/tests/series/test_indexing.py | 1 | 88099 | # coding=utf-8
# pylint: disable-msg=E1101,W0612
import pytest
from datetime import datetime, timedelta
from numpy import nan
import numpy as np
import pandas as pd
import pandas._libs.index as _index
from pandas.core.dtypes.common import is_integer, is_scalar
from pandas import (Index, Series, DataFrame, isnull,
date_range, NaT, MultiIndex,
Timestamp, DatetimeIndex, Timedelta)
from pandas.core.indexing import IndexingError
from pandas.tseries.offsets import BDay
from pandas._libs import tslib, lib
from pandas.compat import lrange, range
from pandas import compat
from pandas.util.testing import (slow,
assert_series_equal,
assert_almost_equal,
assert_frame_equal)
import pandas.util.testing as tm
from pandas.tests.series.common import TestData
JOIN_TYPES = ['inner', 'outer', 'left', 'right']
class TestSeriesIndexing(TestData):
def test_get(self):
# GH 6383
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56, 45,
51, 39, 55, 43, 54, 52, 51, 54]))
result = s.get(25, 0)
expected = 0
assert result == expected
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56,
45, 51, 39, 55, 43, 54, 52, 51, 54]),
index=pd.Float64Index(
[25.0, 36.0, 49.0, 64.0, 81.0, 100.0,
121.0, 144.0, 169.0, 196.0, 1225.0,
1296.0, 1369.0, 1444.0, 1521.0, 1600.0,
1681.0, 1764.0, 1849.0, 1936.0],
dtype='object'))
result = s.get(25, 0)
expected = 43
assert result == expected
# GH 7407
# with a boolean accessor
df = pd.DataFrame({'i': [0] * 3, 'b': [False] * 3})
vc = df.i.value_counts()
result = vc.get(99, default='Missing')
assert result == 'Missing'
vc = df.b.value_counts()
result = vc.get(False, default='Missing')
assert result == 3
result = vc.get(True, default='Missing')
assert result == 'Missing'
def test_get_nan(self):
# GH 8569
s = pd.Float64Index(range(10)).to_series()
assert s.get(np.nan) is None
assert s.get(np.nan, default='Missing') == 'Missing'
# ensure that fixing the above hasn't broken get
# with multiple elements
idx = [20, 30]
assert_series_equal(s.get(idx),
Series([np.nan] * 2, index=idx))
idx = [np.nan, np.nan]
assert_series_equal(s.get(idx),
Series([np.nan] * 2, index=idx))
def test_delitem(self):
# GH 5542
# should delete the item inplace
s = Series(lrange(5))
del s[0]
expected = Series(lrange(1, 5), index=lrange(1, 5))
assert_series_equal(s, expected)
del s[1]
expected = Series(lrange(2, 5), index=lrange(2, 5))
assert_series_equal(s, expected)
# empty
s = Series()
def f():
del s[0]
pytest.raises(KeyError, f)
# only 1 left, del, add, del
s = Series(1)
del s[0]
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='int64')))
s[0] = 1
assert_series_equal(s, Series(1))
del s[0]
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='int64')))
# Index(dtype=object)
s = Series(1, index=['a'])
del s['a']
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='object')))
s['a'] = 1
assert_series_equal(s, Series(1, index=['a']))
del s['a']
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='object')))
def test_getitem_setitem_ellipsis(self):
s = Series(np.random.randn(10))
np.fix(s)
result = s[...]
assert_series_equal(result, s)
s[...] = 5
assert (result == 5).all()
def test_getitem_negative_out_of_bounds(self):
s = Series(tm.rands_array(5, 10), index=tm.rands_array(10, 10))
pytest.raises(IndexError, s.__getitem__, -11)
pytest.raises(IndexError, s.__setitem__, -11, 'foo')
def test_pop(self):
# GH 6600
df = DataFrame({'A': 0, 'B': np.arange(5, dtype='int64'), 'C': 0, })
k = df.iloc[4]
result = k.pop('B')
assert result == 4
expected = Series([0, 0], index=['A', 'C'], name=4)
assert_series_equal(k, expected)
def test_getitem_get(self):
idx1 = self.series.index[5]
idx2 = self.objSeries.index[5]
assert self.series[idx1] == self.series.get(idx1)
assert self.objSeries[idx2] == self.objSeries.get(idx2)
assert self.series[idx1] == self.series[5]
assert self.objSeries[idx2] == self.objSeries[5]
assert self.series.get(-1) == self.series.get(self.series.index[-1])
assert self.series[5] == self.series.get(self.series.index[5])
# missing
d = self.ts.index[0] - BDay()
pytest.raises(KeyError, self.ts.__getitem__, d)
# None
# GH 5652
for s in [Series(), Series(index=list('abc'))]:
result = s.get(None)
assert result is None
def test_iloc(self):
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
for i in range(len(s)):
result = s.iloc[i]
exp = s[s.index[i]]
assert_almost_equal(result, exp)
# pass a slice
result = s.iloc[slice(1, 3)]
expected = s.loc[2:4]
assert_series_equal(result, expected)
# test slice is a view
result[:] = 0
assert (s[1:3] == 0).all()
# list of integers
result = s.iloc[[0, 2, 3, 4, 5]]
expected = s.reindex(s.index[[0, 2, 3, 4, 5]])
assert_series_equal(result, expected)
def test_iloc_nonunique(self):
s = Series([0, 1, 2], index=[0, 1, 0])
assert s.iloc[2] == 2
def test_getitem_regression(self):
s = Series(lrange(5), index=lrange(5))
result = s[lrange(5)]
assert_series_equal(result, s)
def test_getitem_setitem_slice_bug(self):
s = Series(lrange(10), lrange(10))
result = s[-12:]
assert_series_equal(result, s)
result = s[-7:]
assert_series_equal(result, s[3:])
result = s[:-12]
assert_series_equal(result, s[:0])
s = Series(lrange(10), lrange(10))
s[-12:] = 0
assert (s == 0).all()
s[:-12] = 5
assert (s == 0).all()
def test_getitem_int64(self):
idx = np.int64(5)
assert self.ts[idx] == self.ts[5]
def test_getitem_fancy(self):
slice1 = self.series[[1, 2, 3]]
slice2 = self.objSeries[[1, 2, 3]]
assert self.series.index[2] == slice1.index[1]
assert self.objSeries.index[2] == slice2.index[1]
assert self.series[2] == slice1[1]
assert self.objSeries[2] == slice2[1]
def test_getitem_boolean(self):
s = self.series
mask = s > s.median()
# passing list is OK
result = s[list(mask)]
expected = s[mask]
assert_series_equal(result, expected)
tm.assert_index_equal(result.index, s.index[mask])
def test_getitem_boolean_empty(self):
s = Series([], dtype=np.int64)
s.index.name = 'index_name'
s = s[s.isnull()]
assert s.index.name == 'index_name'
assert s.dtype == np.int64
# GH5877
# indexing with empty series
s = Series(['A', 'B'])
expected = Series(np.nan, index=['C'], dtype=object)
result = s[Series(['C'], dtype=object)]
assert_series_equal(result, expected)
s = Series(['A', 'B'])
expected = Series(dtype=object, index=Index([], dtype='int64'))
result = s[Series([], dtype=object)]
assert_series_equal(result, expected)
# invalid because of the boolean indexer
# that's empty or not-aligned
def f():
s[Series([], dtype=bool)]
pytest.raises(IndexingError, f)
def f():
s[Series([True], dtype=bool)]
pytest.raises(IndexingError, f)
def test_getitem_generator(self):
gen = (x > 0 for x in self.series)
result = self.series[gen]
result2 = self.series[iter(self.series > 0)]
expected = self.series[self.series > 0]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
def test_type_promotion(self):
# GH12599
s = pd.Series()
s["a"] = pd.Timestamp("2016-01-01")
s["b"] = 3.0
s["c"] = "foo"
expected = Series([pd.Timestamp("2016-01-01"), 3.0, "foo"],
index=["a", "b", "c"])
assert_series_equal(s, expected)
def test_getitem_boolean_object(self):
# using column from DataFrame
s = self.series
mask = s > s.median()
omask = mask.astype(object)
# getitem
result = s[omask]
expected = s[mask]
assert_series_equal(result, expected)
# setitem
s2 = s.copy()
cop = s.copy()
cop[omask] = 5
s2[mask] = 5
assert_series_equal(cop, s2)
# nans raise exception
omask[5:10] = np.nan
pytest.raises(Exception, s.__getitem__, omask)
pytest.raises(Exception, s.__setitem__, omask, 5)
def test_getitem_setitem_boolean_corner(self):
ts = self.ts
mask_shifted = ts.shift(1, freq=BDay()) > ts.median()
# these used to raise...??
pytest.raises(Exception, ts.__getitem__, mask_shifted)
pytest.raises(Exception, ts.__setitem__, mask_shifted, 1)
# ts[mask_shifted]
# ts[mask_shifted] = 1
pytest.raises(Exception, ts.loc.__getitem__, mask_shifted)
pytest.raises(Exception, ts.loc.__setitem__, mask_shifted, 1)
# ts.loc[mask_shifted]
# ts.loc[mask_shifted] = 2
def test_getitem_setitem_slice_integers(self):
s = Series(np.random.randn(8), index=[2, 4, 6, 8, 10, 12, 14, 16])
result = s[:4]
expected = s.reindex([2, 4, 6, 8])
assert_series_equal(result, expected)
s[:4] = 0
assert (s[:4] == 0).all()
assert not (s[4:] == 0).any()
def test_getitem_setitem_datetime_tz_pytz(self):
from pytz import timezone as tz
from pandas import date_range
N = 50
# testing with timezone, GH #2785
rng = date_range('1/1/1990', periods=N, freq='H', tz='US/Eastern')
ts = Series(np.random.randn(N), index=rng)
# also test Timestamp tz handling, GH #2789
result = ts.copy()
result["1990-01-01 09:00:00+00:00"] = 0
result["1990-01-01 09:00:00+00:00"] = ts[4]
assert_series_equal(result, ts)
result = ts.copy()
result["1990-01-01 03:00:00-06:00"] = 0
result["1990-01-01 03:00:00-06:00"] = ts[4]
assert_series_equal(result, ts)
# repeat with datetimes
result = ts.copy()
result[datetime(1990, 1, 1, 9, tzinfo=tz('UTC'))] = 0
result[datetime(1990, 1, 1, 9, tzinfo=tz('UTC'))] = ts[4]
assert_series_equal(result, ts)
result = ts.copy()
# comparison dates with datetime MUST be localized!
date = tz('US/Central').localize(datetime(1990, 1, 1, 3))
result[date] = 0
result[date] = ts[4]
assert_series_equal(result, ts)
def test_getitem_setitem_datetime_tz_dateutil(self):
from dateutil.tz import tzutc
from pandas._libs.tslib import _dateutil_gettz as gettz
tz = lambda x: tzutc() if x == 'UTC' else gettz(
x) # handle special case for utc in dateutil
from pandas import date_range
N = 50
# testing with timezone, GH #2785
rng = date_range('1/1/1990', periods=N, freq='H',
tz='America/New_York')
ts = Series(np.random.randn(N), index=rng)
# also test Timestamp tz handling, GH #2789
result = ts.copy()
result["1990-01-01 09:00:00+00:00"] = 0
result["1990-01-01 09:00:00+00:00"] = ts[4]
assert_series_equal(result, ts)
result = ts.copy()
result["1990-01-01 03:00:00-06:00"] = 0
result["1990-01-01 03:00:00-06:00"] = ts[4]
assert_series_equal(result, ts)
# repeat with datetimes
result = ts.copy()
result[datetime(1990, 1, 1, 9, tzinfo=tz('UTC'))] = 0
result[datetime(1990, 1, 1, 9, tzinfo=tz('UTC'))] = ts[4]
assert_series_equal(result, ts)
result = ts.copy()
result[datetime(1990, 1, 1, 3, tzinfo=tz('America/Chicago'))] = 0
result[datetime(1990, 1, 1, 3, tzinfo=tz('America/Chicago'))] = ts[4]
assert_series_equal(result, ts)
def test_getitem_setitem_datetimeindex(self):
N = 50
# testing with timezone, GH #2785
rng = date_range('1/1/1990', periods=N, freq='H', tz='US/Eastern')
ts = Series(np.random.randn(N), index=rng)
result = ts["1990-01-01 04:00:00"]
expected = ts[4]
assert result == expected
result = ts.copy()
result["1990-01-01 04:00:00"] = 0
result["1990-01-01 04:00:00"] = ts[4]
assert_series_equal(result, ts)
result = ts["1990-01-01 04:00:00":"1990-01-01 07:00:00"]
expected = ts[4:8]
assert_series_equal(result, expected)
result = ts.copy()
result["1990-01-01 04:00:00":"1990-01-01 07:00:00"] = 0
result["1990-01-01 04:00:00":"1990-01-01 07:00:00"] = ts[4:8]
assert_series_equal(result, ts)
lb = "1990-01-01 04:00:00"
rb = "1990-01-01 07:00:00"
result = ts[(ts.index >= lb) & (ts.index <= rb)]
expected = ts[4:8]
assert_series_equal(result, expected)
# repeat all the above with naive datetimes
result = ts[datetime(1990, 1, 1, 4)]
expected = ts[4]
assert result == expected
result = ts.copy()
result[datetime(1990, 1, 1, 4)] = 0
result[datetime(1990, 1, 1, 4)] = ts[4]
assert_series_equal(result, ts)
result = ts[datetime(1990, 1, 1, 4):datetime(1990, 1, 1, 7)]
expected = ts[4:8]
assert_series_equal(result, expected)
result = ts.copy()
result[datetime(1990, 1, 1, 4):datetime(1990, 1, 1, 7)] = 0
result[datetime(1990, 1, 1, 4):datetime(1990, 1, 1, 7)] = ts[4:8]
assert_series_equal(result, ts)
lb = datetime(1990, 1, 1, 4)
rb = datetime(1990, 1, 1, 7)
result = ts[(ts.index >= lb) & (ts.index <= rb)]
expected = ts[4:8]
assert_series_equal(result, expected)
result = ts[ts.index[4]]
expected = ts[4]
assert result == expected
result = ts[ts.index[4:8]]
expected = ts[4:8]
assert_series_equal(result, expected)
result = ts.copy()
result[ts.index[4:8]] = 0
result[4:8] = ts[4:8]
assert_series_equal(result, ts)
# also test partial date slicing
result = ts["1990-01-02"]
expected = ts[24:48]
assert_series_equal(result, expected)
result = ts.copy()
result["1990-01-02"] = 0
result["1990-01-02"] = ts[24:48]
assert_series_equal(result, ts)
def test_getitem_setitem_periodindex(self):
from pandas import period_range
N = 50
rng = period_range('1/1/1990', periods=N, freq='H')
ts = Series(np.random.randn(N), index=rng)
result = ts["1990-01-01 04"]
expected = ts[4]
assert result == expected
result = ts.copy()
result["1990-01-01 04"] = 0
result["1990-01-01 04"] = ts[4]
assert_series_equal(result, ts)
result = ts["1990-01-01 04":"1990-01-01 07"]
expected = ts[4:8]
assert_series_equal(result, expected)
result = ts.copy()
result["1990-01-01 04":"1990-01-01 07"] = 0
result["1990-01-01 04":"1990-01-01 07"] = ts[4:8]
assert_series_equal(result, ts)
lb = "1990-01-01 04"
rb = "1990-01-01 07"
result = ts[(ts.index >= lb) & (ts.index <= rb)]
expected = ts[4:8]
assert_series_equal(result, expected)
# GH 2782
result = ts[ts.index[4]]
expected = ts[4]
assert result == expected
result = ts[ts.index[4:8]]
expected = ts[4:8]
assert_series_equal(result, expected)
result = ts.copy()
result[ts.index[4:8]] = 0
result[4:8] = ts[4:8]
assert_series_equal(result, ts)
def test_getitem_median_slice_bug(self):
index = date_range('20090415', '20090519', freq='2B')
s = Series(np.random.randn(13), index=index)
indexer = [slice(6, 7, None)]
result = s[indexer]
expected = s[indexer[0]]
assert_series_equal(result, expected)
def test_getitem_out_of_bounds(self):
# don't segfault, GH #495
pytest.raises(IndexError, self.ts.__getitem__, len(self.ts))
# GH #917
s = Series([])
pytest.raises(IndexError, s.__getitem__, -1)
def test_getitem_setitem_integers(self):
# caused bug without test
s = Series([1, 2, 3], ['a', 'b', 'c'])
assert s.iloc[0] == s['a']
s.iloc[0] = 5
tm.assert_almost_equal(s['a'], 5)
def test_getitem_box_float64(self):
value = self.ts[5]
assert isinstance(value, np.float64)
def test_getitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
pytest.raises(KeyError, s.__getitem__, 1)
pytest.raises(KeyError, s.loc.__getitem__, 1)
def test_getitem_unordered_dup(self):
obj = Series(lrange(5), index=['c', 'a', 'a', 'b', 'b'])
assert is_scalar(obj['c'])
assert obj['c'] == 0
def test_getitem_dups_with_missing(self):
# breaks reindex, so need to use .loc internally
# GH 4246
s = Series([1, 2, 3, 4], ['foo', 'bar', 'foo', 'bah'])
expected = s.loc[['foo', 'bar', 'bah', 'bam']]
result = s[['foo', 'bar', 'bah', 'bam']]
assert_series_equal(result, expected)
def test_getitem_dups(self):
s = Series(range(5), index=['A', 'A', 'B', 'C', 'C'], dtype=np.int64)
expected = Series([3, 4], index=['C', 'C'], dtype=np.int64)
result = s['C']
assert_series_equal(result, expected)
def test_getitem_dataframe(self):
rng = list(range(10))
s = pd.Series(10, index=rng)
df = pd.DataFrame(rng, index=rng)
pytest.raises(TypeError, s.__getitem__, df > 5)
def test_getitem_callable(self):
# GH 12533
s = pd.Series(4, index=list('ABCD'))
result = s[lambda x: 'A']
assert result == s.loc['A']
result = s[lambda x: ['A', 'B']]
tm.assert_series_equal(result, s.loc[['A', 'B']])
result = s[lambda x: [True, False, True, True]]
tm.assert_series_equal(result, s.iloc[[0, 2, 3]])
def test_setitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
# equivalent of an append
s2 = s.copy()
s2[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
s2 = s.copy()
s2.loc[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
def test_setitem_float_labels(self):
# note labels are floats
s = Series(['a', 'b', 'c'], index=[0, 0.5, 1])
tmp = s.copy()
s.loc[1] = 'zoo'
tmp.iloc[2] = 'zoo'
assert_series_equal(s, tmp)
def test_setitem_callable(self):
# GH 12533
s = pd.Series([1, 2, 3, 4], index=list('ABCD'))
s[lambda x: 'A'] = -1
tm.assert_series_equal(s, pd.Series([-1, 2, 3, 4], index=list('ABCD')))
def test_setitem_other_callable(self):
# GH 13299
inc = lambda x: x + 1
s = pd.Series([1, 2, -1, 4])
s[s < 0] = inc
expected = pd.Series([1, 2, inc, 4])
tm.assert_series_equal(s, expected)
def test_slice(self):
numSlice = self.series[10:20]
numSliceEnd = self.series[-10:]
objSlice = self.objSeries[10:20]
assert self.series.index[9] not in numSlice.index
assert self.objSeries.index[9] not in objSlice.index
assert len(numSlice) == len(numSlice.index)
assert self.series[numSlice.index[0]] == numSlice[numSlice.index[0]]
assert numSlice.index[1] == self.series.index[11]
assert tm.equalContents(numSliceEnd, np.array(self.series)[-10:])
# Test return view.
sl = self.series[10:20]
sl[:] = 0
assert (self.series[10:20] == 0).all()
def test_slice_can_reorder_not_uniquely_indexed(self):
s = Series(1, index=['a', 'a', 'b', 'b', 'c'])
s[::-1] # it works!
def test_slice_float_get_set(self):
pytest.raises(TypeError, lambda: self.ts[4.0:10.0])
def f():
self.ts[4.0:10.0] = 0
pytest.raises(TypeError, f)
pytest.raises(TypeError, self.ts.__getitem__, slice(4.5, 10.0))
pytest.raises(TypeError, self.ts.__setitem__, slice(4.5, 10.0), 0)
def test_slice_floats2(self):
s = Series(np.random.rand(10), index=np.arange(10, 20, dtype=float))
assert len(s.loc[12.0:]) == 8
assert len(s.loc[12.5:]) == 7
i = np.arange(10, 20, dtype=float)
i[2] = 12.2
s.index = i
assert len(s.loc[12.0:]) == 8
assert len(s.loc[12.5:]) == 7
def test_slice_float64(self):
values = np.arange(10., 50., 2)
index = Index(values)
start, end = values[[5, 15]]
s = Series(np.random.randn(20), index=index)
result = s[start:end]
expected = s.iloc[5:16]
assert_series_equal(result, expected)
result = s.loc[start:end]
assert_series_equal(result, expected)
df = DataFrame(np.random.randn(20, 3), index=index)
result = df[start:end]
expected = df.iloc[5:16]
tm.assert_frame_equal(result, expected)
result = df.loc[start:end]
tm.assert_frame_equal(result, expected)
def test_setitem(self):
self.ts[self.ts.index[5]] = np.NaN
self.ts[[1, 2, 17]] = np.NaN
self.ts[6] = np.NaN
assert np.isnan(self.ts[6])
assert np.isnan(self.ts[2])
self.ts[np.isnan(self.ts)] = 5
assert not np.isnan(self.ts[2])
# caught this bug when writing tests
series = Series(tm.makeIntIndex(20).astype(float),
index=tm.makeIntIndex(20))
series[::2] = 0
assert (series[::2] == 0).all()
# set item that's not contained
s = self.series.copy()
s['foobar'] = 1
app = Series([1], index=['foobar'], name='series')
expected = self.series.append(app)
assert_series_equal(s, expected)
# Test for issue #10193
key = pd.Timestamp('2012-01-01')
series = pd.Series()
series[key] = 47
expected = pd.Series(47, [key])
assert_series_equal(series, expected)
series = pd.Series([], pd.DatetimeIndex([], freq='D'))
series[key] = 47
expected = pd.Series(47, pd.DatetimeIndex([key], freq='D'))
assert_series_equal(series, expected)
def test_setitem_dtypes(self):
# change dtypes
# GH 4463
expected = Series([np.nan, 2, 3])
s = Series([1, 2, 3])
s.iloc[0] = np.nan
assert_series_equal(s, expected)
s = Series([1, 2, 3])
s.loc[0] = np.nan
assert_series_equal(s, expected)
s = Series([1, 2, 3])
s[0] = np.nan
assert_series_equal(s, expected)
s = Series([False])
s.loc[0] = np.nan
assert_series_equal(s, Series([np.nan]))
s = Series([False, True])
s.loc[0] = np.nan
assert_series_equal(s, Series([np.nan, 1.0]))
def test_set_value(self):
idx = self.ts.index[10]
res = self.ts.set_value(idx, 0)
assert res is self.ts
assert self.ts[idx] == 0
# equiv
s = self.series.copy()
res = s.set_value('foobar', 0)
assert res is s
assert res.index[-1] == 'foobar'
assert res['foobar'] == 0
s = self.series.copy()
s.loc['foobar'] = 0
assert s.index[-1] == 'foobar'
assert s['foobar'] == 0
def test_setslice(self):
sl = self.ts[5:20]
assert len(sl) == len(sl.index)
assert sl.index.is_unique
def test_basic_getitem_setitem_corner(self):
# invalid tuples, e.g. self.ts[:, None] vs. self.ts[:, 2]
with tm.assert_raises_regex(ValueError, 'tuple-index'):
self.ts[:, 2]
with tm.assert_raises_regex(ValueError, 'tuple-index'):
self.ts[:, 2] = 2
# weird lists. [slice(0, 5)] will work but not two slices
result = self.ts[[slice(None, 5)]]
expected = self.ts[:5]
assert_series_equal(result, expected)
# OK
pytest.raises(Exception, self.ts.__getitem__,
[5, slice(None, None)])
pytest.raises(Exception, self.ts.__setitem__,
[5, slice(None, None)], 2)
def test_basic_getitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
result = self.ts[indices]
expected = self.ts.reindex(indices)
assert_series_equal(result, expected)
result = self.ts[indices[0]:indices[2]]
expected = self.ts.loc[indices[0]:indices[2]]
assert_series_equal(result, expected)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 2, 5, 7, 8]
arr_inds = np.array([0, 2, 5, 7, 8])
result = s[inds]
expected = s.reindex(inds)
assert_series_equal(result, expected)
result = s[arr_inds]
expected = s.reindex(arr_inds)
assert_series_equal(result, expected)
# GH12089
# with tz for values
s = Series(pd.date_range("2011-01-01", periods=3, tz="US/Eastern"),
index=['a', 'b', 'c'])
expected = Timestamp('2011-01-01', tz='US/Eastern')
result = s.loc['a']
assert result == expected
result = s.iloc[0]
assert result == expected
result = s['a']
assert result == expected
def test_basic_setitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices] = 0
exp.loc[indices] = 0
assert_series_equal(cp, exp)
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices[0]:indices[2]] = 0
exp.loc[indices[0]:indices[2]] = 0
assert_series_equal(cp, exp)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 4, 6]
arr_inds = np.array([0, 4, 6])
cp = s.copy()
exp = s.copy()
s[inds] = 0
s.loc[inds] = 0
assert_series_equal(cp, exp)
cp = s.copy()
exp = s.copy()
s[arr_inds] = 0
s.loc[arr_inds] = 0
assert_series_equal(cp, exp)
inds_notfound = [0, 4, 5, 6]
arr_inds_notfound = np.array([0, 4, 5, 6])
pytest.raises(Exception, s.__setitem__, inds_notfound, 0)
pytest.raises(Exception, s.__setitem__, arr_inds_notfound, 0)
# GH12089
# with tz for values
s = Series(pd.date_range("2011-01-01", periods=3, tz="US/Eastern"),
index=['a', 'b', 'c'])
s2 = s.copy()
expected = Timestamp('2011-01-03', tz='US/Eastern')
s2.loc['a'] = expected
result = s2.loc['a']
assert result == expected
s2 = s.copy()
s2.iloc[0] = expected
result = s2.iloc[0]
assert result == expected
s2 = s.copy()
s2['a'] = expected
result = s2['a']
assert result == expected
def test_loc_getitem(self):
inds = self.series.index[[3, 4, 7]]
assert_series_equal(self.series.loc[inds], self.series.reindex(inds))
assert_series_equal(self.series.iloc[5::2], self.series[5::2])
# slice with indices
d1, d2 = self.ts.index[[5, 15]]
result = self.ts.loc[d1:d2]
expected = self.ts.truncate(d1, d2)
assert_series_equal(result, expected)
# boolean
mask = self.series > self.series.median()
assert_series_equal(self.series.loc[mask], self.series[mask])
# ask for index value
assert self.ts.loc[d1] == self.ts[d1]
assert self.ts.loc[d2] == self.ts[d2]
def test_loc_getitem_not_monotonic(self):
d1, d2 = self.ts.index[[5, 15]]
ts2 = self.ts[::2][[1, 2, 0]]
pytest.raises(KeyError, ts2.loc.__getitem__, slice(d1, d2))
pytest.raises(KeyError, ts2.loc.__setitem__, slice(d1, d2), 0)
def test_loc_getitem_setitem_integer_slice_keyerrors(self):
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
# this is OK
cp = s.copy()
cp.iloc[4:10] = 0
assert (cp.iloc[4:10] == 0).all()
# so is this
cp = s.copy()
cp.iloc[3:11] = 0
assert (cp.iloc[3:11] == 0).values.all()
result = s.iloc[2:6]
result2 = s.loc[3:11]
expected = s.reindex([4, 6, 8, 10])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# non-monotonic, raise KeyError
s2 = s.iloc[lrange(5) + lrange(5, 10)[::-1]]
pytest.raises(KeyError, s2.loc.__getitem__, slice(3, 11))
pytest.raises(KeyError, s2.loc.__setitem__, slice(3, 11), 0)
def test_loc_getitem_iterator(self):
idx = iter(self.series.index[:10])
result = self.series.loc[idx]
assert_series_equal(result, self.series[:10])
def test_setitem_with_tz(self):
for tz in ['US/Eastern', 'UTC', 'Asia/Tokyo']:
orig = pd.Series(pd.date_range('2016-01-01', freq='H', periods=3,
tz=tz))
assert orig.dtype == 'datetime64[ns, {0}]'.format(tz)
# scalar
s = orig.copy()
s[1] = pd.Timestamp('2011-01-01', tz=tz)
exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2016-01-01 02:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
# vector
vals = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz)], index=[1, 2])
assert vals.dtype == 'datetime64[ns, {0}]'.format(tz)
s[[1, 2]] = vals
exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2012-01-01 00:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
def test_setitem_with_tz_dst(self):
# GH XXX
tz = 'US/Eastern'
orig = pd.Series(pd.date_range('2016-11-06', freq='H', periods=3,
tz=tz))
assert orig.dtype == 'datetime64[ns, {0}]'.format(tz)
# scalar
s = orig.copy()
s[1] = pd.Timestamp('2011-01-01', tz=tz)
exp = pd.Series([pd.Timestamp('2016-11-06 00:00-04:00', tz=tz),
pd.Timestamp('2011-01-01 00:00-05:00', tz=tz),
pd.Timestamp('2016-11-06 01:00-05:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
# vector
vals = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz)], index=[1, 2])
assert vals.dtype == 'datetime64[ns, {0}]'.format(tz)
s[[1, 2]] = vals
exp = pd.Series([pd.Timestamp('2016-11-06 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2012-01-01 00:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
def test_where(self):
s = Series(np.random.randn(5))
cond = s > 0
rs = s.where(cond).dropna()
rs2 = s[cond]
assert_series_equal(rs, rs2)
rs = s.where(cond, -s)
assert_series_equal(rs, s.abs())
rs = s.where(cond)
assert (s.shape == rs.shape)
assert (rs is not s)
# test alignment
cond = Series([True, False, False, True, False], index=s.index)
s2 = -(s.abs())
expected = s2[cond].reindex(s2.index[:3]).reindex(s2.index)
rs = s2.where(cond[:3])
assert_series_equal(rs, expected)
expected = s2.abs()
expected.iloc[0] = s2[0]
rs = s2.where(cond[:3], -s2)
assert_series_equal(rs, expected)
pytest.raises(ValueError, s.where, 1)
pytest.raises(ValueError, s.where, cond[:3].values, -s)
# GH 2745
s = Series([1, 2])
s[[True, False]] = [0, 1]
expected = Series([0, 2])
assert_series_equal(s, expected)
# failures
pytest.raises(ValueError, s.__setitem__, tuple([[[True, False]]]),
[0, 2, 3])
pytest.raises(ValueError, s.__setitem__, tuple([[[True, False]]]),
[])
# unsafe dtype changes
for dtype in [np.int8, np.int16, np.int32, np.int64, np.float16,
np.float32, np.float64]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
s[mask] = lrange(2, 7)
expected = Series(lrange(2, 7) + lrange(5, 10), dtype=dtype)
assert_series_equal(s, expected)
assert s.dtype == expected.dtype
# these are allowed operations, but are upcasted
for dtype in [np.int64, np.float64]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
values = [2.5, 3.5, 4.5, 5.5, 6.5]
s[mask] = values
expected = Series(values + lrange(5, 10), dtype='float64')
assert_series_equal(s, expected)
assert s.dtype == expected.dtype
# GH 9731
s = Series(np.arange(10), dtype='int64')
mask = s > 5
values = [2.5, 3.5, 4.5, 5.5]
s[mask] = values
expected = Series(lrange(6) + values, dtype='float64')
assert_series_equal(s, expected)
# can't do these as we are forced to change the itemsize of the input
# to something we cannot
for dtype in [np.int8, np.int16, np.int32, np.float16, np.float32]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
values = [2.5, 3.5, 4.5, 5.5, 6.5]
pytest.raises(Exception, s.__setitem__, tuple(mask), values)
# GH3235
s = Series(np.arange(10), dtype='int64')
mask = s < 5
s[mask] = lrange(2, 7)
expected = Series(lrange(2, 7) + lrange(5, 10), dtype='int64')
assert_series_equal(s, expected)
assert s.dtype == expected.dtype
s = Series(np.arange(10), dtype='int64')
mask = s > 5
s[mask] = [0] * 4
expected = Series([0, 1, 2, 3, 4, 5] + [0] * 4, dtype='int64')
assert_series_equal(s, expected)
s = Series(np.arange(10))
mask = s > 5
def f():
s[mask] = [5, 4, 3, 2, 1]
pytest.raises(ValueError, f)
def f():
s[mask] = [0] * 5
pytest.raises(ValueError, f)
# dtype changes
s = Series([1, 2, 3, 4])
result = s.where(s > 2, np.nan)
expected = Series([np.nan, np.nan, 3, 4])
assert_series_equal(result, expected)
# GH 4667
# setting with None changes dtype
s = Series(range(10)).astype(float)
s[8] = None
result = s[8]
assert isnull(result)
s = Series(range(10)).astype(float)
s[s > 8] = None
result = s[isnull(s)]
expected = Series(np.nan, index=[9])
assert_series_equal(result, expected)
def test_where_array_like(self):
# see gh-15414
s = Series([1, 2, 3])
cond = [False, True, True]
expected = Series([np.nan, 2, 3])
klasses = [list, tuple, np.array, Series]
for klass in klasses:
result = s.where(klass(cond))
assert_series_equal(result, expected)
def test_where_invalid_input(self):
# see gh-15414: only boolean arrays accepted
s = Series([1, 2, 3])
msg = "Boolean array expected for the condition"
conds = [
[1, 0, 1],
Series([2, 5, 7]),
["True", "False", "True"],
[Timestamp("2017-01-01"),
pd.NaT, Timestamp("2017-01-02")]
]
for cond in conds:
with tm.assert_raises_regex(ValueError, msg):
s.where(cond)
msg = "Array conditional must be same shape as self"
with tm.assert_raises_regex(ValueError, msg):
s.where([True])
def test_where_ndframe_align(self):
msg = "Array conditional must be same shape as self"
s = Series([1, 2, 3])
cond = [True]
with tm.assert_raises_regex(ValueError, msg):
s.where(cond)
expected = Series([1, np.nan, np.nan])
out = s.where(Series(cond))
tm.assert_series_equal(out, expected)
cond = np.array([False, True, False, True])
with tm.assert_raises_regex(ValueError, msg):
s.where(cond)
expected = Series([np.nan, 2, np.nan])
out = s.where(Series(cond))
tm.assert_series_equal(out, expected)
def test_where_setitem_invalid(self):
# GH 2702
# make sure correct exceptions are raised on invalid list assignment
# slice
s = Series(list('abc'))
def f():
s[0:3] = list(range(27))
pytest.raises(ValueError, f)
s[0:3] = list(range(3))
expected = Series([0, 1, 2])
        assert_series_equal(s.astype(np.int64), expected)
# slice with step
s = Series(list('abcdef'))
def f():
s[0:4:2] = list(range(27))
pytest.raises(ValueError, f)
s = Series(list('abcdef'))
s[0:4:2] = list(range(2))
expected = Series([0, 'b', 1, 'd', 'e', 'f'])
assert_series_equal(s, expected)
# neg slices
s = Series(list('abcdef'))
def f():
s[:-1] = list(range(27))
pytest.raises(ValueError, f)
s[-3:-1] = list(range(2))
expected = Series(['a', 'b', 'c', 0, 1, 'f'])
assert_series_equal(s, expected)
# list
s = Series(list('abc'))
def f():
s[[0, 1, 2]] = list(range(27))
pytest.raises(ValueError, f)
s = Series(list('abc'))
def f():
s[[0, 1, 2]] = list(range(2))
pytest.raises(ValueError, f)
# scalar
s = Series(list('abc'))
s[0] = list(range(10))
expected = Series([list(range(10)), 'b', 'c'])
assert_series_equal(s, expected)
def test_where_broadcast(self):
# Test a variety of differently sized series
for size in range(2, 6):
# Test a variety of boolean indices
for selection in [
# First element should be set
np.resize([True, False, False, False, False], size),
                # Set alternating elements
np.resize([True, False], size),
# No element should be set
np.resize([False], size)]:
# Test a variety of different numbers as content
for item in [2.0, np.nan, np.finfo(np.float).max,
np.finfo(np.float).min]:
# Test numpy arrays, lists and tuples as the input to be
# broadcast
for arr in [np.array([item]), [item], (item, )]:
data = np.arange(size, dtype=float)
s = Series(data)
s[selection] = arr
# Construct the expected series by taking the source
# data or item based on the selection
expected = Series([item if use_item else data[
i] for i, use_item in enumerate(selection)])
assert_series_equal(s, expected)
s = Series(data)
result = s.where(~selection, arr)
assert_series_equal(result, expected)
def test_where_inplace(self):
s = Series(np.random.randn(5))
cond = s > 0
rs = s.copy()
rs.where(cond, inplace=True)
assert_series_equal(rs.dropna(), s[cond])
assert_series_equal(rs, s.where(cond))
rs = s.copy()
rs.where(cond, -s, inplace=True)
assert_series_equal(rs, s.where(cond, -s))
def test_where_dups(self):
# GH 4550
# where crashes with dups in index
s1 = Series(list(range(3)))
s2 = Series(list(range(3)))
comb = pd.concat([s1, s2])
result = comb.where(comb < 2)
expected = Series([0, 1, np.nan, 0, 1, np.nan],
index=[0, 1, 2, 0, 1, 2])
assert_series_equal(result, expected)
# GH 4548
# inplace updating not working with dups
comb[comb < 1] = 5
expected = Series([5, 1, 2, 5, 1, 2], index=[0, 1, 2, 0, 1, 2])
assert_series_equal(comb, expected)
comb[comb < 2] += 10
expected = Series([5, 11, 2, 5, 11, 2], index=[0, 1, 2, 0, 1, 2])
assert_series_equal(comb, expected)
def test_where_datetime(self):
s = Series(date_range('20130102', periods=2))
expected = Series([10, 10], dtype='datetime64[ns]')
mask = np.array([False, False])
rs = s.where(mask, [10, 10])
assert_series_equal(rs, expected)
rs = s.where(mask, 10)
assert_series_equal(rs, expected)
rs = s.where(mask, 10.0)
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, 10.0])
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, np.nan])
expected = Series([10, None], dtype='datetime64[ns]')
assert_series_equal(rs, expected)
# GH 15701
timestamps = ['2016-12-31 12:00:04+00:00',
'2016-12-31 12:00:04.010000+00:00']
s = Series([pd.Timestamp(t) for t in timestamps])
rs = s.where(Series([False, True]))
expected = Series([pd.NaT, s[1]])
assert_series_equal(rs, expected)
def test_where_timedelta(self):
s = Series([1, 2], dtype='timedelta64[ns]')
expected = Series([10, 10], dtype='timedelta64[ns]')
mask = np.array([False, False])
rs = s.where(mask, [10, 10])
assert_series_equal(rs, expected)
rs = s.where(mask, 10)
assert_series_equal(rs, expected)
rs = s.where(mask, 10.0)
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, 10.0])
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, np.nan])
expected = Series([10, None], dtype='timedelta64[ns]')
assert_series_equal(rs, expected)
def test_mask(self):
# compare with tested results in test_where
s = Series(np.random.randn(5))
cond = s > 0
rs = s.where(~cond, np.nan)
assert_series_equal(rs, s.mask(cond))
rs = s.where(~cond)
rs2 = s.mask(cond)
assert_series_equal(rs, rs2)
rs = s.where(~cond, -s)
rs2 = s.mask(cond, -s)
assert_series_equal(rs, rs2)
cond = Series([True, False, False, True, False], index=s.index)
s2 = -(s.abs())
rs = s2.where(~cond[:3])
rs2 = s2.mask(cond[:3])
assert_series_equal(rs, rs2)
rs = s2.where(~cond[:3], -s2)
rs2 = s2.mask(cond[:3], -s2)
assert_series_equal(rs, rs2)
pytest.raises(ValueError, s.mask, 1)
pytest.raises(ValueError, s.mask, cond[:3].values, -s)
# dtype changes
s = Series([1, 2, 3, 4])
result = s.mask(s > 2, np.nan)
expected = Series([1, 2, np.nan, np.nan])
assert_series_equal(result, expected)
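    # Editor's note (not part of the original test suite): the methods
    # exercised above are complements -- Series.where keeps values where the
    # condition is True and replaces the rest, while Series.mask replaces
    # values where the condition is True. For s = Series([1, 2, 3]),
    # s.where(s > 1) gives [NaN, 2, 3] and s.mask(s > 1) gives [1, NaN, NaN].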
def test_mask_broadcast(self):
# GH 8801
# copied from test_where_broadcast
for size in range(2, 6):
for selection in [
# First element should be set
np.resize([True, False, False, False, False], size),
                # Set alternating elements
np.resize([True, False], size),
# No element should be set
np.resize([False], size)]:
for item in [2.0, np.nan, np.finfo(np.float).max,
np.finfo(np.float).min]:
for arr in [np.array([item]), [item], (item, )]:
data = np.arange(size, dtype=float)
s = Series(data)
result = s.mask(selection, arr)
expected = Series([item if use_item else data[
i] for i, use_item in enumerate(selection)])
assert_series_equal(result, expected)
def test_mask_inplace(self):
s = Series(np.random.randn(5))
cond = s > 0
rs = s.copy()
rs.mask(cond, inplace=True)
assert_series_equal(rs.dropna(), s[~cond])
assert_series_equal(rs, s.mask(cond))
rs = s.copy()
rs.mask(cond, -s, inplace=True)
assert_series_equal(rs, s.mask(cond, -s))
def test_ix_setitem(self):
inds = self.series.index[[3, 4, 7]]
result = self.series.copy()
result.loc[inds] = 5
expected = self.series.copy()
expected[[3, 4, 7]] = 5
assert_series_equal(result, expected)
result.iloc[5:10] = 10
expected[5:10] = 10
assert_series_equal(result, expected)
# set slice with indices
d1, d2 = self.series.index[[5, 15]]
result.loc[d1:d2] = 6
expected[5:16] = 6 # because it's inclusive
assert_series_equal(result, expected)
# set index value
self.series.loc[d1] = 4
self.series.loc[d2] = 6
assert self.series[d1] == 4
assert self.series[d2] == 6
def test_where_numeric_with_string(self):
# GH 9280
s = pd.Series([1, 2, 3])
w = s.where(s > 1, 'X')
assert not is_integer(w[0])
assert is_integer(w[1])
assert is_integer(w[2])
assert isinstance(w[0], str)
assert w.dtype == 'object'
w = s.where(s > 1, ['X', 'Y', 'Z'])
assert not is_integer(w[0])
assert is_integer(w[1])
assert is_integer(w[2])
assert isinstance(w[0], str)
assert w.dtype == 'object'
w = s.where(s > 1, np.array(['X', 'Y', 'Z']))
assert not is_integer(w[0])
assert is_integer(w[1])
assert is_integer(w[2])
assert isinstance(w[0], str)
assert w.dtype == 'object'
def test_setitem_boolean(self):
mask = self.series > self.series.median()
        # similarly indexed series
result = self.series.copy()
result[mask] = self.series * 2
expected = self.series * 2
assert_series_equal(result[mask], expected[mask])
# needs alignment
result = self.series.copy()
result[mask] = (self.series * 2)[0:5]
expected = (self.series * 2)[0:5].reindex_like(self.series)
expected[-mask] = self.series[mask]
assert_series_equal(result[mask], expected[mask])
def test_ix_setitem_boolean(self):
mask = self.series > self.series.median()
result = self.series.copy()
result.loc[mask] = 0
expected = self.series
expected[mask] = 0
assert_series_equal(result, expected)
def test_ix_setitem_corner(self):
inds = list(self.series.index[[5, 8, 12]])
self.series.loc[inds] = 5
pytest.raises(Exception, self.series.loc.__setitem__,
inds + ['foo'], 5)
def test_get_set_boolean_different_order(self):
ordered = self.series.sort_values()
# setting
copy = self.series.copy()
copy[ordered > 0] = 0
expected = self.series.copy()
expected[expected > 0] = 0
assert_series_equal(copy, expected)
# getting
sel = self.series[ordered > 0]
exp = self.series[self.series > 0]
assert_series_equal(sel, exp)
def test_setitem_na(self):
# these induce dtype changes
expected = Series([np.nan, 3, np.nan, 5, np.nan, 7, np.nan, 9, np.nan])
s = Series([2, 3, 4, 5, 6, 7, 8, 9, 10])
s[::2] = np.nan
assert_series_equal(s, expected)
        # gets coerced to float, right?
expected = Series([np.nan, 1, np.nan, 0])
s = Series([True, True, False, False])
s[::2] = np.nan
assert_series_equal(s, expected)
expected = Series([np.nan, np.nan, np.nan, np.nan, np.nan, 5, 6, 7, 8,
9])
s = Series(np.arange(10))
s[:5] = np.nan
assert_series_equal(s, expected)
def test_basic_indexing(self):
s = Series(np.random.randn(5), index=['a', 'b', 'a', 'a', 'b'])
pytest.raises(IndexError, s.__getitem__, 5)
pytest.raises(IndexError, s.__setitem__, 5, 0)
pytest.raises(KeyError, s.__getitem__, 'c')
s = s.sort_index()
pytest.raises(IndexError, s.__getitem__, 5)
pytest.raises(IndexError, s.__setitem__, 5, 0)
def test_int_indexing(self):
s = Series(np.random.randn(6), index=[0, 0, 1, 1, 2, 2])
pytest.raises(KeyError, s.__getitem__, 5)
pytest.raises(KeyError, s.__getitem__, 'c')
# not monotonic
s = Series(np.random.randn(6), index=[2, 2, 0, 0, 1, 1])
pytest.raises(KeyError, s.__getitem__, 5)
pytest.raises(KeyError, s.__getitem__, 'c')
def test_datetime_indexing(self):
from pandas import date_range
index = date_range('1/1/2000', '1/7/2000')
index = index.repeat(3)
s = Series(len(index), index=index)
stamp = Timestamp('1/8/2000')
pytest.raises(KeyError, s.__getitem__, stamp)
s[stamp] = 0
assert s[stamp] == 0
# not monotonic
s = Series(len(index), index=index)
s = s[::-1]
pytest.raises(KeyError, s.__getitem__, stamp)
s[stamp] = 0
assert s[stamp] == 0
def test_timedelta_assignment(self):
# GH 8209
s = Series([])
s.loc['B'] = timedelta(1)
tm.assert_series_equal(s, Series(Timedelta('1 days'), index=['B']))
s = s.reindex(s.index.insert(0, 'A'))
tm.assert_series_equal(s, Series(
[np.nan, Timedelta('1 days')], index=['A', 'B']))
result = s.fillna(timedelta(1))
expected = Series(Timedelta('1 days'), index=['A', 'B'])
tm.assert_series_equal(result, expected)
s.loc['A'] = timedelta(1)
tm.assert_series_equal(s, expected)
# GH 14155
s = Series(10 * [np.timedelta64(10, 'm')])
s.loc[[1, 2, 3]] = np.timedelta64(20, 'm')
expected = pd.Series(10 * [np.timedelta64(10, 'm')])
expected.loc[[1, 2, 3]] = pd.Timedelta(np.timedelta64(20, 'm'))
tm.assert_series_equal(s, expected)
def test_underlying_data_conversion(self):
# GH 4080
df = DataFrame(dict((c, [1, 2, 3]) for c in ['a', 'b', 'c']))
df.set_index(['a', 'b', 'c'], inplace=True)
s = Series([1], index=[(2, 2, 2)])
df['val'] = 0
df
df['val'].update(s)
expected = DataFrame(
dict(a=[1, 2, 3], b=[1, 2, 3], c=[1, 2, 3], val=[0, 1, 0]))
expected.set_index(['a', 'b', 'c'], inplace=True)
tm.assert_frame_equal(df, expected)
# GH 3970
# these are chained assignments as well
pd.set_option('chained_assignment', None)
df = DataFrame({"aa": range(5), "bb": [2.2] * 5})
df["cc"] = 0.0
ck = [True] * len(df)
df["bb"].iloc[0] = .13
# TODO: unused
df_tmp = df.iloc[ck] # noqa
df["bb"].iloc[0] = .15
assert df['bb'].iloc[0] == 0.15
pd.set_option('chained_assignment', 'raise')
# GH 3217
df = DataFrame(dict(a=[1, 3], b=[np.nan, 2]))
df['c'] = np.nan
df['c'].update(pd.Series(['foo'], index=[0]))
expected = DataFrame(dict(a=[1, 3], b=[np.nan, 2], c=['foo', np.nan]))
tm.assert_frame_equal(df, expected)
def test_preserveRefs(self):
seq = self.ts[[5, 10, 15]]
seq[1] = np.NaN
assert not np.isnan(self.ts[10])
def test_drop(self):
# unique
s = Series([1, 2], index=['one', 'two'])
expected = Series([1], index=['one'])
result = s.drop(['two'])
assert_series_equal(result, expected)
result = s.drop('two', axis='rows')
assert_series_equal(result, expected)
# non-unique
# GH 5248
s = Series([1, 1, 2], index=['one', 'two', 'one'])
expected = Series([1, 2], index=['one', 'one'])
result = s.drop(['two'], axis=0)
assert_series_equal(result, expected)
result = s.drop('two')
assert_series_equal(result, expected)
expected = Series([1], index=['two'])
result = s.drop(['one'])
assert_series_equal(result, expected)
result = s.drop('one')
assert_series_equal(result, expected)
# single string/tuple-like
s = Series(range(3), index=list('abc'))
pytest.raises(ValueError, s.drop, 'bc')
pytest.raises(ValueError, s.drop, ('a', ))
# errors='ignore'
s = Series(range(3), index=list('abc'))
result = s.drop('bc', errors='ignore')
assert_series_equal(result, s)
result = s.drop(['a', 'd'], errors='ignore')
expected = s.iloc[1:]
assert_series_equal(result, expected)
# bad axis
pytest.raises(ValueError, s.drop, 'one', axis='columns')
# GH 8522
s = Series([2, 3], index=[True, False])
assert s.index.is_object()
result = s.drop(True)
expected = Series([3], index=[False])
assert_series_equal(result, expected)
def test_align(self):
def _check_align(a, b, how='left', fill=None):
aa, ab = a.align(b, join=how, fill_value=fill)
join_index = a.index.join(b.index, how=how)
if fill is not None:
diff_a = aa.index.difference(join_index)
diff_b = ab.index.difference(join_index)
if len(diff_a) > 0:
assert (aa.reindex(diff_a) == fill).all()
if len(diff_b) > 0:
assert (ab.reindex(diff_b) == fill).all()
ea = a.reindex(join_index)
eb = b.reindex(join_index)
if fill is not None:
ea = ea.fillna(fill)
eb = eb.fillna(fill)
assert_series_equal(aa, ea)
assert_series_equal(ab, eb)
assert aa.name == 'ts'
assert ea.name == 'ts'
assert ab.name == 'ts'
assert eb.name == 'ts'
for kind in JOIN_TYPES:
_check_align(self.ts[2:], self.ts[:-5], how=kind)
_check_align(self.ts[2:], self.ts[:-5], how=kind, fill=-1)
# empty left
_check_align(self.ts[:0], self.ts[:-5], how=kind)
_check_align(self.ts[:0], self.ts[:-5], how=kind, fill=-1)
# empty right
_check_align(self.ts[:-5], self.ts[:0], how=kind)
_check_align(self.ts[:-5], self.ts[:0], how=kind, fill=-1)
# both empty
_check_align(self.ts[:0], self.ts[:0], how=kind)
_check_align(self.ts[:0], self.ts[:0], how=kind, fill=-1)
def test_align_fill_method(self):
def _check_align(a, b, how='left', method='pad', limit=None):
aa, ab = a.align(b, join=how, method=method, limit=limit)
join_index = a.index.join(b.index, how=how)
ea = a.reindex(join_index)
eb = b.reindex(join_index)
ea = ea.fillna(method=method, limit=limit)
eb = eb.fillna(method=method, limit=limit)
assert_series_equal(aa, ea)
assert_series_equal(ab, eb)
for kind in JOIN_TYPES:
for meth in ['pad', 'bfill']:
_check_align(self.ts[2:], self.ts[:-5], how=kind, method=meth)
_check_align(self.ts[2:], self.ts[:-5], how=kind, method=meth,
limit=1)
# empty left
_check_align(self.ts[:0], self.ts[:-5], how=kind, method=meth)
_check_align(self.ts[:0], self.ts[:-5], how=kind, method=meth,
limit=1)
# empty right
_check_align(self.ts[:-5], self.ts[:0], how=kind, method=meth)
_check_align(self.ts[:-5], self.ts[:0], how=kind, method=meth,
limit=1)
# both empty
_check_align(self.ts[:0], self.ts[:0], how=kind, method=meth)
_check_align(self.ts[:0], self.ts[:0], how=kind, method=meth,
limit=1)
def test_align_nocopy(self):
b = self.ts[:5].copy()
# do copy
a = self.ts.copy()
ra, _ = a.align(b, join='left')
ra[:5] = 5
assert not (a[:5] == 5).any()
# do not copy
a = self.ts.copy()
ra, _ = a.align(b, join='left', copy=False)
ra[:5] = 5
assert (a[:5] == 5).all()
# do copy
a = self.ts.copy()
b = self.ts[:5].copy()
_, rb = a.align(b, join='right')
rb[:3] = 5
assert not (b[:3] == 5).any()
# do not copy
a = self.ts.copy()
b = self.ts[:5].copy()
_, rb = a.align(b, join='right', copy=False)
rb[:2] = 5
assert (b[:2] == 5).all()
def test_align_same_index(self):
a, b = self.ts.align(self.ts, copy=False)
assert a.index is self.ts.index
assert b.index is self.ts.index
a, b = self.ts.align(self.ts, copy=True)
assert a.index is not self.ts.index
assert b.index is not self.ts.index
def test_align_multiindex(self):
# GH 10665
midx = pd.MultiIndex.from_product([range(2), range(3), range(2)],
names=('a', 'b', 'c'))
idx = pd.Index(range(2), name='b')
s1 = pd.Series(np.arange(12, dtype='int64'), index=midx)
s2 = pd.Series(np.arange(2, dtype='int64'), index=idx)
# these must be the same results (but flipped)
res1l, res1r = s1.align(s2, join='left')
res2l, res2r = s2.align(s1, join='right')
expl = s1
tm.assert_series_equal(expl, res1l)
tm.assert_series_equal(expl, res2r)
expr = pd.Series([0, 0, 1, 1, np.nan, np.nan] * 2, index=midx)
tm.assert_series_equal(expr, res1r)
tm.assert_series_equal(expr, res2l)
res1l, res1r = s1.align(s2, join='right')
res2l, res2r = s2.align(s1, join='left')
exp_idx = pd.MultiIndex.from_product([range(2), range(2), range(2)],
names=('a', 'b', 'c'))
expl = pd.Series([0, 1, 2, 3, 6, 7, 8, 9], index=exp_idx)
tm.assert_series_equal(expl, res1l)
tm.assert_series_equal(expl, res2r)
expr = pd.Series([0, 0, 1, 1] * 2, index=exp_idx)
tm.assert_series_equal(expr, res1r)
tm.assert_series_equal(expr, res2l)
def test_reindex(self):
identity = self.series.reindex(self.series.index)
# __array_interface__ is not defined for older numpies
# and on some pythons
try:
assert np.may_share_memory(self.series.index, identity.index)
except AttributeError:
pass
assert identity.index.is_(self.series.index)
assert identity.index.identical(self.series.index)
subIndex = self.series.index[10:20]
subSeries = self.series.reindex(subIndex)
for idx, val in compat.iteritems(subSeries):
assert val == self.series[idx]
subIndex2 = self.ts.index[10:20]
subTS = self.ts.reindex(subIndex2)
for idx, val in compat.iteritems(subTS):
assert val == self.ts[idx]
stuffSeries = self.ts.reindex(subIndex)
assert np.isnan(stuffSeries).all()
# This is extremely important for the Cython code to not screw up
nonContigIndex = self.ts.index[::2]
subNonContig = self.ts.reindex(nonContigIndex)
for idx, val in compat.iteritems(subNonContig):
assert val == self.ts[idx]
        # return a copy with the same index here
result = self.ts.reindex()
assert not (result is self.ts)
def test_reindex_nan(self):
ts = Series([2, 3, 5, 7], index=[1, 4, nan, 8])
i, j = [nan, 1, nan, 8, 4, nan], [2, 0, 2, 3, 1, 2]
assert_series_equal(ts.reindex(i), ts.iloc[j])
ts.index = ts.index.astype('object')
# reindex coerces index.dtype to float, loc/iloc doesn't
assert_series_equal(ts.reindex(i), ts.iloc[j], check_index_type=False)
def test_reindex_series_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
series = Series(rng)
result = series.reindex(lrange(15))
assert np.issubdtype(result.dtype, np.dtype('M8[ns]'))
mask = result.isnull()
assert mask[-5:].all()
assert not mask[:-5].any()
def test_reindex_with_datetimes(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
result = ts.reindex(list(ts.index[5:10]))
expected = ts[5:10]
tm.assert_series_equal(result, expected)
result = ts[list(ts.index[5:10])]
tm.assert_series_equal(result, expected)
def test_reindex_corner(self):
# (don't forget to fix this) I think it's fixed
self.empty.reindex(self.ts.index, method='pad') # it works
# corner case: pad empty series
reindexed = self.empty.reindex(self.ts.index, method='pad')
# pass non-Index
reindexed = self.ts.reindex(list(self.ts.index))
assert_series_equal(self.ts, reindexed)
# bad fill method
ts = self.ts[::2]
pytest.raises(Exception, ts.reindex, self.ts.index, method='foo')
def test_reindex_pad(self):
s = Series(np.arange(10), dtype='int64')
s2 = s[::2]
reindexed = s2.reindex(s.index, method='pad')
reindexed2 = s2.reindex(s.index, method='ffill')
assert_series_equal(reindexed, reindexed2)
expected = Series([0, 0, 2, 2, 4, 4, 6, 6, 8, 8], index=np.arange(10))
assert_series_equal(reindexed, expected)
# GH4604
s = Series([1, 2, 3, 4, 5], index=['a', 'b', 'c', 'd', 'e'])
new_index = ['a', 'g', 'c', 'f']
expected = Series([1, 1, 3, 3], index=new_index)
# this changes dtype because the ffill happens after
result = s.reindex(new_index).ffill()
assert_series_equal(result, expected.astype('float64'))
result = s.reindex(new_index).ffill(downcast='infer')
assert_series_equal(result, expected)
expected = Series([1, 5, 3, 5], index=new_index)
result = s.reindex(new_index, method='ffill')
assert_series_equal(result, expected)
        # inference of new dtype
s = Series([True, False, False, True], index=list('abcd'))
new_index = 'agc'
result = s.reindex(list(new_index)).ffill()
expected = Series([True, True, False], index=list(new_index))
assert_series_equal(result, expected)
# GH4618 shifted series downcasting
s = Series(False, index=lrange(0, 5))
result = s.shift(1).fillna(method='bfill')
expected = Series(False, index=lrange(0, 5))
assert_series_equal(result, expected)
def test_reindex_nearest(self):
s = Series(np.arange(10, dtype='int64'))
target = [0.1, 0.9, 1.5, 2.0]
actual = s.reindex(target, method='nearest')
expected = Series(np.around(target).astype('int64'), target)
assert_series_equal(expected, actual)
actual = s.reindex_like(actual, method='nearest')
assert_series_equal(expected, actual)
actual = s.reindex_like(actual, method='nearest', tolerance=1)
assert_series_equal(expected, actual)
actual = s.reindex(target, method='nearest', tolerance=0.2)
expected = Series([0, 1, np.nan, 2], target)
assert_series_equal(expected, actual)
def test_reindex_backfill(self):
pass
def test_reindex_int(self):
ts = self.ts[::2]
int_ts = Series(np.zeros(len(ts), dtype=int), index=ts.index)
# this should work fine
reindexed_int = int_ts.reindex(self.ts.index)
# if NaNs introduced
assert reindexed_int.dtype == np.float_
# NO NaNs introduced
reindexed_int = int_ts.reindex(int_ts.index[::2])
assert reindexed_int.dtype == np.int_
def test_reindex_bool(self):
# A series other than float, int, string, or object
ts = self.ts[::2]
bool_ts = Series(np.zeros(len(ts), dtype=bool), index=ts.index)
# this should work fine
reindexed_bool = bool_ts.reindex(self.ts.index)
# if NaNs introduced
assert reindexed_bool.dtype == np.object_
# NO NaNs introduced
reindexed_bool = bool_ts.reindex(bool_ts.index[::2])
assert reindexed_bool.dtype == np.bool_
def test_reindex_bool_pad(self):
# fail
ts = self.ts[5:]
bool_ts = Series(np.zeros(len(ts), dtype=bool), index=ts.index)
filled_bool = bool_ts.reindex(self.ts.index, method='pad')
assert isnull(filled_bool[:5]).all()
def test_reindex_like(self):
other = self.ts[::2]
assert_series_equal(self.ts.reindex(other.index),
self.ts.reindex_like(other))
# GH 7179
day1 = datetime(2013, 3, 5)
day2 = datetime(2013, 5, 5)
day3 = datetime(2014, 3, 5)
series1 = Series([5, None, None], [day1, day2, day3])
series2 = Series([None, None], [day1, day3])
result = series1.reindex_like(series2, method='pad')
expected = Series([5, np.nan], index=[day1, day3])
assert_series_equal(result, expected)
def test_reindex_fill_value(self):
# -----------------------------------------------------------
# floats
floats = Series([1., 2., 3.])
result = floats.reindex([1, 2, 3])
expected = Series([2., 3., np.nan], index=[1, 2, 3])
assert_series_equal(result, expected)
result = floats.reindex([1, 2, 3], fill_value=0)
expected = Series([2., 3., 0], index=[1, 2, 3])
assert_series_equal(result, expected)
# -----------------------------------------------------------
# ints
ints = Series([1, 2, 3])
result = ints.reindex([1, 2, 3])
expected = Series([2., 3., np.nan], index=[1, 2, 3])
assert_series_equal(result, expected)
# don't upcast
result = ints.reindex([1, 2, 3], fill_value=0)
expected = Series([2, 3, 0], index=[1, 2, 3])
assert issubclass(result.dtype.type, np.integer)
assert_series_equal(result, expected)
# -----------------------------------------------------------
# objects
objects = Series([1, 2, 3], dtype=object)
result = objects.reindex([1, 2, 3])
expected = Series([2, 3, np.nan], index=[1, 2, 3], dtype=object)
assert_series_equal(result, expected)
result = objects.reindex([1, 2, 3], fill_value='foo')
expected = Series([2, 3, 'foo'], index=[1, 2, 3], dtype=object)
assert_series_equal(result, expected)
# ------------------------------------------------------------
# bools
bools = Series([True, False, True])
result = bools.reindex([1, 2, 3])
expected = Series([False, True, np.nan], index=[1, 2, 3], dtype=object)
assert_series_equal(result, expected)
result = bools.reindex([1, 2, 3], fill_value=False)
expected = Series([False, True, False], index=[1, 2, 3])
assert_series_equal(result, expected)
def test_select(self):
n = len(self.ts)
result = self.ts.select(lambda x: x >= self.ts.index[n // 2])
expected = self.ts.reindex(self.ts.index[n // 2:])
assert_series_equal(result, expected)
result = self.ts.select(lambda x: x.weekday() == 2)
expected = self.ts[self.ts.index.weekday == 2]
assert_series_equal(result, expected)
def test_cast_on_putmask(self):
# GH 2746
# need to upcast
s = Series([1, 2], index=[1, 2], dtype='int64')
s[[True, False]] = Series([0], index=[1], dtype='int64')
expected = Series([0, 2], index=[1, 2], dtype='int64')
assert_series_equal(s, expected)
def test_type_promote_putmask(self):
# GH8387: test that changing types does not break alignment
ts = Series(np.random.randn(100), index=np.arange(100, 0, -1)).round(5)
left, mask = ts.copy(), ts > 0
right = ts[mask].copy().map(str)
left[mask] = right
assert_series_equal(left, ts.map(lambda t: str(t) if t > 0 else t))
s = Series([0, 1, 2, 0])
mask = s > 0
s2 = s[mask].map(str)
s[mask] = s2
assert_series_equal(s, Series([0, '1', '2', 0]))
s = Series([0, 'foo', 'bar', 0])
mask = Series([False, True, True, False])
s2 = s[mask]
s[mask] = s2
assert_series_equal(s, Series([0, 'foo', 'bar', 0]))
def test_head_tail(self):
assert_series_equal(self.series.head(), self.series[:5])
assert_series_equal(self.series.head(0), self.series[0:0])
assert_series_equal(self.series.tail(), self.series[-5:])
assert_series_equal(self.series.tail(0), self.series[0:0])
def test_multilevel_preserve_name(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], ['one', 'two',
'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
s = Series(np.random.randn(len(index)), index=index, name='sth')
result = s['foo']
result2 = s.loc['foo']
assert result.name == s.name
assert result2.name == s.name
def test_setitem_scalar_into_readonly_backing_data(self):
# GH14359: test that you cannot mutate a read only buffer
array = np.zeros(5)
array.flags.writeable = False # make the array immutable
series = Series(array)
for n in range(len(series)):
with pytest.raises(ValueError):
series[n] = 1
assert array[n] == 0
def test_setitem_slice_into_readonly_backing_data(self):
# GH14359: test that you cannot mutate a read only buffer
array = np.zeros(5)
array.flags.writeable = False # make the array immutable
series = Series(array)
with pytest.raises(ValueError):
series[1:3] = 1
assert not array.any()
class TestTimeSeriesDuplicates(object):
def setup_method(self, method):
dates = [datetime(2000, 1, 2), datetime(2000, 1, 2),
datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 3), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 4),
datetime(2000, 1, 4), datetime(2000, 1, 5)]
self.dups = Series(np.random.randn(len(dates)), index=dates)
def test_constructor(self):
assert isinstance(self.dups, Series)
assert isinstance(self.dups.index, DatetimeIndex)
def test_is_unique_monotonic(self):
assert not self.dups.index.is_unique
def test_index_unique(self):
uniques = self.dups.index.unique()
expected = DatetimeIndex([datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 5)])
assert uniques.dtype == 'M8[ns]' # sanity
tm.assert_index_equal(uniques, expected)
assert self.dups.index.nunique() == 4
# #2563
assert isinstance(uniques, DatetimeIndex)
dups_local = self.dups.index.tz_localize('US/Eastern')
dups_local.name = 'foo'
result = dups_local.unique()
expected = DatetimeIndex(expected, name='foo')
expected = expected.tz_localize('US/Eastern')
assert result.tz is not None
assert result.name == 'foo'
tm.assert_index_equal(result, expected)
# NaT, note this is excluded
arr = [1370745748 + t for t in range(20)] + [tslib.iNaT]
idx = DatetimeIndex(arr * 3)
tm.assert_index_equal(idx.unique(), DatetimeIndex(arr))
assert idx.nunique() == 20
assert idx.nunique(dropna=False) == 21
arr = [Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t)
for t in range(20)] + [NaT]
idx = DatetimeIndex(arr * 3)
tm.assert_index_equal(idx.unique(), DatetimeIndex(arr))
assert idx.nunique() == 20
assert idx.nunique(dropna=False) == 21
def test_index_dupes_contains(self):
d = datetime(2011, 12, 5, 20, 30)
ix = DatetimeIndex([d, d])
assert d in ix
def test_duplicate_dates_indexing(self):
ts = self.dups
uniques = ts.index.unique()
for date in uniques:
result = ts[date]
mask = ts.index == date
total = (ts.index == date).sum()
expected = ts[mask]
if total > 1:
assert_series_equal(result, expected)
else:
assert_almost_equal(result, expected[0])
cp = ts.copy()
cp[date] = 0
expected = Series(np.where(mask, 0, ts), index=ts.index)
assert_series_equal(cp, expected)
pytest.raises(KeyError, ts.__getitem__, datetime(2000, 1, 6))
# new index
ts[datetime(2000, 1, 6)] = 0
assert ts[datetime(2000, 1, 6)] == 0
def test_range_slice(self):
idx = DatetimeIndex(['1/1/2000', '1/2/2000', '1/2/2000', '1/3/2000',
'1/4/2000'])
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts['1/2/2000':]
expected = ts[1:]
assert_series_equal(result, expected)
result = ts['1/2/2000':'1/3/2000']
expected = ts[1:4]
assert_series_equal(result, expected)
def test_groupby_average_dup_values(self):
result = self.dups.groupby(level=0).mean()
expected = self.dups.groupby(self.dups.index).mean()
assert_series_equal(result, expected)
def test_indexing_over_size_cutoff(self):
import datetime
# #1821
old_cutoff = _index._SIZE_CUTOFF
try:
_index._SIZE_CUTOFF = 1000
            # create a large list of non-periodic datetimes
dates = []
sec = datetime.timedelta(seconds=1)
half_sec = datetime.timedelta(microseconds=500000)
d = datetime.datetime(2011, 12, 5, 20, 30)
n = 1100
for i in range(n):
dates.append(d)
dates.append(d + sec)
dates.append(d + sec + half_sec)
dates.append(d + sec + sec + half_sec)
d += 3 * sec
# duplicate some values in the list
duplicate_positions = np.random.randint(0, len(dates) - 1, 20)
for p in duplicate_positions:
dates[p + 1] = dates[p]
df = DataFrame(np.random.randn(len(dates), 4),
index=dates,
columns=list('ABCD'))
pos = n * 3
timestamp = df.index[pos]
assert timestamp in df.index
# it works!
df.loc[timestamp]
assert len(df.loc[[timestamp]]) > 0
finally:
_index._SIZE_CUTOFF = old_cutoff
def test_indexing_unordered(self):
# GH 2437
rng = date_range(start='2011-01-01', end='2011-01-15')
ts = Series(np.random.rand(len(rng)), index=rng)
ts2 = pd.concat([ts[0:4], ts[-4:], ts[4:-4]])
for t in ts.index:
# TODO: unused?
s = str(t) # noqa
expected = ts[t]
result = ts2[t]
assert expected == result
# GH 3448 (ranges)
def compare(slobj):
result = ts2[slobj].copy()
result = result.sort_index()
expected = ts[slobj]
assert_series_equal(result, expected)
compare(slice('2011-01-01', '2011-01-15'))
compare(slice('2010-12-30', '2011-01-15'))
compare(slice('2011-01-01', '2011-01-16'))
# partial ranges
compare(slice('2011-01-01', '2011-01-6'))
compare(slice('2011-01-06', '2011-01-8'))
compare(slice('2011-01-06', '2011-01-12'))
# single values
result = ts2['2011'].sort_index()
expected = ts['2011']
assert_series_equal(result, expected)
# diff freq
rng = date_range(datetime(2005, 1, 1), periods=20, freq='M')
ts = Series(np.arange(len(rng)), index=rng)
ts = ts.take(np.random.permutation(20))
result = ts['2005']
for t in result.index:
assert t.year == 2005
def test_indexing(self):
idx = date_range("2001-1-1", periods=20, freq='M')
ts = Series(np.random.rand(len(idx)), index=idx)
# getting
# GH 3070, make sure semantics work on Series/Frame
expected = ts['2001']
expected.name = 'A'
df = DataFrame(dict(A=ts))
result = df['2001']['A']
assert_series_equal(expected, result)
# setting
ts['2001'] = 1
expected = ts['2001']
expected.name = 'A'
df.loc['2001', 'A'] = 1
result = df['2001']['A']
assert_series_equal(expected, result)
# GH3546 (not including times on the last day)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:00',
freq='H')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected, ts)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:59',
freq='S')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected, ts)
idx = [Timestamp('2013-05-31 00:00'),
Timestamp(datetime(2013, 5, 31, 23, 59, 59, 999999))]
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013']
assert_series_equal(expected, ts)
# GH14826, indexing with a seconds resolution string / datetime object
df = DataFrame(np.random.rand(5, 5),
columns=['open', 'high', 'low', 'close', 'volume'],
index=date_range('2012-01-02 18:01:00',
periods=5, tz='US/Central', freq='s'))
expected = df.loc[[df.index[2]]]
# this is a single date, so will raise
pytest.raises(KeyError, df.__getitem__, '2012-01-02 18:01:02', )
pytest.raises(KeyError, df.__getitem__, df.index[2], )
class TestDatetimeIndexing(object):
"""
Also test support for datetime64[ns] in Series / DataFrame
"""
def setup_method(self, method):
dti = DatetimeIndex(start=datetime(2005, 1, 1),
end=datetime(2005, 1, 10), freq='Min')
self.series = Series(np.random.rand(len(dti)), dti)
def test_fancy_getitem(self):
dti = DatetimeIndex(freq='WOM-1FRI', start=datetime(2005, 1, 1),
end=datetime(2010, 1, 1))
s = Series(np.arange(len(dti)), index=dti)
assert s[48] == 48
assert s['1/2/2009'] == 48
assert s['2009-1-2'] == 48
assert s[datetime(2009, 1, 2)] == 48
assert s[lib.Timestamp(datetime(2009, 1, 2))] == 48
pytest.raises(KeyError, s.__getitem__, '2009-1-3')
assert_series_equal(s['3/6/2009':'2009-06-05'],
s[datetime(2009, 3, 6):datetime(2009, 6, 5)])
def test_fancy_setitem(self):
dti = DatetimeIndex(freq='WOM-1FRI', start=datetime(2005, 1, 1),
end=datetime(2010, 1, 1))
s = Series(np.arange(len(dti)), index=dti)
s[48] = -1
assert s[48] == -1
s['1/2/2009'] = -2
assert s[48] == -2
s['1/2/2009':'2009-06-05'] = -3
assert (s[48:54] == -3).all()
def test_dti_snap(self):
dti = DatetimeIndex(['1/1/2002', '1/2/2002', '1/3/2002', '1/4/2002',
'1/5/2002', '1/6/2002', '1/7/2002'], freq='D')
res = dti.snap(freq='W-MON')
exp = date_range('12/31/2001', '1/7/2002', freq='w-mon')
exp = exp.repeat([3, 4])
assert (res == exp).all()
res = dti.snap(freq='B')
exp = date_range('1/1/2002', '1/7/2002', freq='b')
exp = exp.repeat([1, 1, 1, 2, 2])
assert (res == exp).all()
def test_dti_reset_index_round_trip(self):
dti = DatetimeIndex(start='1/1/2001', end='6/1/2001', freq='D')
d1 = DataFrame({'v': np.random.rand(len(dti))}, index=dti)
d2 = d1.reset_index()
assert d2.dtypes[0] == np.dtype('M8[ns]')
d3 = d2.set_index('index')
assert_frame_equal(d1, d3, check_names=False)
# #2329
stamp = datetime(2012, 11, 22)
df = DataFrame([[stamp, 12.1]], columns=['Date', 'Value'])
df = df.set_index('Date')
assert df.index[0] == stamp
assert df.reset_index()['Date'][0] == stamp
def test_series_set_value(self):
# #1561
dates = [datetime(2001, 1, 1), datetime(2001, 1, 2)]
index = DatetimeIndex(dates)
s = Series().set_value(dates[0], 1.)
s2 = s.set_value(dates[1], np.nan)
exp = Series([1., np.nan], index=index)
assert_series_equal(s2, exp)
# s = Series(index[:1], index[:1])
# s2 = s.set_value(dates[1], index[1])
# assert s2.values.dtype == 'M8[ns]'
@slow
def test_slice_locs_indexerror(self):
times = [datetime(2000, 1, 1) + timedelta(minutes=i * 10)
for i in range(100000)]
s = Series(lrange(100000), times)
s.loc[datetime(1900, 1, 1):datetime(2100, 1, 1)]
def test_slicing_datetimes(self):
# GH 7523
# unique
df = DataFrame(np.arange(4., dtype='float64'),
index=[datetime(2001, 1, i, 10, 00)
for i in [1, 2, 3, 4]])
result = df.loc[datetime(2001, 1, 1, 10):]
assert_frame_equal(result, df)
result = df.loc[:datetime(2001, 1, 4, 10)]
assert_frame_equal(result, df)
result = df.loc[datetime(2001, 1, 1, 10):datetime(2001, 1, 4, 10)]
assert_frame_equal(result, df)
result = df.loc[datetime(2001, 1, 1, 11):]
expected = df.iloc[1:]
assert_frame_equal(result, expected)
result = df.loc['20010101 11':]
assert_frame_equal(result, expected)
# duplicates
df = pd.DataFrame(np.arange(5., dtype='float64'),
index=[datetime(2001, 1, i, 10, 00)
for i in [1, 2, 2, 3, 4]])
result = df.loc[datetime(2001, 1, 1, 10):]
assert_frame_equal(result, df)
result = df.loc[:datetime(2001, 1, 4, 10)]
assert_frame_equal(result, df)
result = df.loc[datetime(2001, 1, 1, 10):datetime(2001, 1, 4, 10)]
assert_frame_equal(result, df)
result = df.loc[datetime(2001, 1, 1, 11):]
expected = df.iloc[1:]
assert_frame_equal(result, expected)
result = df.loc['20010101 11':]
assert_frame_equal(result, expected)
def test_frame_datetime64_duplicated(self):
dates = date_range('2010-07-01', end='2010-08-05')
tst = DataFrame({'symbol': 'AAA', 'date': dates})
result = tst.duplicated(['date', 'symbol'])
assert (-result).all()
tst = DataFrame({'date': dates})
result = tst.duplicated()
assert (-result).all()
class TestNatIndexing(object):
def setup_method(self, method):
self.series = Series(date_range('1/1/2000', periods=10))
# ---------------------------------------------------------------------
# NaT support
def test_set_none_nan(self):
self.series[3] = None
assert self.series[3] is NaT
self.series[3:5] = None
assert self.series[4] is NaT
self.series[5] = np.nan
assert self.series[5] is NaT
self.series[5:7] = np.nan
assert self.series[6] is NaT
def test_nat_operations(self):
# GH 8617
s = Series([0, pd.NaT], dtype='m8[ns]')
exp = s[0]
assert s.median() == exp
assert s.min() == exp
assert s.max() == exp
def test_round_nat(self):
# GH14940
s = Series([pd.NaT])
expected = Series(pd.NaT)
for method in ["round", "floor", "ceil"]:
round_method = getattr(s.dt, method)
for freq in ["s", "5s", "min", "5min", "h", "5h"]:
assert_series_equal(round_method(freq), expected)
| bsd-3-clause |
micahcochran/geopandas | geopandas/tools/tests/test_sjoin.py | 1 | 10287 | from __future__ import absolute_import
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
from shapely.geometry import Point, Polygon
import geopandas
from geopandas import GeoDataFrame, GeoSeries, read_file, base
from geopandas import sjoin
import pytest
from pandas.util.testing import assert_frame_equal
pandas_0_18_problem = 'fails under pandas < 0.19 due to pandas issue 15692, '\
                      'not a problem with sjoin.'
@pytest.fixture()
def dfs(request):
polys1 = GeoSeries(
[Polygon([(0, 0), (5, 0), (5, 5), (0, 5)]),
Polygon([(5, 5), (6, 5), (6, 6), (5, 6)]),
Polygon([(6, 0), (9, 0), (9, 3), (6, 3)])])
polys2 = GeoSeries(
[Polygon([(1, 1), (4, 1), (4, 4), (1, 4)]),
Polygon([(4, 4), (7, 4), (7, 7), (4, 7)]),
Polygon([(7, 7), (10, 7), (10, 10), (7, 10)])])
df1 = GeoDataFrame({'geometry': polys1, 'df1': [0, 1, 2]})
df2 = GeoDataFrame({'geometry': polys2, 'df2': [3, 4, 5]})
if request.param == 'string-index':
df1.index = ['a', 'b', 'c']
df2.index = ['d', 'e', 'f']
    # construct the expected frames
expected = {}
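    # The expected frames are built with a helper ``_merge`` key: each row of
    # df1/df2 gets an integer tag, and an outer merge on that tag pairs up the
    # rows a spatial join should match, while the sentinel value 3 marks rows
    # with no partner, which therefore appear with NaNs in left/right joins.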
part1 = df1.copy().reset_index().rename(
columns={'index': 'index_left'})
part2 = df2.copy().iloc[[0, 1, 1, 2]].reset_index().rename(
columns={'index': 'index_right'})
part1['_merge'] = [0, 1, 2]
part2['_merge'] = [0, 0, 1, 3]
exp = pd.merge(part1, part2, on='_merge', how='outer')
expected['intersects'] = exp.drop('_merge', axis=1).copy()
part1 = df1.copy().reset_index().rename(
columns={'index': 'index_left'})
part2 = df2.copy().reset_index().rename(
columns={'index': 'index_right'})
part1['_merge'] = [0, 1, 2]
part2['_merge'] = [0, 3, 3]
exp = pd.merge(part1, part2, on='_merge', how='outer')
expected['contains'] = exp.drop('_merge', axis=1).copy()
part1['_merge'] = [0, 1, 2]
part2['_merge'] = [3, 1, 3]
exp = pd.merge(part1, part2, on='_merge', how='outer')
expected['within'] = exp.drop('_merge', axis=1).copy()
return [request.param, df1, df2, expected]
@pytest.mark.skipif(not base.HAS_SINDEX, reason='Rtree absent, skipping')
class TestSpatialJoin:
@pytest.mark.parametrize('dfs', ['default-index', 'string-index'],
indirect=True)
@pytest.mark.parametrize('op', ['intersects', 'contains', 'within'])
def test_inner(self, op, dfs):
index, df1, df2, expected = dfs
res = sjoin(df1, df2, how='inner', op=op)
exp = expected[op].dropna().copy()
exp = exp.drop('geometry_y', axis=1).rename(
columns={'geometry_x': 'geometry'})
exp[['df1', 'df2']] = exp[['df1', 'df2']].astype('int64')
if index == 'default-index':
exp[['index_left', 'index_right']] = \
exp[['index_left', 'index_right']].astype('int64')
exp = exp.set_index('index_left')
exp.index.name = None
assert_frame_equal(res, exp)
@pytest.mark.parametrize('dfs', ['default-index', 'string-index'],
indirect=True)
@pytest.mark.parametrize('op', ['intersects', 'contains', 'within'])
def test_left(self, op, dfs):
index, df1, df2, expected = dfs
res = sjoin(df1, df2, how='left', op=op)
exp = expected[op].dropna(subset=['index_left']).copy()
exp = exp.drop('geometry_y', axis=1).rename(
columns={'geometry_x': 'geometry'})
exp['df1'] = exp['df1'].astype('int64')
if index == 'default-index':
exp['index_left'] = exp['index_left'].astype('int64')
# TODO: in result the dtype is object
res['index_right'] = res['index_right'].astype(float)
exp = exp.set_index('index_left')
exp.index.name = None
assert_frame_equal(res, exp)
@pytest.mark.parametrize('dfs', ['default-index', 'string-index'],
indirect=True)
@pytest.mark.parametrize('op', ['intersects', 'contains', 'within'])
def test_right(self, op, dfs):
index, df1, df2, expected = dfs
res = sjoin(df1, df2, how='right', op=op)
exp = expected[op].dropna(subset=['index_right']).copy()
exp = exp.drop('geometry_x', axis=1).rename(
columns={'geometry_y': 'geometry'})
exp['df2'] = exp['df2'].astype('int64')
if index == 'default-index':
exp['index_right'] = exp['index_right'].astype('int64')
res['index_left'] = res['index_left'].astype(float)
exp = exp.set_index('index_right')
exp = exp.reindex(columns=res.columns)
assert_frame_equal(res, exp, check_index_type=False)
@pytest.mark.skipif(not base.HAS_SINDEX, reason='Rtree absent, skipping')
class TestSpatialJoinNYBB:
def setup_method(self):
nybb_filename = geopandas.datasets.get_path('nybb')
self.polydf = read_file(nybb_filename)
self.crs = self.polydf.crs
N = 20
b = [int(x) for x in self.polydf.total_bounds]
self.pointdf = GeoDataFrame(
[{'geometry': Point(x, y),
'pointattr1': x + y, 'pointattr2': x - y}
for x, y in zip(range(b[0], b[2], int((b[2]-b[0])/N)),
range(b[1], b[3], int((b[3]-b[1])/N)))],
crs=self.crs)
def test_geometry_name(self):
# test sjoin is working with other geometry name
polydf_original_geom_name = self.polydf.geometry.name
self.polydf = (self.polydf.rename(columns={'geometry': 'new_geom'})
.set_geometry('new_geom'))
assert polydf_original_geom_name != self.polydf.geometry.name
res = sjoin(self.polydf, self.pointdf, how="left")
assert self.polydf.geometry.name == res.geometry.name
def test_sjoin_left(self):
df = sjoin(self.pointdf, self.polydf, how='left')
assert df.shape == (21, 8)
for i, row in df.iterrows():
assert row.geometry.type == 'Point'
assert 'pointattr1' in df.columns
assert 'BoroCode' in df.columns
def test_sjoin_right(self):
# the inverse of left
df = sjoin(self.pointdf, self.polydf, how="right")
df2 = sjoin(self.polydf, self.pointdf, how="left")
assert df.shape == (12, 8)
assert df.shape == df2.shape
for i, row in df.iterrows():
assert row.geometry.type == 'MultiPolygon'
for i, row in df2.iterrows():
assert row.geometry.type == 'MultiPolygon'
def test_sjoin_inner(self):
df = sjoin(self.pointdf, self.polydf, how="inner")
assert df.shape == (11, 8)
def test_sjoin_op(self):
# points within polygons
df = sjoin(self.pointdf, self.polydf, how="left", op="within")
assert df.shape == (21, 8)
assert df.ix[1]['BoroName'] == 'Staten Island'
# points contain polygons? never happens so we should have nulls
df = sjoin(self.pointdf, self.polydf, how="left", op="contains")
assert df.shape == (21, 8)
assert np.isnan(df.ix[1]['Shape_Area'])
def test_sjoin_bad_op(self):
# AttributeError: 'Point' object has no attribute 'spandex'
with pytest.raises(ValueError):
sjoin(self.pointdf, self.polydf, how="left", op="spandex")
def test_sjoin_duplicate_column_name(self):
pointdf2 = self.pointdf.rename(columns={'pointattr1': 'Shape_Area'})
df = sjoin(pointdf2, self.polydf, how="left")
assert 'Shape_Area_left' in df.columns
assert 'Shape_Area_right' in df.columns
def test_sjoin_values(self):
# GH190
self.polydf.index = [1, 3, 4, 5, 6]
df = sjoin(self.pointdf, self.polydf, how='left')
assert df.shape == (21, 8)
df = sjoin(self.polydf, self.pointdf, how='left')
assert df.shape == (12, 8)
@pytest.mark.skipif(str(pd.__version__) < LooseVersion('0.19'),
reason=pandas_0_18_problem)
@pytest.mark.xfail
def test_no_overlapping_geometry(self):
# Note: these tests are for correctly returning GeoDataFrame
# when result of the join is empty
df_inner = sjoin(self.pointdf.iloc[17:], self.polydf, how='inner')
df_left = sjoin(self.pointdf.iloc[17:], self.polydf, how='left')
df_right = sjoin(self.pointdf.iloc[17:], self.polydf, how='right')
        # Recent Pandas development has introduced a new way of handling
        # merges; this change has altered the output when there are no
        # overlapping geometries.
if str(pd.__version__) > LooseVersion('0.18.1'):
right_idxs = pd.Series(range(0, 5), name='index_right',
dtype='int64')
else:
right_idxs = pd.Series(name='index_right', dtype='int64')
expected_inner_df = pd.concat(
[self.pointdf.iloc[:0],
pd.Series(name='index_right', dtype='int64'),
self.polydf.drop('geometry', axis=1).iloc[:0]],
axis=1)
expected_inner = GeoDataFrame(
expected_inner_df, crs={'init': 'epsg:4326', 'no_defs': True})
expected_right_df = pd.concat(
[self.pointdf.drop('geometry', axis=1).iloc[:0],
pd.concat([pd.Series(name='index_left', dtype='int64'),
right_idxs],
axis=1),
self.polydf],
axis=1)
expected_right = GeoDataFrame(
expected_right_df, crs={'init': 'epsg:4326', 'no_defs': True})\
.set_index('index_right')
expected_left_df = pd.concat(
[self.pointdf.iloc[17:],
pd.Series(name='index_right', dtype='int64'),
self.polydf.iloc[:0].drop('geometry', axis=1)],
axis=1)
expected_left = GeoDataFrame(
expected_left_df, crs={'init': 'epsg:4326', 'no_defs': True})
assert expected_inner.equals(df_inner)
assert expected_right.equals(df_right)
assert expected_left.equals(df_left)
@pytest.mark.skip("Not implemented")
def test_sjoin_outer(self):
df = sjoin(self.pointdf, self.polydf, how="outer")
assert df.shape == (21, 8)
| bsd-3-clause |
sumspr/scikit-learn | sklearn/metrics/cluster/bicluster.py | 359 | 2797 | from __future__ import division
import numpy as np
from sklearn.utils.linear_assignment_ import linear_assignment
from sklearn.utils.validation import check_consistent_length, check_array
__all__ = ["consensus_score"]
def _check_rows_and_columns(a, b):
"""Unpacks the row and column arrays and checks their shape."""
check_consistent_length(*a)
check_consistent_length(*b)
checks = lambda x: check_array(x, ensure_2d=False)
a_rows, a_cols = map(checks, a)
b_rows, b_cols = map(checks, b)
return a_rows, a_cols, b_rows, b_cols
def _jaccard(a_rows, a_cols, b_rows, b_cols):
"""Jaccard coefficient on the elements of the two biclusters."""
intersection = ((a_rows * b_rows).sum() *
(a_cols * b_cols).sum())
a_size = a_rows.sum() * a_cols.sum()
b_size = b_rows.sum() * b_cols.sum()
return intersection / (a_size + b_size - intersection)
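# Worked example (illustrative only): if bicluster A covers rows {0, 1} and
# columns {0, 1} while bicluster B covers rows {1, 2} and columns {1, 2} of
# the same matrix, the intersection has 1 * 1 = 1 element, each bicluster has
# 2 * 2 = 4 elements, and the Jaccard coefficient is 1 / (4 + 4 - 1) = 1/7.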
def _pairwise_similarity(a, b, similarity):
"""Computes pairwise similarity matrix.
result[i, j] is the Jaccard coefficient of a's bicluster i and b's
bicluster j.
"""
a_rows, a_cols, b_rows, b_cols = _check_rows_and_columns(a, b)
n_a = a_rows.shape[0]
n_b = b_rows.shape[0]
result = np.array(list(list(similarity(a_rows[i], a_cols[i],
b_rows[j], b_cols[j])
for j in range(n_b))
for i in range(n_a)))
return result
def consensus_score(a, b, similarity="jaccard"):
"""The similarity of two sets of biclusters.
Similarity between individual biclusters is computed. Then the
best matching between sets is found using the Hungarian algorithm.
The final score is the sum of similarities divided by the size of
the larger set.
Read more in the :ref:`User Guide <biclustering>`.
Parameters
----------
a : (rows, columns)
Tuple of row and column indicators for a set of biclusters.
b : (rows, columns)
Another set of biclusters like ``a``.
similarity : string or function, optional, default: "jaccard"
May be the string "jaccard" to use the Jaccard coefficient, or
any function that takes four arguments, each of which is a 1d
indicator vector: (a_rows, a_columns, b_rows, b_columns).
References
----------
    * Hochreiter, Bodenhofer, et al., 2010. `FABIA: factor analysis
for bicluster acquisition
<https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2881408/>`__.
"""
if similarity == "jaccard":
similarity = _jaccard
matrix = _pairwise_similarity(a, b, similarity)
indices = linear_assignment(1. - matrix)
n_a = len(a[0])
n_b = len(b[0])
return matrix[indices[:, 0], indices[:, 1]].sum() / max(n_a, n_b)
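# Minimal usage sketch (kept as a comment so importing this module stays
# side-effect free): two identical sets containing one bicluster each score a
# perfect 1.0. Row/column indicators are boolean arrays of shape
# (n_biclusters, n_rows) and (n_biclusters, n_columns).
#
#   >>> import numpy as np
#   >>> rows = np.array([[True, True, False, False]])
#   >>> cols = np.array([[True, True, False]])
#   >>> consensus_score((rows, cols), (rows, cols))
#   1.0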
| bsd-3-clause |
xhochy/arrow | python/pyarrow/tests/test_hdfs.py | 1 | 13325 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import pickle
import pytest
import random
import unittest
from io import BytesIO
from os.path import join as pjoin
import numpy as np
import pyarrow as pa
import pyarrow.tests.test_parquet as test_parquet
from pyarrow.pandas_compat import _pandas_api
from pyarrow.tests import util
from pyarrow.util import guid
# ----------------------------------------------------------------------
# HDFS tests
def hdfs_test_client():
host = os.environ.get('ARROW_HDFS_TEST_HOST', 'default')
user = os.environ.get('ARROW_HDFS_TEST_USER', None)
try:
port = int(os.environ.get('ARROW_HDFS_TEST_PORT', 0))
except ValueError:
raise ValueError('Env variable ARROW_HDFS_TEST_PORT was not '
'an integer')
with pytest.warns(DeprecationWarning):
return pa.hdfs.connect(host, port, user)
@pytest.mark.hdfs
class HdfsTestCases:
def _make_test_file(self, hdfs, test_name, test_path, test_data):
base_path = pjoin(self.tmp_path, test_name)
hdfs.mkdir(base_path)
full_path = pjoin(base_path, test_path)
with hdfs.open(full_path, 'wb') as f:
f.write(test_data)
return full_path
@classmethod
def setUpClass(cls):
cls.check_driver()
cls.hdfs = hdfs_test_client()
cls.tmp_path = '/tmp/pyarrow-test-{}'.format(random.randint(0, 1000))
cls.hdfs.mkdir(cls.tmp_path)
@classmethod
def tearDownClass(cls):
cls.hdfs.delete(cls.tmp_path, recursive=True)
cls.hdfs.close()
def test_pickle(self):
s = pickle.dumps(self.hdfs)
h2 = pickle.loads(s)
assert h2.is_open
assert h2.host == self.hdfs.host
assert h2.port == self.hdfs.port
assert h2.user == self.hdfs.user
assert h2.kerb_ticket == self.hdfs.kerb_ticket
# smoketest unpickled client works
h2.ls(self.tmp_path)
def test_cat(self):
path = pjoin(self.tmp_path, 'cat-test')
data = b'foobarbaz'
with self.hdfs.open(path, 'wb') as f:
f.write(data)
contents = self.hdfs.cat(path)
assert contents == data
def test_capacity_space(self):
capacity = self.hdfs.get_capacity()
space_used = self.hdfs.get_space_used()
disk_free = self.hdfs.df()
assert capacity > 0
assert capacity > space_used
assert disk_free == (capacity - space_used)
def test_close(self):
client = hdfs_test_client()
assert client.is_open
client.close()
assert not client.is_open
with pytest.raises(Exception):
client.ls('/')
def test_mkdir(self):
path = pjoin(self.tmp_path, 'test-dir/test-dir')
parent_path = pjoin(self.tmp_path, 'test-dir')
self.hdfs.mkdir(path)
assert self.hdfs.exists(path)
self.hdfs.delete(parent_path, recursive=True)
assert not self.hdfs.exists(path)
def test_mv_rename(self):
path = pjoin(self.tmp_path, 'mv-test')
new_path = pjoin(self.tmp_path, 'mv-new-test')
data = b'foobarbaz'
with self.hdfs.open(path, 'wb') as f:
f.write(data)
assert self.hdfs.exists(path)
self.hdfs.mv(path, new_path)
assert not self.hdfs.exists(path)
assert self.hdfs.exists(new_path)
assert self.hdfs.cat(new_path) == data
self.hdfs.rename(new_path, path)
assert self.hdfs.cat(path) == data
def test_info(self):
path = pjoin(self.tmp_path, 'info-base')
file_path = pjoin(path, 'ex')
self.hdfs.mkdir(path)
data = b'foobarbaz'
with self.hdfs.open(file_path, 'wb') as f:
f.write(data)
path_info = self.hdfs.info(path)
file_path_info = self.hdfs.info(file_path)
assert path_info['kind'] == 'directory'
assert file_path_info['kind'] == 'file'
assert file_path_info['size'] == len(data)
def test_exists_isdir_isfile(self):
dir_path = pjoin(self.tmp_path, 'info-base')
file_path = pjoin(dir_path, 'ex')
missing_path = pjoin(dir_path, 'this-path-is-missing')
self.hdfs.mkdir(dir_path)
with self.hdfs.open(file_path, 'wb') as f:
f.write(b'foobarbaz')
assert self.hdfs.exists(dir_path)
assert self.hdfs.exists(file_path)
assert not self.hdfs.exists(missing_path)
assert self.hdfs.isdir(dir_path)
assert not self.hdfs.isdir(file_path)
assert not self.hdfs.isdir(missing_path)
assert not self.hdfs.isfile(dir_path)
assert self.hdfs.isfile(file_path)
assert not self.hdfs.isfile(missing_path)
def test_disk_usage(self):
path = pjoin(self.tmp_path, 'disk-usage-base')
p1 = pjoin(path, 'p1')
p2 = pjoin(path, 'p2')
subdir = pjoin(path, 'subdir')
p3 = pjoin(subdir, 'p3')
if self.hdfs.exists(path):
self.hdfs.delete(path, True)
self.hdfs.mkdir(path)
self.hdfs.mkdir(subdir)
data = b'foobarbaz'
for file_path in [p1, p2, p3]:
with self.hdfs.open(file_path, 'wb') as f:
f.write(data)
assert self.hdfs.disk_usage(path) == len(data) * 3
def test_ls(self):
base_path = pjoin(self.tmp_path, 'ls-test')
self.hdfs.mkdir(base_path)
dir_path = pjoin(base_path, 'a-dir')
f1_path = pjoin(base_path, 'a-file-1')
self.hdfs.mkdir(dir_path)
f = self.hdfs.open(f1_path, 'wb')
f.write(b'a' * 10)
contents = sorted(self.hdfs.ls(base_path, False))
assert contents == [dir_path, f1_path]
def test_chmod_chown(self):
path = pjoin(self.tmp_path, 'chmod-test')
with self.hdfs.open(path, 'wb') as f:
f.write(b'a' * 10)
def test_download_upload(self):
base_path = pjoin(self.tmp_path, 'upload-test')
data = b'foobarbaz'
buf = BytesIO(data)
buf.seek(0)
self.hdfs.upload(base_path, buf)
out_buf = BytesIO()
self.hdfs.download(base_path, out_buf)
out_buf.seek(0)
assert out_buf.getvalue() == data
def test_file_context_manager(self):
path = pjoin(self.tmp_path, 'ctx-manager')
data = b'foo'
with self.hdfs.open(path, 'wb') as f:
f.write(data)
with self.hdfs.open(path, 'rb') as f:
assert f.size() == 3
result = f.read(10)
assert result == data
def test_open_not_exist_error_message(self):
# ARROW-226
path = pjoin(self.tmp_path, 'does-not-exist-123')
try:
self.hdfs.open(path)
except Exception as e:
assert 'file does not exist' in e.args[0].lower()
def test_read_whole_file(self):
path = pjoin(self.tmp_path, 'read-whole-file')
data = b'foo' * 1000
with self.hdfs.open(path, 'wb') as f:
f.write(data)
with self.hdfs.open(path, 'rb') as f:
result = f.read()
assert result == data
def _write_multiple_hdfs_pq_files(self, tmpdir):
import pyarrow.parquet as pq
nfiles = 10
size = 5
test_data = []
for i in range(nfiles):
df = test_parquet._test_dataframe(size, seed=i)
df['index'] = np.arange(i * size, (i + 1) * size)
# Hack so that we don't have a dtype cast in v1 files
df['uint32'] = df['uint32'].astype(np.int64)
path = pjoin(tmpdir, '{}.parquet'.format(i))
table = pa.Table.from_pandas(df, preserve_index=False)
with self.hdfs.open(path, 'wb') as f:
pq.write_table(table, f)
test_data.append(table)
expected = pa.concat_tables(test_data)
return expected
@pytest.mark.pandas
@pytest.mark.parquet
def test_read_multiple_parquet_files(self):
tmpdir = pjoin(self.tmp_path, 'multi-parquet-' + guid())
self.hdfs.mkdir(tmpdir)
expected = self._write_multiple_hdfs_pq_files(tmpdir)
result = self.hdfs.read_parquet(tmpdir)
_pandas_api.assert_frame_equal(result.to_pandas()
.sort_values(by='index')
.reset_index(drop=True),
expected.to_pandas())
@pytest.mark.pandas
@pytest.mark.parquet
def test_read_multiple_parquet_files_with_uri(self):
import pyarrow.parquet as pq
tmpdir = pjoin(self.tmp_path, 'multi-parquet-uri-' + guid())
self.hdfs.mkdir(tmpdir)
expected = self._write_multiple_hdfs_pq_files(tmpdir)
path = _get_hdfs_uri(tmpdir)
        # TODO: for a URI it should not be necessary to pass this argument
result = pq.read_table(path, use_legacy_dataset=True)
_pandas_api.assert_frame_equal(result.to_pandas()
.sort_values(by='index')
.reset_index(drop=True),
expected.to_pandas())
@pytest.mark.pandas
@pytest.mark.parquet
def test_read_write_parquet_files_with_uri(self):
import pyarrow.parquet as pq
tmpdir = pjoin(self.tmp_path, 'uri-parquet-' + guid())
self.hdfs.mkdir(tmpdir)
path = _get_hdfs_uri(pjoin(tmpdir, 'test.parquet'))
size = 5
df = test_parquet._test_dataframe(size, seed=0)
# Hack so that we don't have a dtype cast in v1 files
df['uint32'] = df['uint32'].astype(np.int64)
table = pa.Table.from_pandas(df, preserve_index=False)
pq.write_table(table, path, filesystem=self.hdfs)
result = pq.read_table(
path, filesystem=self.hdfs, use_legacy_dataset=True
).to_pandas()
_pandas_api.assert_frame_equal(result, df)
@pytest.mark.parquet
@pytest.mark.pandas
def test_read_common_metadata_files(self):
tmpdir = pjoin(self.tmp_path, 'common-metadata-' + guid())
self.hdfs.mkdir(tmpdir)
test_parquet._test_read_common_metadata_files(self.hdfs, tmpdir)
@pytest.mark.parquet
@pytest.mark.pandas
def test_write_to_dataset_with_partitions(self):
tmpdir = pjoin(self.tmp_path, 'write-partitions-' + guid())
self.hdfs.mkdir(tmpdir)
test_parquet._test_write_to_dataset_with_partitions(
tmpdir, filesystem=self.hdfs)
@pytest.mark.parquet
@pytest.mark.pandas
def test_write_to_dataset_no_partitions(self):
tmpdir = pjoin(self.tmp_path, 'write-no_partitions-' + guid())
self.hdfs.mkdir(tmpdir)
test_parquet._test_write_to_dataset_no_partitions(
tmpdir, filesystem=self.hdfs)
class TestLibHdfs(HdfsTestCases, unittest.TestCase):
@classmethod
def check_driver(cls):
if not pa.have_libhdfs():
message = 'No libhdfs available on system'
if os.environ.get('PYARROW_HDFS_TEST_LIBHDFS_REQUIRE'):
pytest.fail(message)
else:
pytest.skip(message)
def test_orphaned_file(self):
hdfs = hdfs_test_client()
file_path = self._make_test_file(hdfs, 'orphaned_file_test', 'fname',
b'foobarbaz')
f = hdfs.open(file_path)
hdfs = None
f = None # noqa
def _get_hdfs_uri(path):
host = os.environ.get('ARROW_HDFS_TEST_HOST', 'localhost')
try:
port = int(os.environ.get('ARROW_HDFS_TEST_PORT', 0))
except ValueError:
raise ValueError('Env variable ARROW_HDFS_TEST_PORT was not '
'an integer')
uri = "hdfs://{}:{}{}".format(host, port, path)
return uri
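# For example, with ARROW_HDFS_TEST_HOST=localhost and ARROW_HDFS_TEST_PORT=8020
# (illustrative values only), _get_hdfs_uri('/tmp/x') returns
# 'hdfs://localhost:8020/tmp/x'.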
@pytest.mark.hdfs
@pytest.mark.pandas
@pytest.mark.parquet
@pytest.mark.fastparquet
def test_fastparquet_read_with_hdfs():
from pandas.testing import assert_frame_equal
try:
import snappy # noqa
except ImportError:
pytest.skip('fastparquet test requires snappy')
import pyarrow.parquet as pq
fastparquet = pytest.importorskip('fastparquet')
fs = hdfs_test_client()
df = util.make_dataframe()
table = pa.Table.from_pandas(df)
path = '/tmp/testing.parquet'
with fs.open(path, 'wb') as f:
pq.write_table(table, f)
parquet_file = fastparquet.ParquetFile(path, open_with=fs.open)
result = parquet_file.to_pandas()
assert_frame_equal(result, df)
| apache-2.0 |
MadsJensen/malthe_alpha_project | source_connectivity_permutation.py | 1 | 6505 | # -*- coding: utf-8 -*-
"""
Created on Wed Sep 9 08:41:17 2015.
@author: mje
"""
import numpy as np
import numpy.random as npr
import os
import socket
import mne
# import pandas as pd
from mne.connectivity import spectral_connectivity
from mne.minimum_norm import (apply_inverse_epochs, read_inverse_operator)
# Permutation test.
def permutation_resampling(case, control, num_samples, statistic):
"""
Permutation test.
    Return the p-value that the statistic for case differs
    from the statistic for control.
"""
observed_diff = abs(statistic(case) - statistic(control))
num_case = len(case)
combined = np.concatenate([case, control])
diffs = []
for i in range(num_samples):
xs = npr.permutation(combined)
diff = np.mean(xs[:num_case]) - np.mean(xs[num_case:])
diffs.append(diff)
    diffs = np.asarray(diffs)
    pval = (np.sum(diffs > observed_diff) +
            np.sum(diffs < -observed_diff)) / float(num_samples)
return pval, observed_diff, diffs
def permutation_test(a, b, num_samples, statistic):
"""
Permutation test.
    Return the p-value that the statistic for a differs
    from the statistic for b.
"""
observed_diff = abs(statistic(b) - statistic(a))
num_a = len(a)
combined = np.concatenate([a, b])
diffs = []
for i in range(num_samples):
xs = npr.permutation(combined)
diff = np.mean(xs[:num_a]) - np.mean(xs[num_a:])
diffs.append(diff)
pval = np.sum(np.abs(diffs) >= np.abs(observed_diff)) / float(num_samples)
return pval, observed_diff, diffs
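# Minimal usage sketch for the two helpers above (kept as a comment;
# illustrative values only):
#
#   rng = np.random.RandomState(42)
#   group_a = rng.normal(0.0, 1.0, 40)
#   group_b = rng.normal(0.5, 1.0, 40)
#   pval, obs_diff, diffs = permutation_test(group_a, group_b,
#                                            num_samples=1000,
#                                            statistic=np.mean)
#
# pval is the fraction of label permutations whose mean difference is at
# least as extreme as the observed difference between the two groups.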
# Setup paths and prepare raw data
hostname = socket.gethostname()
if hostname == "Wintermute":
data_path = "/home/mje/mnt/caa/scratch/"
n_jobs = 1
else:
data_path = "/projects/MINDLAB2015_MEG-CorticalAlphaAttention/scratch/"
n_jobs = 1
subjects_dir = data_path + "fs_subjects_dir/"
# change dir to save files the rigth place
os.chdir(data_path)
fname_inv = data_path + '0001-meg-oct-6-inv.fif'
fname_epochs = data_path + '0001_p_03_filter_ds_ica-mc_tsss-epo.fif'
fname_evoked = data_path + "0001_p_03_filter_ds_ica-mc_raw_tsss-ave.fif"
# Parameters
snr = 1.0 # Standard assumption for average data but using it for single trial
lambda2 = 1.0 / snr ** 2
method = "dSPM" # use dSPM method (could also be MNE or sLORETA)
# Load data
inverse_operator = read_inverse_operator(fname_inv)
epochs = mne.read_epochs(fname_epochs)
# Get labels for FreeSurfer 'aparc' cortical parcellation with 34 labels/hemi
#labels = mne.read_labels_from_annot('0001', parc='PALS_B12_Lobes',
labels = mne.read_labels_from_annot('0001', parc='PALS_B12_Brodmann',
regexp="Brodmann",
subjects_dir=subjects_dir)
labels_occ = labels[6:12]
# labels = mne.read_labels_from_annot('subject_1', parc='aparc.DKTatlas40',
# subjects_dir=subjects_dir)
for cond in epochs.event_id.keys():
stcs = apply_inverse_epochs(epochs[cond], inverse_operator, lambda2,
method, pick_ori="normal")
exec("stcs_%s = stcs" % cond)
labels_name = [label.name for label in labels_occ]
# Extract time series
ts_ctl_left = mne.extract_label_time_course(stcs_ctl_left,
labels_occ,
src=inverse_operator["src"],
mode = "mean_flip")
ts_ent_left = mne.extract_label_time_course(stcs_ent_left,
labels_occ,
src=inverse_operator["src"],
mode = "mean_flip")
stcs_all_left = stcs_ctl_left + stcs_ent_left
ts_all_left = np.asarray(mne.extract_label_time_course(stcs_all_left,
labels_occ,
src=inverse_operator["src"],
mode = "mean_flip"))
number_of_permutations = 2000
index = np.arange(0, len(ts_all_left))
permutations_results = np.empty(number_of_permutations)
fmin, fmax = 7, 12
tmin, tmax = 0, 1
con_method = "plv"
diff_permuatation = np.empty([6, 6, number_of_permutations])
# diff
con_ctl, freqs_ctl, times_ctl, n_epochs_ctl, n_tapers_ctl =\
spectral_connectivity(
ts_ctl_left,
method=con_method,
mode='multitaper',
sfreq=250,
fmin=fmin, fmax=fmax,
faverage=True,
tmin=tmin, tmax=tmax,
mt_adaptive=False,
n_jobs=1,
verbose=None)
con_ent, freqs_ent, times_ent, n_epochs_ent, n_tapers_ent =\
spectral_connectivity(
ts_ent_left,
method=con_method,
mode='multitaper',
sfreq=250,
fmin=fmin, fmax=fmax,
faverage=True,
tmin=tmin, tmax=tmax,
mt_adaptive=False,
n_jobs=1,
verbose=None)
diff = con_ctl[:, :, 0] - con_ent[:, :, 0]
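# Build a null distribution for this difference: on every iteration the
# pooled trials are re-labelled at random and split into two surrogate
# groups (the hard-coded 64/remainder split below is assumed to match the
# original group sizes), and the surrogate PLV difference is recomputed for
# every pair of labels.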
for i in range(number_of_permutations):
index = np.random.permutation(index)
tmp_ctl = ts_all_left[index[:64], :, :]
tmp_case = ts_all_left[index[64:], :, :]
con_ctl, freqs_ctl, times_ctl, n_epochs_ctl, n_tapers_ctl =\
spectral_connectivity(
tmp_ctl,
method=con_method,
mode='multitaper',
sfreq=250,
fmin=fmin, fmax=fmax,
faverage=True,
tmin=tmin, tmax=tmax,
mt_adaptive=False,
n_jobs=1)
con_case, freqs_case, times_case, n_epochs_case, n_tapers_case =\
spectral_connectivity(
tmp_case,
method=con_method,
mode='multitaper',
sfreq=250,
fmin=fmin, fmax=fmax,
faverage=True,
tmin=tmin, tmax=tmax,
mt_adaptive=False,
n_jobs=1)
diff_permuatation[:, :, i] = con_ctl[:, :, 0] - con_case[:, :, 0]
pval = np.empty_like(diff)
for h in range(diff.shape[0]):
    for j in range(diff.shape[1]):
        if diff[h, j] != 0:
            pval[h, j] = np.sum(np.abs(diff_permutation[h, j, :]) >=
                                np.abs(diff[h, j])) / float(number_of_permutations)
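# Vectorized sketch of the p-value computation above (an alternative to the loop,
# assuming the shapes built earlier: diff is (n_labels, n_labels) and
# diff_permutation is (n_labels, n_labels, n_permutations)):
# pval_vectorized = (np.abs(diff_permutation) >=
#                    np.abs(diff)[:, :, None]).mean(axis=2)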
| mit |
pianomania/scikit-learn | sklearn/linear_model/stochastic_gradient.py | 16 | 50617 | # Authors: Peter Prettenhofer <[email protected]> (main author)
# Mathieu Blondel (partial_fit support)
#
# License: BSD 3 clause
"""Classification and regression using Stochastic Gradient Descent (SGD)."""
import numpy as np
from abc import ABCMeta, abstractmethod
from ..externals.joblib import Parallel, delayed
from .base import LinearClassifierMixin, SparseCoefMixin
from .base import make_dataset
from ..base import BaseEstimator, RegressorMixin
from ..utils import check_array, check_random_state, check_X_y
from ..utils.extmath import safe_sparse_dot
from ..utils.multiclass import _check_partial_fit_first_call
from ..utils.validation import check_is_fitted
from ..externals import six
from .sgd_fast import plain_sgd, average_sgd
from ..utils.fixes import astype
from ..utils import compute_class_weight
from ..utils import deprecated
from .sgd_fast import Hinge
from .sgd_fast import SquaredHinge
from .sgd_fast import Log
from .sgd_fast import ModifiedHuber
from .sgd_fast import SquaredLoss
from .sgd_fast import Huber
from .sgd_fast import EpsilonInsensitive
from .sgd_fast import SquaredEpsilonInsensitive
LEARNING_RATE_TYPES = {"constant": 1, "optimal": 2, "invscaling": 3,
"pa1": 4, "pa2": 5}
PENALTY_TYPES = {"none": 0, "l2": 2, "l1": 1, "elasticnet": 3}
DEFAULT_EPSILON = 0.1
# Default value of ``epsilon`` parameter.
class BaseSGD(six.with_metaclass(ABCMeta, BaseEstimator, SparseCoefMixin)):
"""Base class for SGD classification and regression."""
def __init__(self, loss, penalty='l2', alpha=0.0001, C=1.0,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=0.1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
warm_start=False, average=False):
self.loss = loss
self.penalty = penalty
self.learning_rate = learning_rate
self.epsilon = epsilon
self.alpha = alpha
self.C = C
self.l1_ratio = l1_ratio
self.fit_intercept = fit_intercept
self.n_iter = n_iter
self.shuffle = shuffle
self.random_state = random_state
self.verbose = verbose
self.eta0 = eta0
self.power_t = power_t
self.warm_start = warm_start
self.average = average
self._validate_params()
def set_params(self, *args, **kwargs):
super(BaseSGD, self).set_params(*args, **kwargs)
self._validate_params()
return self
@abstractmethod
def fit(self, X, y):
"""Fit model."""
def _validate_params(self):
"""Validate input params. """
if not isinstance(self.shuffle, bool):
raise ValueError("shuffle must be either True or False")
if self.n_iter <= 0:
raise ValueError("n_iter must be > zero")
if not (0.0 <= self.l1_ratio <= 1.0):
raise ValueError("l1_ratio must be in [0, 1]")
if self.alpha < 0.0:
raise ValueError("alpha must be >= 0")
if self.learning_rate in ("constant", "invscaling"):
if self.eta0 <= 0.0:
raise ValueError("eta0 must be > 0")
if self.learning_rate == "optimal" and self.alpha == 0:
raise ValueError("alpha must be > 0 since "
"learning_rate is 'optimal'. alpha is used "
"to compute the optimal learning rate.")
# raises ValueError if not registered
self._get_penalty_type(self.penalty)
self._get_learning_rate_type(self.learning_rate)
if self.loss not in self.loss_functions:
raise ValueError("The loss %s is not supported. " % self.loss)
def _get_loss_function(self, loss):
"""Get concrete ``LossFunction`` object for str ``loss``. """
try:
loss_ = self.loss_functions[loss]
loss_class, args = loss_[0], loss_[1:]
if loss in ('huber', 'epsilon_insensitive',
'squared_epsilon_insensitive'):
args = (self.epsilon, )
return loss_class(*args)
except KeyError:
raise ValueError("The loss %s is not supported. " % loss)
def _get_learning_rate_type(self, learning_rate):
try:
return LEARNING_RATE_TYPES[learning_rate]
except KeyError:
raise ValueError("learning rate %s "
"is not supported. " % learning_rate)
def _get_penalty_type(self, penalty):
penalty = str(penalty).lower()
try:
return PENALTY_TYPES[penalty]
except KeyError:
raise ValueError("Penalty %s is not supported. " % penalty)
def _validate_sample_weight(self, sample_weight, n_samples):
"""Set the sample weight array."""
if sample_weight is None:
# uniform sample weights
sample_weight = np.ones(n_samples, dtype=np.float64, order='C')
else:
# user-provided array
sample_weight = np.asarray(sample_weight, dtype=np.float64,
order="C")
if sample_weight.shape[0] != n_samples:
raise ValueError("Shapes of X and sample_weight do not match.")
return sample_weight
def _allocate_parameter_mem(self, n_classes, n_features, coef_init=None,
intercept_init=None):
"""Allocate mem for parameters; initialize if provided."""
if n_classes > 2:
# allocate coef_ for multi-class
if coef_init is not None:
coef_init = np.asarray(coef_init, order="C")
if coef_init.shape != (n_classes, n_features):
raise ValueError("Provided ``coef_`` does not match "
"dataset. ")
self.coef_ = coef_init
else:
self.coef_ = np.zeros((n_classes, n_features),
dtype=np.float64, order="C")
# allocate intercept_ for multi-class
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, order="C")
if intercept_init.shape != (n_classes, ):
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init
else:
self.intercept_ = np.zeros(n_classes, dtype=np.float64,
order="C")
else:
# allocate coef_ for binary problem
if coef_init is not None:
coef_init = np.asarray(coef_init, dtype=np.float64,
order="C")
coef_init = coef_init.ravel()
if coef_init.shape != (n_features,):
raise ValueError("Provided coef_init does not "
"match dataset.")
self.coef_ = coef_init
else:
self.coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
# allocate intercept_ for binary problem
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, dtype=np.float64)
if intercept_init.shape != (1,) and intercept_init.shape != ():
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init.reshape(1,)
else:
self.intercept_ = np.zeros(1, dtype=np.float64, order="C")
# initialize average parameters
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = np.zeros(self.coef_.shape,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(self.standard_intercept_.shape,
dtype=np.float64,
order="C")
def _prepare_fit_binary(est, y, i):
"""Initialization for fit_binary.
Returns y, coef, intercept.
"""
y_i = np.ones(y.shape, dtype=np.float64, order="C")
y_i[y != est.classes_[i]] = -1.0
average_intercept = 0
average_coef = None
if len(est.classes_) == 2:
if not est.average:
coef = est.coef_.ravel()
intercept = est.intercept_[0]
else:
coef = est.standard_coef_.ravel()
intercept = est.standard_intercept_[0]
average_coef = est.average_coef_.ravel()
average_intercept = est.average_intercept_[0]
else:
if not est.average:
coef = est.coef_[i]
intercept = est.intercept_[i]
else:
coef = est.standard_coef_[i]
intercept = est.standard_intercept_[i]
average_coef = est.average_coef_[i]
average_intercept = est.average_intercept_[i]
return y_i, coef, intercept, average_coef, average_intercept
def fit_binary(est, i, X, y, alpha, C, learning_rate, n_iter,
pos_weight, neg_weight, sample_weight):
"""Fit a single binary classifier.
The i'th class is considered the "positive" class.
"""
# if average is not true, average_coef, and average_intercept will be
# unused
y_i, coef, intercept, average_coef, average_intercept = \
_prepare_fit_binary(est, y, i)
assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0]
dataset, intercept_decay = make_dataset(X, y_i, sample_weight)
penalty_type = est._get_penalty_type(est.penalty)
learning_rate_type = est._get_learning_rate_type(learning_rate)
# XXX should have random_state_!
random_state = check_random_state(est.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if not est.average:
return plain_sgd(coef, intercept, est.loss_function_,
penalty_type, alpha, C, est.l1_ratio,
dataset, n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle), seed,
pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_, intercept_decay)
else:
standard_coef, standard_intercept, average_coef, \
average_intercept = average_sgd(coef, intercept, average_coef,
average_intercept,
est.loss_function_, penalty_type,
alpha, C, est.l1_ratio, dataset,
n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle),
seed, pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_,
intercept_decay,
est.average)
if len(est.classes_) == 2:
est.average_intercept_[0] = average_intercept
else:
est.average_intercept_[i] = average_intercept
return standard_coef, standard_intercept
class BaseSGDClassifier(six.with_metaclass(ABCMeta, BaseSGD,
LinearClassifierMixin)):
loss_functions = {
"hinge": (Hinge, 1.0),
"squared_hinge": (SquaredHinge, 1.0),
"perceptron": (Hinge, 0.0),
"log": (Log, ),
"modified_huber": (ModifiedHuber, ),
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(BaseSGDClassifier, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
self.class_weight = class_weight
self.n_jobs = int(n_jobs)
@property
@deprecated("Attribute loss_function was deprecated in version 0.19 and "
"will be removed in 0.21. Use 'loss_function_' instead")
def loss_function(self):
return self.loss_function_
def _partial_fit(self, X, y, alpha, C,
loss, learning_rate, n_iter,
classes, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
self._validate_params()
_check_partial_fit_first_call(self, classes)
n_classes = self.classes_.shape[0]
# Allocate datastructures from input arguments
self._expanded_class_weight = compute_class_weight(self.class_weight,
self.classes_, y)
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if getattr(self, "coef_", None) is None or coef_init is not None:
self._allocate_parameter_mem(n_classes, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous "
"data %d." % (n_features, self.coef_.shape[-1]))
self.loss_function_ = self._get_loss_function(loss)
if not hasattr(self, "t_"):
self.t_ = 1.0
# delegate to concrete training procedure
if n_classes > 2:
self._fit_multiclass(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
elif n_classes == 2:
self._fit_binary(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
else:
raise ValueError("The number of class labels must be "
"greater than one.")
return self
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if hasattr(self, "classes_"):
self.classes_ = None
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
# labels can be encoded as float, int, or string literals
# np.unique sorts in asc order; largest class id is positive class
classes = np.unique(y)
if self.warm_start and hasattr(self, "coef_"):
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = 1.0
self._partial_fit(X, y, alpha, C, loss, learning_rate, self.n_iter,
classes, sample_weight, coef_init, intercept_init)
return self
def _fit_binary(self, X, y, alpha, C, sample_weight,
learning_rate, n_iter):
"""Fit a binary classifier on X and y. """
coef, intercept = fit_binary(self, 1, X, y, alpha, C,
learning_rate, n_iter,
self._expanded_class_weight[1],
self._expanded_class_weight[0],
sample_weight)
self.t_ += n_iter * X.shape[0]
# need to be 2d
if self.average > 0:
if self.average <= self.t_ - 1:
self.coef_ = self.average_coef_.reshape(1, -1)
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_.reshape(1, -1)
self.standard_intercept_ = np.atleast_1d(intercept)
self.intercept_ = self.standard_intercept_
else:
self.coef_ = coef.reshape(1, -1)
# intercept is a float, need to convert it to an array of length 1
self.intercept_ = np.atleast_1d(intercept)
def _fit_multiclass(self, X, y, alpha, C, learning_rate,
sample_weight, n_iter):
"""Fit a multi-class classifier by combining binary classifiers
Each binary classifier predicts one class versus all others. This
strategy is called OVA: One Versus All.
"""
# Use joblib to fit OvA in parallel.
result = Parallel(n_jobs=self.n_jobs, backend="threading",
verbose=self.verbose)(
delayed(fit_binary)(self, i, X, y, alpha, C, learning_rate,
n_iter, self._expanded_class_weight[i], 1.,
sample_weight)
for i in range(len(self.classes_)))
for i, (_, intercept) in enumerate(result):
self.intercept_[i] = intercept
self.t_ += n_iter * X.shape[0]
if self.average > 0:
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.standard_intercept_ = np.atleast_1d(self.intercept_)
self.intercept_ = self.standard_intercept_
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of the training data
y : numpy array, shape (n_samples,)
Subset of the target values
classes : array, shape (n_classes,)
Classes across all calls to partial_fit.
            Can be obtained via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
if self.class_weight in ['balanced']:
raise ValueError("class_weight '{0}' is not supported for "
"partial_fit. In order to use 'balanced' weights,"
" use compute_class_weight('{0}', classes, y). "
"In place of y you can us a large enough sample "
"of the full training set target to properly "
"estimate the class frequency distributions. "
"Pass the resulting weights as the class_weight "
"parameter.".format(self.class_weight))
return self._partial_fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
classes=classes, sample_weight=sample_weight,
coef_init=None, intercept_init=None)
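    # A minimal out-of-core usage sketch of the method above (``y_all`` and
    # ``iter_minibatches`` are hypothetical helpers, not part of this module):
    #
    #     clf = SGDClassifier(loss="log")
    #     classes = np.unique(y_all)   # all labels must be known on the first call
    #     for X_batch, y_batch in iter_minibatches():
    #         clf.partial_fit(X_batch, y_batch, classes=classes)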
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_classes, n_features)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (n_classes,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed. These weights will
be multiplied with class_weight (passed through the
constructor) if class_weight is specified
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init, intercept_init=intercept_init,
sample_weight=sample_weight)
class SGDClassifier(BaseSGDClassifier):
"""Linear classifiers (SVM, logistic regression, a.o.) with SGD training.
This estimator implements regularized linear models with stochastic
    gradient descent (SGD) learning: the gradient of the loss is estimated
    one sample at a time and the model is updated along the way with a
decreasing strength schedule (aka learning rate). SGD allows minibatch
(online/out-of-core) learning, see the partial_fit method.
For best results using the default learning rate schedule, the data should
have zero mean and unit variance.
This implementation works with data represented as dense or sparse arrays
of floating point values for the features. The model it fits can be
controlled with the loss parameter; by default, it fits a linear support
vector machine (SVM).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, 'hinge', 'log', 'modified_huber', 'squared_hinge',\
'perceptron', or a regression loss: 'squared_loss', 'huber',\
'epsilon_insensitive', or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'hinge', which gives a
linear SVM.
The 'log' loss gives logistic regression, a probabilistic classifier.
'modified_huber' is another smooth loss that brings tolerance to
outliers as well as probability estimates.
'squared_hinge' is like hinge but is quadratically penalized.
'perceptron' is the linear loss used by the perceptron algorithm.
The other losses are designed for regression but can be useful in
classification as well; see SGDRegressor for a description.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
Also used to compute learning_rate when set to 'optimal'.
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
n_jobs : integer, optional
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation. -1 means 'all CPUs'. Defaults
to 1.
learning_rate : string, optional
The learning rate schedule:
- 'constant': eta = eta0
- 'optimal': eta = 1.0 / (alpha * (t + t0)) [default]
- 'invscaling': eta = eta0 / pow(t, power_t)
where t0 is chosen by a heuristic proposed by Leon Bottou.
eta0 : double
The initial learning rate for the 'constant' or 'invscaling'
schedules. The default value is 0.0 as eta0 is not used by the
default schedule 'optimal'.
power_t : double
The exponent for inverse scaling learning rate [default 0.5].
class_weight : dict, {class_label: weight} or "balanced" or None, optional
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So ``average=10`` will begin averaging after seeing 10
samples.
Attributes
----------
coef_ : array, shape (1, n_features) if n_classes == 2 else (n_classes,\
n_features)
Weights assigned to the features.
intercept_ : array, shape (1,) if n_classes == 2 else (n_classes,)
Constants in decision function.
loss_function_ : concrete ``LossFunction``
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> Y = np.array([1, 1, 2, 2])
>>> clf = linear_model.SGDClassifier()
>>> clf.fit(X, Y)
... #doctest: +NORMALIZE_WHITESPACE
SGDClassifier(alpha=0.0001, average=False, class_weight=None, epsilon=0.1,
eta0=0.0, fit_intercept=True, l1_ratio=0.15,
learning_rate='optimal', loss='hinge', n_iter=5, n_jobs=1,
penalty='l2', power_t=0.5, random_state=None, shuffle=True,
verbose=0, warm_start=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
LinearSVC, LogisticRegression, Perceptron
"""
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(SGDClassifier, self).__init__(
loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept, n_iter=n_iter, shuffle=shuffle,
verbose=verbose, epsilon=epsilon, n_jobs=n_jobs,
random_state=random_state, learning_rate=learning_rate, eta0=eta0,
power_t=power_t, class_weight=class_weight, warm_start=warm_start,
average=average)
def _check_proba(self):
check_is_fitted(self, "t_")
if self.loss not in ("log", "modified_huber"):
raise AttributeError("probability estimates are not available for"
" loss=%r" % self.loss)
@property
def predict_proba(self):
"""Probability estimates.
This method is only available for log loss and modified Huber loss.
Multiclass probability estimates are derived from binary (one-vs.-rest)
estimates by simple normalization, as recommended by Zadrozny and
Elkan.
Binary probability estimates for loss="modified_huber" are given by
(clip(decision_function(X), -1, 1) + 1) / 2. For other loss functions
it is necessary to perform proper probability calibration by wrapping
the classifier with
:class:`sklearn.calibration.CalibratedClassifierCV` instead.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples, n_classes)
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in `self.classes_`.
References
----------
Zadrozny and Elkan, "Transforming classifier scores into multiclass
probability estimates", SIGKDD'02,
http://www.research.ibm.com/people/z/zadrozny/kdd2002-Transf.pdf
The justification for the formula in the loss="modified_huber"
case is in the appendix B in:
http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf
"""
self._check_proba()
return self._predict_proba
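    # Worked illustration of the modified_huber formula quoted above (hypothetical
    # score): a decision_function value of 0.5 clips to 0.5 and maps to
    # (0.5 + 1) / 2 = 0.75 for the positive class.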
def _predict_proba(self, X):
if self.loss == "log":
return self._predict_proba_lr(X)
elif self.loss == "modified_huber":
binary = (len(self.classes_) == 2)
scores = self.decision_function(X)
if binary:
prob2 = np.ones((scores.shape[0], 2))
prob = prob2[:, 1]
else:
prob = scores
np.clip(scores, -1, 1, prob)
prob += 1.
prob /= 2.
if binary:
prob2[:, 0] -= prob
prob = prob2
else:
# the above might assign zero to all classes, which doesn't
# normalize neatly; work around this to produce uniform
# probabilities
prob_sum = prob.sum(axis=1)
all_zero = (prob_sum == 0)
if np.any(all_zero):
prob[all_zero, :] = 1
prob_sum[all_zero] = len(self.classes_)
# normalize
prob /= prob_sum.reshape((prob.shape[0], -1))
return prob
else:
raise NotImplementedError("predict_(log_)proba only supported when"
" loss='log' or loss='modified_huber' "
"(%r given)" % self.loss)
@property
def predict_log_proba(self):
"""Log of probability estimates.
This method is only available for log loss and modified Huber loss.
When loss="modified_huber", probability estimates may be hard zeros
and ones, so taking the logarithm is not possible.
See ``predict_proba`` for details.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
T : array-like, shape (n_samples, n_classes)
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in
`self.classes_`.
"""
self._check_proba()
return self._predict_log_proba
def _predict_log_proba(self, X):
return np.log(self.predict_proba(X))
class BaseSGDRegressor(BaseSGD, RegressorMixin):
loss_functions = {
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(BaseSGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
def _partial_fit(self, X, y, alpha, C, loss, learning_rate,
n_iter, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, "csr", copy=False, order='C', dtype=np.float64)
y = astype(y, np.float64, copy=False)
n_samples, n_features = X.shape
self._validate_params()
# Allocate datastructures from input arguments
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if getattr(self, "coef_", None) is None:
self._allocate_parameter_mem(1, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous "
"data %d." % (n_features, self.coef_.shape[-1]))
if self.average > 0 and getattr(self, "average_coef_", None) is None:
self.average_coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(1,
dtype=np.float64,
order="C")
self._fit_regressor(X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter)
return self
def partial_fit(self, X, y, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of training data
y : numpy array of shape (n_samples,)
Subset of target values
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
return self._partial_fit(X, y, self.alpha, C=1.0,
loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
sample_weight=sample_weight,
coef_init=None, intercept_init=None)
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if self.warm_start and getattr(self, "coef_", None) is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_intercept_ = self.intercept_
self.standard_coef_ = self.coef_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = 1.0
return self._partial_fit(X, y, alpha, C, loss, learning_rate,
self.n_iter, sample_weight,
coef_init, intercept_init)
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_features,)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (1,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init,
intercept_init=intercept_init,
sample_weight=sample_weight)
def _decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
check_is_fitted(self, ["t_", "coef_", "intercept_"], all_or_any=all)
X = check_array(X, accept_sparse='csr')
scores = safe_sparse_dot(X, self.coef_.T,
dense_output=True) + self.intercept_
return scores.ravel()
def predict(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
return self._decision_function(X)
def _fit_regressor(self, X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter):
dataset, intercept_decay = make_dataset(X, y, sample_weight)
loss_function = self._get_loss_function(loss)
penalty_type = self._get_penalty_type(self.penalty)
learning_rate_type = self._get_learning_rate_type(learning_rate)
if not hasattr(self, "t_"):
self.t_ = 1.0
random_state = check_random_state(self.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if self.average > 0:
self.standard_coef_, self.standard_intercept_, \
self.average_coef_, self.average_intercept_ =\
average_sgd(self.standard_coef_,
self.standard_intercept_[0],
self.average_coef_,
self.average_intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay, self.average)
self.average_intercept_ = np.atleast_1d(self.average_intercept_)
self.standard_intercept_ = np.atleast_1d(self.standard_intercept_)
self.t_ += n_iter * X.shape[0]
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.intercept_ = self.standard_intercept_
else:
self.coef_, self.intercept_ = \
plain_sgd(self.coef_,
self.intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay)
self.t_ += n_iter * X.shape[0]
self.intercept_ = np.atleast_1d(self.intercept_)
class SGDRegressor(BaseSGDRegressor):
"""Linear model fitted by minimizing a regularized empirical loss with SGD
SGD stands for Stochastic Gradient Descent: the gradient of the loss is
    estimated one sample at a time and the model is updated along the way with
a decreasing strength schedule (aka learning rate).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
This implementation works with data represented as dense numpy arrays of
floating point values for the features.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, 'squared_loss', 'huber', 'epsilon_insensitive', \
or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'squared_loss' which refers
to the ordinary least squares fit. 'huber' modifies 'squared_loss' to
focus less on getting outliers correct by switching from squared to
linear loss past a distance of epsilon. 'epsilon_insensitive' ignores
errors less than epsilon and is linear past that; this is the loss
function used in SVR. 'squared_epsilon_insensitive' is the same but
becomes squared loss past a tolerance of epsilon.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
Also used to compute learning_rate when set to 'optimal'.
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level.
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
learning_rate : string, optional
The learning rate schedule:
- 'constant': eta = eta0
- 'optimal': eta = 1.0 / (alpha * (t + t0)) [default]
- 'invscaling': eta = eta0 / pow(t, power_t)
where t0 is chosen by a heuristic proposed by Leon Bottou.
eta0 : double, optional
The initial learning rate [default 0.01].
power_t : double, optional
The exponent for inverse scaling learning rate [default 0.25].
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So ``average=10`` will begin averaging after seeing 10
samples.
Attributes
----------
coef_ : array, shape (n_features,)
Weights assigned to the features.
intercept_ : array, shape (1,)
The intercept term.
average_coef_ : array, shape (n_features,)
Averaged weights assigned to the features.
average_intercept_ : array, shape (1,)
The averaged intercept term.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = linear_model.SGDRegressor()
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
SGDRegressor(alpha=0.0001, average=False, epsilon=0.1, eta0=0.01,
fit_intercept=True, l1_ratio=0.15, learning_rate='invscaling',
loss='squared_loss', n_iter=5, penalty='l2', power_t=0.25,
random_state=None, shuffle=True, verbose=0, warm_start=False)
See also
--------
Ridge, ElasticNet, Lasso, SVR
"""
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(SGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
| bsd-3-clause |
Jim61C/VTT_Show_Atten_And_Tell | prepro.py | 4 | 8670 | from scipy import ndimage
from collections import Counter
from core.vggnet import Vgg19
from core.utils import *
import tensorflow as tf
import numpy as np
import pandas as pd
import hickle
import os
import json
def _process_caption_data(caption_file, image_dir, max_length):
with open(caption_file) as f:
caption_data = json.load(f)
    # id_to_filename is a dictionary such as {image_id: filename}
id_to_filename = {image['id']: image['file_name'] for image in caption_data['images']}
    # data is a list of dictionaries, each with 'caption', 'file_name' and 'image_id' as keys.
data = []
for annotation in caption_data['annotations']:
image_id = annotation['image_id']
annotation['file_name'] = os.path.join(image_dir, id_to_filename[image_id])
data += [annotation]
# convert to pandas dataframe (for later visualization or debugging)
caption_data = pd.DataFrame.from_dict(data)
del caption_data['id']
caption_data.sort_values(by='image_id', inplace=True)
caption_data = caption_data.reset_index(drop=True)
del_idx = []
for i, caption in enumerate(caption_data['caption']):
caption = caption.replace('.','').replace(',','').replace("'","").replace('"','')
caption = caption.replace('&','and').replace('(','').replace(")","").replace('-',' ')
caption = " ".join(caption.split()) # replace multiple spaces
caption_data.set_value(i, 'caption', caption.lower())
if len(caption.split(" ")) > max_length:
del_idx.append(i)
# delete captions if size is larger than max_length
print "The number of captions before deletion: %d" %len(caption_data)
caption_data = caption_data.drop(caption_data.index[del_idx])
caption_data = caption_data.reset_index(drop=True)
print "The number of captions after deletion: %d" %len(caption_data)
return caption_data
def _build_vocab(annotations, threshold=1):
counter = Counter()
max_len = 0
for i, caption in enumerate(annotations['caption']):
        words = caption.split(' ')  # caption contains only lower-case words
for w in words:
counter[w] +=1
if len(caption.split(" ")) > max_len:
max_len = len(caption.split(" "))
vocab = [word for word in counter if counter[word] >= threshold]
print ('Filtered %d words to %d words with word count threshold %d.' % (len(counter), len(vocab), threshold))
word_to_idx = {u'<NULL>': 0, u'<START>': 1, u'<END>': 2}
idx = 3
for word in vocab:
word_to_idx[word] = idx
idx += 1
print "Max length of caption: ", max_len
return word_to_idx
def _build_caption_vector(annotations, word_to_idx, max_length=15):
n_examples = len(annotations)
captions = np.ndarray((n_examples,max_length+2)).astype(np.int32)
for i, caption in enumerate(annotations['caption']):
words = caption.split(" ") # caption contrains only lower-case words
cap_vec = []
cap_vec.append(word_to_idx['<START>'])
for word in words:
if word in word_to_idx:
cap_vec.append(word_to_idx[word])
cap_vec.append(word_to_idx['<END>'])
        # pad short captions with the special null token '<NULL>' to make a fixed-size vector
if len(cap_vec) < (max_length + 2):
for j in range(max_length + 2 - len(cap_vec)):
cap_vec.append(word_to_idx['<NULL>'])
captions[i, :] = np.asarray(cap_vec)
print "Finished building caption vectors"
return captions
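# Illustrative example (hypothetical vocabulary): with max_length=15 the caption
# "a dog runs" becomes the index sequence
# [<START>, a, dog, runs, <END>, <NULL>, ..., <NULL>] of fixed length max_length + 2.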
def _build_file_names(annotations):
image_file_names = []
id_to_idx = {}
idx = 0
image_ids = annotations['image_id']
file_names = annotations['file_name']
for image_id, file_name in zip(image_ids, file_names):
if not image_id in id_to_idx:
id_to_idx[image_id] = idx
image_file_names.append(file_name)
idx += 1
file_names = np.asarray(image_file_names)
return file_names, id_to_idx
def _build_image_idxs(annotations, id_to_idx):
image_idxs = np.ndarray(len(annotations), dtype=np.int32)
image_ids = annotations['image_id']
for i, image_id in enumerate(image_ids):
image_idxs[i] = id_to_idx[image_id]
return image_idxs
def main():
# batch size for extracting feature vectors from vggnet.
batch_size = 100
# maximum length of caption(number of word). if caption is longer than max_length, deleted.
max_length = 15
    # words occurring fewer than word_count_threshold times in the training set are dropped from the vocabulary
word_count_threshold = 1
# vgg model path
vgg_model_path = './data/imagenet-vgg-verydeep-19.mat'
    caption_file = 'data/annotations/captions_train2014.json'
    image_dir = 'image/train2014_resized/'  # note: the calls below pass their paths explicitly
# about 80000 images and 400000 captions for train dataset
train_dataset = _process_caption_data(caption_file='data/annotations/captions_train2014.json',
image_dir='image/train2014_resized/',
max_length=max_length)
# about 40000 images and 200000 captions
val_dataset = _process_caption_data(caption_file='data/annotations/captions_val2014.json',
image_dir='image/val2014_resized/',
max_length=max_length)
# about 4000 images and 20000 captions for val / test dataset
val_cutoff = int(0.1 * len(val_dataset))
test_cutoff = int(0.2 * len(val_dataset))
print 'Finished processing caption data'
save_pickle(train_dataset, 'data/train/train.annotations.pkl')
save_pickle(val_dataset[:val_cutoff], 'data/val/val.annotations.pkl')
save_pickle(val_dataset[val_cutoff:test_cutoff].reset_index(drop=True), 'data/test/test.annotations.pkl')
for split in ['train', 'val', 'test']:
annotations = load_pickle('./data/%s/%s.annotations.pkl' % (split, split))
if split == 'train':
word_to_idx = _build_vocab(annotations=annotations, threshold=word_count_threshold)
save_pickle(word_to_idx, './data/%s/word_to_idx.pkl' % split)
captions = _build_caption_vector(annotations=annotations, word_to_idx=word_to_idx, max_length=max_length)
save_pickle(captions, './data/%s/%s.captions.pkl' % (split, split))
file_names, id_to_idx = _build_file_names(annotations)
save_pickle(file_names, './data/%s/%s.file.names.pkl' % (split, split))
image_idxs = _build_image_idxs(annotations, id_to_idx)
save_pickle(image_idxs, './data/%s/%s.image.idxs.pkl' % (split, split))
# prepare reference captions to compute bleu scores later
image_ids = {}
feature_to_captions = {}
i = -1
for caption, image_id in zip(annotations['caption'], annotations['image_id']):
if not image_id in image_ids:
image_ids[image_id] = 0
i += 1
feature_to_captions[i] = []
feature_to_captions[i].append(caption.lower() + ' .')
save_pickle(feature_to_captions, './data/%s/%s.references.pkl' % (split, split))
print "Finished building %s caption dataset" %split
# extract conv5_3 feature vectors
vggnet = Vgg19(vgg_model_path)
vggnet.build()
with tf.Session() as sess:
tf.initialize_all_variables().run()
for split in ['train', 'val', 'test']:
anno_path = './data/%s/%s.annotations.pkl' % (split, split)
save_path = './data/%s/%s.features.hkl' % (split, split)
annotations = load_pickle(anno_path)
image_path = list(annotations['file_name'].unique())
n_examples = len(image_path)
all_feats = np.ndarray([n_examples, 196, 512], dtype=np.float32)
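            # 196 x 512 corresponds to the 14 x 14 spatial grid of 512-channel
            # conv5_3 activations that VGG-19 produces for 224 x 224 inputs
            # (the assumed size of the resized images).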
for start, end in zip(range(0, n_examples, batch_size),
range(batch_size, n_examples + batch_size, batch_size)):
image_batch_file = image_path[start:end]
image_batch = np.array(map(lambda x: ndimage.imread(x, mode='RGB'), image_batch_file)).astype(
np.float32)
feats = sess.run(vggnet.features, feed_dict={vggnet.images: image_batch})
all_feats[start:end, :] = feats
print ("Processed %d %s features.." % (end, split))
# use hickle to save huge feature vectors
hickle.dump(all_feats, save_path)
print ("Saved %s.." % (save_path))
if __name__ == "__main__":
main() | mit |
idlead/scikit-learn | sklearn/externals/joblib/__init__.py | 23 | 4764 | """ Joblib is a set of tools to provide **lightweight pipelining in
Python**. In particular, joblib offers:
1. transparent disk-caching of the output values and lazy re-evaluation
(memoize pattern)
2. easy simple parallel computing
3. logging and tracing of the execution
Joblib is optimized to be **fast** and **robust** in particular on large
data and has specific optimizations for `numpy` arrays. It is
**BSD-licensed**.
============================== ============================================
**User documentation**: http://pythonhosted.org/joblib
**Download packages**: http://pypi.python.org/pypi/joblib#downloads
**Source code**: http://github.com/joblib/joblib
**Report issues**: http://github.com/joblib/joblib/issues
============================== ============================================
Vision
--------
The vision is to provide tools to easily achieve better performance and
reproducibility when working with long running jobs.
* **Avoid computing twice the same thing**: code is rerun over and
  over, for instance when prototyping computation-heavy jobs (as in
  scientific development), but hand-crafted solutions to alleviate this
  issue are error-prone and often lead to unreproducible results
* **Persist to disk transparently**: persisting in an efficient way
arbitrary objects containing large data is hard. Using
joblib's caching mechanism avoids hand-written persistence and
implicitly links the file on disk to the execution context of
the original Python object. As a result, joblib's persistence is
  good for resuming an application status or computational job, e.g.
after a crash.
Joblib strives to address these problems while **leaving your code and
your flow control as unmodified as possible** (no framework, no new
paradigms).
Main features
------------------
1) **Transparent and fast disk-caching of output value:** a memoize or
make-like functionality for Python functions that works well for
arbitrary Python objects, including very large numpy arrays. Separate
persistence and flow-execution logic from domain logic or algorithmic
code by writing the operations as a set of steps with well-defined
inputs and outputs: Python functions. Joblib can save their
computation to disk and rerun it only if necessary::
>>> from sklearn.externals.joblib import Memory
>>> mem = Memory(cachedir='/tmp/joblib')
>>> import numpy as np
>>> a = np.vander(np.arange(3)).astype(np.float)
>>> square = mem.cache(np.square)
>>> b = square(a) # doctest: +ELLIPSIS
________________________________________________________________________________
[Memory] Calling square...
square(array([[ 0., 0., 1.],
[ 1., 1., 1.],
[ 4., 2., 1.]]))
___________________________________________________________square - 0...s, 0.0min
>>> c = square(a)
>>> # The above call did not trigger an evaluation
2) **Embarrassingly parallel helper:** to make it easy to write readable
parallel code and debug it quickly::
>>> from sklearn.externals.joblib import Parallel, delayed
>>> from math import sqrt
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
3) **Logging/tracing:** The different functionalities will
   progressively acquire better logging mechanisms to help track what
   has been run, and capture I/O easily. In addition, Joblib will
   provide a few I/O primitives, to easily define logging and
   display streams, and provide a way of compiling a report.
We want to be able to quickly inspect what has been run.
4) **Fast compressed Persistence**: a replacement for pickle to work
efficiently on Python objects containing large data (
*joblib.dump* & *joblib.load* ).
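   A minimal sketch of dump/load (illustrative path; execution skipped)::
      >>> from sklearn.externals.joblib import dump, load  # doctest: +SKIP
      >>> _ = dump({'a': [1, 2, 3]}, '/tmp/joblib_demo.pkl')  # doctest: +SKIP
      >>> load('/tmp/joblib_demo.pkl')  # doctest: +SKIP
      {'a': [1, 2, 3]}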
..
>>> import shutil ; shutil.rmtree('/tmp/joblib/')
"""
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = '0.9.3'
from .memory import Memory, MemorizedResult
from .logger import PrintTime
from .logger import Logger
from .hashing import hash
from .numpy_pickle import dump
from .numpy_pickle import load
from .parallel import Parallel
from .parallel import delayed
from .parallel import cpu_count
| bsd-3-clause |
poryfly/scikit-learn | sklearn/cross_decomposition/cca_.py | 209 | 3150 | from .pls_ import _PLS
__all__ = ['CCA']
class CCA(_PLS):
"""CCA Canonical Correlation Analysis.
CCA inherits from PLS with mode="B" and deflation_mode="canonical".
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, (default 2).
number of components to keep.
scale : boolean, (default True)
        whether to scale the data.
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop
tol : non-negative real, default 1e-06.
the tolerance used in the iterative algorithm
copy : boolean
        Whether the deflation should be done on a copy. Leave the default
        value (True) unless you don't care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component.
Notes
-----
    For each component k, find the weights u, v that maximize
    corr(Xk u, Yk v), such that ``|u| = |v| = 1``
Note that it maximizes only the correlations between the scores.
The residual matrix of X (Xk+1) block is obtained by the deflation on the
current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current Y score.
Examples
--------
>>> from sklearn.cross_decomposition import CCA
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [3.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> cca = CCA(n_components=1)
>>> cca.fit(X, Y)
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
CCA(copy=True, max_iter=500, n_components=1, scale=True, tol=1e-06)
>>> X_c, Y_c = cca.transform(X, Y)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In french but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
PLSCanonical
PLSSVD
"""
def __init__(self, n_components=2, scale=True,
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="canonical", mode="B",
norm_y_weights=True, algorithm="nipals",
max_iter=max_iter, tol=tol, copy=copy)
| bsd-3-clause |
garrettkatz/directional-fibers | dfibers/experiments/levy_opt/levy_opt.py | 1 | 6952 | """
Measure global optimization performance of Levy function
"""
import sys, time
import numpy as np
import matplotlib.pyplot as pt
import multiprocessing as mp
import dfibers.traversal as tv
import dfibers.numerical_utilities as nu
import dfibers.logging_utilities as lu
import dfibers.fixed_points as fx
import dfibers.solvers as sv
import dfibers.examples.levy as lv
from mpl_toolkits.mplot3d import Axes3D
def run_trial(args):
basename, sample, timeout = args
stop_time = time.clock() + timeout
logfile = open("%s_s%d.log"%(basename,sample),"w")
# Set up fiber arguments
np.random.seed()
v = 20*np.random.rand(2,1) - 10 # random point in domain
c = lv.f(v) # direction at that point
c = c + 0.1*np.random.randn(2,1) # perturb for more variability
fiber_kwargs = {
"f": lv.f,
"ef": lv.ef,
"Df": lv.Df,
"compute_step_amount": lambda trace: (0.0001, 0),
"v": v,
"c": c,
"stop_time": stop_time,
"terminate": lambda trace: (np.fabs(trace.x[:-1]) > 10).any(),
"max_solve_iterations": 2**5,
}
solve_start = time.clock()
# Run in one direction
solution = sv.fiber_solver(
logger=lu.Logger(logfile).plus_prefix("+: "),
**fiber_kwargs)
X1 = np.concatenate(solution["Fiber trace"].points, axis=1)
V1 = solution["Fixed points"]
z = solution["Fiber trace"].z_initial
# print("Status: %s\n"%solution["Fiber trace"].status)
# Run in other direction (negate initial tangent)
solution = sv.fiber_solver(
z= -z,
logger=lu.Logger(logfile).plus_prefix("-: "),
**fiber_kwargs)
X2 = np.concatenate(solution["Fiber trace"].points, axis=1)
V2 = solution["Fixed points"]
# print("Status: %s\n"%solution["Fiber trace"].status)
# Join fiber segments
fiber = np.concatenate((np.fliplr(X1), X2), axis=1)
# Union solutions
fxpts = fx.sanitize_points(
np.concatenate((V1, V2), axis=1),
f = lv.f,
ef = lv.ef,
Df = lv.Df,
duplicates = lambda V, v: (np.fabs(V - v) < 10**-6).all(axis=0),
)
# Save results
with open("%s_s%d.npz"%(basename,sample), 'w') as rf: np.savez(rf, **{
"fxpts": fxpts,
"fiber": fiber,
"runtime": time.clock() - solve_start })
logfile.close()
def run_experiment(basename, num_samples, timeout, num_procs=0):
pool_args = []
for sample in range(num_samples):
pool_args.append((basename, sample, timeout))
if num_procs > 0:
num_procs = min(num_procs, mp.cpu_count())
print("using %d processes..."%num_procs)
pool = mp.Pool(processes=num_procs)
pool.map(run_trial, pool_args)
pool.close()
pool.join()
else:
for pa in pool_args: run_trial(pa)
def compile_results(basename, num_samples):
L = []
F = []
runtimes = []
for sample in range(num_samples):
with open("%s_s%d.npz"%(basename,sample), 'r') as rf: data = dict(np.load(rf))
fxpts = data["fxpts"]
Fs = np.fabs(lv.f(fxpts)).max(axis=0)
Ls = lv.levy(fxpts)
within = (np.fabs(fxpts) < 10).all(axis=0)
mean_within = Ls[within].mean() if within.any() else np.nan
print("sample %d: %d secs, %d solns, mean %f, mean within %f, min %f"%(
sample, data["runtime"], len(Ls), Ls.mean(), mean_within, Ls.min()))
L.append(Ls)
F.append(Fs)
runtimes.append(data["runtime"])
counts = np.array([len(Ls) for Ls in L])
bests = np.array([Ls.min() for Ls in L])
resids = np.array([Fs.max() for Fs in F])
runtimes = np.array(runtimes)
print("avg count = %d, avg best = %f, avg resid = %f, best best = %f"%(
counts.mean(), bests.mean(), resids.mean(), bests.min()))
return counts, bests, runtimes
def plot_results(basename, num_samples, counts, bests, runtimes, timeout):
### Optimization order stats
pt.figure(figsize=(5,4))
pt.subplot(2,1,1)
pt.plot(np.sort(bests), '-k.')
pt.xlabel("Ordered samples")
pt.ylabel("Best objective value")
##### Work complexity
pt.subplot(2,1,2)
terms = (runtimes < timeout)
pt.plot(runtimes[terms], bests[terms], 'k+', markerfacecolor='none')
pt.plot(runtimes[~terms], bests[~terms], 'ko', markerfacecolor='none')
pt.legend(["terminated","timed out"])
pt.xlabel("Runtime (seconds)")
pt.ylabel("Best objective value")
pt.tight_layout()
pt.show()
### Fiber visuals
pt.figure(figsize=(4,7))
# objective fun
X_surface, Y_surface = np.mgrid[-10:10:100j,-10:10:100j]
L = lv.levy(np.array([X_surface.flatten(), Y_surface.flatten()])).reshape(X_surface.shape)
ax_surface = pt.gcf().add_subplot(2,1,1,projection="3d")
ax_surface.plot_surface(X_surface, Y_surface, L, linewidth=0, antialiased=False, color='gray')
ax_surface.set_xlabel("v0")
ax_surface.set_ylabel("v1")
ax_surface.set_zlabel("levy(v)")
ax_surface.view_init(azim=-80, elev=20)
# fibers
ax = pt.gcf().add_subplot(2,1,2)
X_grid, Y_grid = np.mgrid[-10:10:60j,-10:10:60j]
XY = np.array([X_grid.flatten(), Y_grid.flatten()])
C_XY = lv.f(XY)
ax.quiver(XY[0,:],XY[1,:],C_XY[0,:],C_XY[1,:],color=0.5*np.ones((1,3)),
scale=10,units='xy',angles='xy')
num_plot_samples = 3
sort_idx = np.argsort(bests)
plot_idx = [0] + list(np.random.permutation(num_samples)[:num_plot_samples-1])
samples = sort_idx[plot_idx]
# samples = [41,73,20] # all through global
# samples = [41, 97, 11] # two through global
# samples = [41, 49, 13] # two through global, one horiz not through
# samples = [41, 46, 70] # one through global, one horiz
# samples = [41, 96, 27] # two through global, one almost horiz
samples = [41, 63, 28] # two through global, all interesting
print("samples:")
print(samples)
for i,sample in enumerate(samples[::-1]):
with open("%s_s%d.npz"%(basename,sample), 'r') as rf: data = dict(np.load(rf))
fxpts = data["fxpts"]
fiber = data["fiber"][:,::]
L = lv.levy(fxpts).min()
col = 0.5*float(num_plot_samples-i-1)/num_plot_samples
print(sample,col)
ax.plot(fiber[0],fiber[1],color=(col,col,col,1), linestyle='-', linewidth=1)
pt.plot(fxpts[0],fxpts[1], 'o', color=(col,col,col,1))
pt.xlabel("v0")
pt.ylabel("v1",rotation=0)
pt.yticks(np.linspace(-10,10,5))
pt.xlim([-10,10])
pt.ylim([-10,10])
pt.tight_layout()
pt.show()
if __name__ == "__main__":
basename = "levy_opt"
num_samples = 100
num_plot_samples = 3
timeout = 60*30
num_procs = 10
# run_experiment(basename, num_samples=num_samples, timeout=timeout, num_procs=num_procs)
counts, bests, runtimes = compile_results(basename, num_samples)
plot_results(basename, num_samples, counts, bests, runtimes, timeout)
| mit |
Vimos/scikit-learn | sklearn/kernel_approximation.py | 7 | 18505 | """
The :mod:`sklearn.kernel_approximation` module implements several
approximate kernel feature maps based on Fourier transforms.
"""
# Author: Andreas Mueller <[email protected]>
#
# License: BSD 3 clause
import warnings
import numpy as np
import scipy.sparse as sp
from scipy.linalg import svd
from .base import BaseEstimator
from .base import TransformerMixin
from .utils import check_array, check_random_state, as_float_array
from .utils.extmath import safe_sparse_dot
from .utils.validation import check_is_fitted
from .metrics.pairwise import pairwise_kernels
class RBFSampler(BaseEstimator, TransformerMixin):
"""Approximates feature map of an RBF kernel by Monte Carlo approximation
of its Fourier transform.
It implements a variant of Random Kitchen Sinks.[1]
Read more in the :ref:`User Guide <rbf_kernel_approx>`.
Parameters
----------
gamma : float
Parameter of RBF kernel: exp(-gamma * x^2)
n_components : int
Number of Monte Carlo samples per original feature.
Equals the dimensionality of the computed feature space.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Notes
-----
See "Random Features for Large-Scale Kernel Machines" by A. Rahimi and
Benjamin Recht.
[1] "Weighted Sums of Random Kitchen Sinks: Replacing
minimization with randomization in learning" by A. Rahimi and
Benjamin Recht.
(http://people.eecs.berkeley.edu/~brecht/papers/08.rah.rec.nips.pdf)
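Examples
--------
A minimal usage sketch (the data and parameter values are illustrative):

>>> import numpy as np
>>> from sklearn.kernel_approximation import RBFSampler
>>> X = np.random.RandomState(0).randn(10, 4)
>>> rbf_feature = RBFSampler(gamma=1.0, n_components=50, random_state=0)
>>> X_features = rbf_feature.fit_transform(X)
>>> X_features.shape
(10, 50)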
"""
def __init__(self, gamma=1., n_components=100, random_state=None):
self.gamma = gamma
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X.
Samples random projection according to n_features.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the transformer.
"""
X = check_array(X, accept_sparse='csr')
random_state = check_random_state(self.random_state)
n_features = X.shape[1]
self.random_weights_ = (np.sqrt(2 * self.gamma) * random_state.normal(
size=(n_features, self.n_components)))
self.random_offset_ = random_state.uniform(0, 2 * np.pi,
size=self.n_components)
return self
def transform(self, X, y=None):
"""Apply the approximate feature map to X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'random_weights_')
X = check_array(X, accept_sparse='csr')
projection = safe_sparse_dot(X, self.random_weights_)
projection += self.random_offset_
np.cos(projection, projection)
projection *= np.sqrt(2.) / np.sqrt(self.n_components)
return projection
class SkewedChi2Sampler(BaseEstimator, TransformerMixin):
"""Approximates feature map of the "skewed chi-squared" kernel by Monte
Carlo approximation of its Fourier transform.
Read more in the :ref:`User Guide <skewed_chi_kernel_approx>`.
Parameters
----------
skewedness : float
"skewedness" parameter of the kernel. Needs to be cross-validated.
n_components : int
number of Monte Carlo samples per original feature.
Equals the dimensionality of the computed feature space.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
References
----------
See "Random Fourier Approximations for Skewed Multiplicative Histogram
Kernels" by Fuxin Li, Catalin Ionescu and Cristian Sminchisescu.
See also
--------
AdditiveChi2Sampler : A different approach for approximating an additive
variant of the chi squared kernel.
sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.
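Examples
--------
A minimal usage sketch (the data and parameter values are illustrative):

>>> import numpy as np
>>> from sklearn.kernel_approximation import SkewedChi2Sampler
>>> X = np.abs(np.random.RandomState(0).randn(6, 3))  # non-negative data
>>> chi2_feature = SkewedChi2Sampler(skewedness=0.01, n_components=20, random_state=0)
>>> X_features = chi2_feature.fit_transform(X)
>>> X_features.shape
(6, 20)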
"""
def __init__(self, skewedness=1., n_components=100, random_state=None):
self.skewedness = skewedness
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X.
Samples random projection according to n_features.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the transformer.
"""
X = check_array(X)
random_state = check_random_state(self.random_state)
n_features = X.shape[1]
uniform = random_state.uniform(size=(n_features, self.n_components))
# transform by inverse CDF of sech
self.random_weights_ = (1. / np.pi
* np.log(np.tan(np.pi / 2. * uniform)))
self.random_offset_ = random_state.uniform(0, 2 * np.pi,
size=self.n_components)
return self
def transform(self, X, y=None):
"""Apply the approximate feature map to X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features. All values of X must be
strictly greater than "-skewedness".
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'random_weights_')
X = as_float_array(X, copy=True)
X = check_array(X, copy=False)
if (X <= -self.skewedness).any():
raise ValueError("X may not contain entries smaller than"
" -skewedness.")
X += self.skewedness
np.log(X, X)
projection = safe_sparse_dot(X, self.random_weights_)
projection += self.random_offset_
np.cos(projection, projection)
projection *= np.sqrt(2.) / np.sqrt(self.n_components)
return projection
class AdditiveChi2Sampler(BaseEstimator, TransformerMixin):
"""Approximate feature map for additive chi2 kernel.
Samples the Fourier transform of the kernel characteristic
at regular intervals.
Since the kernel that is to be approximated is additive, the components of
the input vectors can be treated separately. Each entry in the original
space is transformed into 2*sample_steps-1 features, where sample_steps is
a parameter of the method. Typical values of sample_steps include 1, 2 and
3.
Optimal choices for the sampling interval for certain data ranges can be
computed (see the reference). The default values should be reasonable.
Read more in the :ref:`User Guide <additive_chi_kernel_approx>`.
Parameters
----------
sample_steps : int, optional
Gives the number of (complex) sampling points.
sample_interval : float, optional
Sampling interval. Must be specified when sample_steps not in {1,2,3}.
Notes
-----
This estimator approximates a slightly different version of the additive
chi squared kernel than ``metric.additive_chi2`` computes.
See also
--------
SkewedChi2Sampler : A Fourier-approximation to a non-additive variant of
the chi squared kernel.
sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.
sklearn.metrics.pairwise.additive_chi2_kernel : The exact additive chi
squared kernel.
References
----------
See `"Efficient additive kernels via explicit feature maps"
<http://www.robots.ox.ac.uk/~vedaldi/assets/pubs/vedaldi11efficient.pdf>`_
A. Vedaldi and A. Zisserman, Pattern Analysis and Machine Intelligence,
2011
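Examples
--------
A minimal usage sketch (the data are illustrative); note the output width of
n_features * (2*sample_steps - 1):

>>> import numpy as np
>>> from sklearn.kernel_approximation import AdditiveChi2Sampler
>>> X = np.abs(np.random.RandomState(0).randn(4, 3))  # must be non-negative
>>> chi2sampler = AdditiveChi2Sampler(sample_steps=2)
>>> X_transformed = chi2sampler.fit_transform(X)
>>> X_transformed.shape
(4, 9)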
"""
def __init__(self, sample_steps=2, sample_interval=None):
self.sample_steps = sample_steps
self.sample_interval = sample_interval
def fit(self, X, y=None):
"""Set parameters."""
X = check_array(X, accept_sparse='csr')
if self.sample_interval is None:
# See reference, figure 2 c)
if self.sample_steps == 1:
self.sample_interval_ = 0.8
elif self.sample_steps == 2:
self.sample_interval_ = 0.5
elif self.sample_steps == 3:
self.sample_interval_ = 0.4
else:
raise ValueError("If sample_steps is not in [1, 2, 3],"
" you need to provide sample_interval")
else:
self.sample_interval_ = self.sample_interval
return self
def transform(self, X, y=None):
"""Apply approximate feature map to X.
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features)
Returns
-------
X_new : {array, sparse matrix}, \
shape = (n_samples, n_features * (2*sample_steps - 1))
Whether the return value is an array or a sparse matrix depends on
the type of the input X.
"""
msg = ("%(name)s is not fitted. Call fit to set the parameters before"
" calling transform")
check_is_fitted(self, "sample_interval_", msg=msg)
X = check_array(X, accept_sparse='csr')
sparse = sp.issparse(X)
# check if X has negative values. Doesn't play well with np.log.
if ((X.data if sparse else X) < 0).any():
raise ValueError("Entries of X must be non-negative.")
# zeroth component
# 1/cosh = sech
# cosh(0) = 1.0
transf = self._transform_sparse if sparse else self._transform_dense
return transf(X)
def _transform_dense(self, X):
non_zero = (X != 0.0)
X_nz = X[non_zero]
X_step = np.zeros_like(X)
X_step[non_zero] = np.sqrt(X_nz * self.sample_interval_)
X_new = [X_step]
log_step_nz = self.sample_interval_ * np.log(X_nz)
step_nz = 2 * X_nz * self.sample_interval_
for j in range(1, self.sample_steps):
factor_nz = np.sqrt(step_nz /
np.cosh(np.pi * j * self.sample_interval_))
X_step = np.zeros_like(X)
X_step[non_zero] = factor_nz * np.cos(j * log_step_nz)
X_new.append(X_step)
X_step = np.zeros_like(X)
X_step[non_zero] = factor_nz * np.sin(j * log_step_nz)
X_new.append(X_step)
return np.hstack(X_new)
def _transform_sparse(self, X):
indices = X.indices.copy()
indptr = X.indptr.copy()
data_step = np.sqrt(X.data * self.sample_interval_)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new = [X_step]
log_step_nz = self.sample_interval_ * np.log(X.data)
step_nz = 2 * X.data * self.sample_interval_
for j in range(1, self.sample_steps):
factor_nz = np.sqrt(step_nz /
np.cosh(np.pi * j * self.sample_interval_))
data_step = factor_nz * np.cos(j * log_step_nz)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new.append(X_step)
data_step = factor_nz * np.sin(j * log_step_nz)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new.append(X_step)
return sp.hstack(X_new)
class Nystroem(BaseEstimator, TransformerMixin):
"""Approximate a kernel map using a subset of the training data.
Constructs an approximate feature map for an arbitrary kernel
using a subset of the data as basis.
Read more in the :ref:`User Guide <nystroem_kernel_approx>`.
Parameters
----------
kernel : string or callable, default="rbf"
Kernel map to be approximated. A callable should accept two arguments
and the keyword arguments passed to this object as kernel_params, and
should return a floating point number.
n_components : int
Number of features to construct.
How many data points will be used to construct the mapping.
gamma : float, default=None
Gamma parameter for the RBF, laplacian, polynomial, exponential chi2
and sigmoid kernels. Interpretation of the default value is left to
the kernel; see the documentation for sklearn.metrics.pairwise.
Ignored by other kernels.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
kernel_params : mapping of string to any, optional
Additional parameters (keyword arguments) for kernel function passed
as callable object.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
components_ : array, shape (n_components, n_features)
Subset of training points used to construct the feature map.
component_indices_ : array, shape (n_components)
Indices of ``components_`` in the training set.
normalization_ : array, shape (n_components, n_components)
Normalization matrix needed for embedding.
Square root of the kernel matrix on ``components_``.
References
----------
* Williams, C.K.I. and Seeger, M.
"Using the Nystroem method to speed up kernel machines",
Advances in neural information processing systems 2001
* T. Yang, Y. Li, M. Mahdavi, R. Jin and Z. Zhou
"Nystroem Method vs Random Fourier Features: A Theoretical and Empirical
Comparison",
Advances in Neural Information Processing Systems 2012
See also
--------
RBFSampler : An approximation to the RBF kernel using random Fourier
features.
sklearn.metrics.pairwise.kernel_metrics : List of built-in kernels.
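Examples
--------
A minimal usage sketch (the data and parameter values are illustrative):

>>> import numpy as np
>>> from sklearn.kernel_approximation import Nystroem
>>> X = np.random.RandomState(0).randn(20, 5)
>>> nystroem = Nystroem(kernel="rbf", gamma=0.2, n_components=10, random_state=0)
>>> X_features = nystroem.fit_transform(X)
>>> X_features.shape
(20, 10)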
"""
def __init__(self, kernel="rbf", gamma=None, coef0=1, degree=3,
kernel_params=None, n_components=100, random_state=None):
self.kernel = kernel
self.gamma = gamma
self.coef0 = coef0
self.degree = degree
self.kernel_params = kernel_params
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit estimator to data.
Samples a subset of training points, computes kernel
on these and computes normalization matrix.
Parameters
----------
X : array-like, shape=(n_samples, n_feature)
Training data.
"""
X = check_array(X, accept_sparse='csr')
rnd = check_random_state(self.random_state)
n_samples = X.shape[0]
# get basis vectors
if self.n_components > n_samples:
# XXX should we just bail?
n_components = n_samples
warnings.warn("n_components > n_samples. This is not possible.\n"
"n_components was set to n_samples, which results"
" in inefficient evaluation of the full kernel.")
else:
n_components = self.n_components
n_components = min(n_samples, n_components)
inds = rnd.permutation(n_samples)
basis_inds = inds[:n_components]
basis = X[basis_inds]
basis_kernel = pairwise_kernels(basis, metric=self.kernel,
filter_params=True,
**self._get_kernel_params())
# sqrt of kernel matrix on basis vectors
U, S, V = svd(basis_kernel)
S = np.maximum(S, 1e-12)
self.normalization_ = np.dot(U / np.sqrt(S), V)
self.components_ = basis
self.component_indices_ = inds
return self
def transform(self, X):
"""Apply feature map to X.
Computes an approximate feature map using the kernel
between some training points and X.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Data to transform.
Returns
-------
X_transformed : array, shape=(n_samples, n_components)
Transformed data.
"""
check_is_fitted(self, 'components_')
X = check_array(X, accept_sparse='csr')
kernel_params = self._get_kernel_params()
embedded = pairwise_kernels(X, self.components_,
metric=self.kernel,
filter_params=True,
**kernel_params)
return np.dot(embedded, self.normalization_.T)
def _get_kernel_params(self):
params = self.kernel_params
if params is None:
params = {}
if not callable(self.kernel):
params['gamma'] = self.gamma
params['degree'] = self.degree
params['coef0'] = self.coef0
return params
| bsd-3-clause |
GitYiheng/reinforcement_learning_test | test00_previous_files/mountaincar_q_learning.py | 1 | 4304 | import gym
import os
import sys
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from gym import wrappers
from datetime import datetime
from sklearn.pipeline import FeatureUnion
from sklearn.preprocessing import StandardScaler
from sklearn.kernel_approximation import RBFSampler
from sklearn.linear_model import SGDRegressor
class FeatureTransformer:
def __init__(self, env, n_components=500):
observation_examples = np.array([env.observation_space.sample() for x in range(1000)])
scaler = StandardScaler()
scaler.fit(observation_examples)
featurizer = FeatureUnion([
("rbf1", RBFSampler(gamma=5.0, n_components=n_components)),
("rbf2", RBFSampler(gamma=4.0, n_components=n_components)),
("rbf3", RBFSampler(gamma=3.0, n_components=n_components)),
("rbf4", RBFSampler(gamma=2.0, n_components=n_components)),
("rbf5", RBFSampler(gamma=1.0, n_components=n_components)),
("rbf6", RBFSampler(gamma=0.5, n_components=n_components)),
])
example_features = featurizer.fit_transform(scaler.transform(observation_examples))
self.dimensions = example_features.shape[1]
self.scaler = scaler
self.featurizer = featurizer
def transform(self, observation):
scaled = self.scaler.transform(observation)
return self.featurizer.transform(scaled)
class Model:
def __init__(self, env, feature_transformer, learning_rate):
self.env = env
self.models = []
self.feature_transformer = feature_transformer
for i in range(env.action_space.n):
model = SGDRegressor(learning_rate=learning_rate)
model.partial_fit(feature_transformer.transform([env.reset()]), [0])
self.models.append(model)
def predict(self, s):
X = self.feature_transformer.transform([s])
assert(len(X.shape) == 2)
return np.array([m.predict(X)[0] for m in self.models])
def update(self, s, a, G):
X = self.feature_transformer.transform([s])
assert(len(X.shape) == 2)
self.models[a].partial_fit(X, [G])
def sample_action(self, s, eps):
if np.random.random() < eps:
return self.env.action_space.sample()
else:
return np.argmax(self.predict(s))
def play_one(model, eps, gamma):
observation = env.reset()
done = False
totalreward = 0
iters = 0
while not done and iters < 1000:
action = model.sample_action(observation, eps)
prev_observation = observation
observation, reward, done, info = env.step(action)
# Update the model
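# One-step Q-learning target: G = r + gamma * max_a Q(s', a). model.predict
# returns one value per action here, so the max is taken over that array.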
G = reward + gamma*np.max(model.predict(observation))
model.update(prev_observation, action, G)
totalreward += reward
iters += 1
return totalreward
def plot_cost_to_go(env, estimator, num_tiles=20):
x = np.linspace(env.observation_space.low[0], env.observation_space.high[0], num=num_tiles)
y = np.linspace(env.observation_space.low[1], env.observation_space.high[1], num=num_tiles)
X, Y = np.meshgrid(x, y)
Z = np.apply_along_axis(lambda _: -np.max(estimator.predict(_)), 2, np.dstack([X, Y]))
fig = plt.figure(figsize=(10, 5))
ax = fig.add_subplot(111, projection='3d')
surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=matplotlib.cm.coolwarm, vmin=-1.0, vmax=1.0)
ax.set_xlabel('Position')
ax.set_ylabel('Velocity')
ax.set_zlabel('Cost-To-Go == -V(s)')
ax.set_title("Cost-To-Go Function")
fig.colorbar(surf)
plt.show()
def plot_running_avg(totalrewards):
N = len(totalrewards)
running_avg = np.empty(N)
for t in range(N):
running_avg[t] = totalrewards[max(0, t-100):(t+1)].mean()
plt.plot(running_avg)
plt.title("Running Average")
plt.show()
def main():
env = gym.make('MountainCar-v0')
ft = FeatureTransformer(env)
model = Model(env, ft, "constant")
gamma = 0.99
if 'monitor' in sys.argv:
filename = os.path.basename(__file__).split('.')[0]
monitor_dir = './' + filename + '_' + str(datetime.now())
env = wrappers.Monitor(env, monitor_dir)
N = 300
totalrewards = np.empty(N)
for n in range(N):
eps = 0.1*(0.97**n)
totalreward = play_one(model, eps, gamma)
totalrewards[n] = totalreward
print("episode:", n, "total reward:", totalreward)
print("avg reward for last 100 episodes:", totalrewards[-100:].mean())
print("total steps:", -totalrewards.sum())
plt.plot(totalrewards)
plt.title("Rewards")
plt.show()
plot_running_avg(totalrewards)
plot_cost_to_go(env, model)
if __name__ == '__main__':
main() | mit |
wilselby/diy_driverless_car_ROS | rover_cv/camera_cal/src/camera_cal/camera_cal.py | 1 | 6503 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#https://github.com/paramaggarwal/CarND-Advanced-Lane-Lines/blob/master/Notebook.ipynb
from __future__ import print_function
from __future__ import division
import sys
import traceback
import rospy
import numpy as np
import cv2
import pickle
import glob
import time
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from std_msgs.msg import String
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
class camera_calibarion(object):
def __init__(self):
"""ROS Subscriptions """
self.image_pub = rospy.Publisher("/camera_calibation/image_corrected",Image, queue_size=10)
self.image_sub = rospy.Subscriber("/cam/camera_/image_raw",Image,self.cvt_image)
""" Variables """
self.bridge = CvBridge()
self.latestImage = None
self.outputImage = None
self.process = False
self.calibrated = False
self.correctedImage = None
self.mtx = None
self.dist = None
def cvt_image(self,data):
try:
self.latestImage = self.bridge.imgmsg_to_cv2(data, "bgr8")
except CvBridgeError as e:
print(e)
if self.process != True:
self.process = True
def camera_cal(self, image):
# termination criteria
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
nx = 8
ny = 6
dst = np.copy(image)
# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
objp = np.zeros((ny * nx, 3), np.float32)
objp[:,:2] = np.mgrid[0:nx, 0:ny].T.reshape(-1,2)
# Arrays to store object points and image points from all the images.
objpoints = [] # 3d points in real world space
imgpoints = [] # 2d points in image plane.
# Search for chessboard corners
grey = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
#ret_thresh, mask = cv2.threshold(grey, 30, 255, cv2.THRESH_BINARY)
ret, corners = cv2.findChessboardCorners(image, (nx, ny), None) #flags=(cv2.cv.CV_CALIB_CB_ADAPTIVE_THRESH + cv2.cv.CV_CALIB_CB_FILTER_QUADS))
# If found, add object points, image points
if ret == True:
objpoints.append(objp)
cv2.cornerSubPix(grey,corners, (11,11), (-1,-1), criteria)
imgpoints.append(corners)
self.calibrated = True
print ("FOUND!")
#Draw and display the corners
cv2.drawChessboardCorners(image, (nx, ny), corners, ret)
# Do camera calibration given object points and image points
ret, self.mtx, self.dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, grey.shape[::-1], None, None)
# Save the camera calibration result for later use (we won't worry about rvecs / tvecs)
dist_pickle = {}
dist_pickle["mtx"] = self.mtx
dist_pickle["dist"] = self.dist
dist_pickle['objpoints'] = objpoints
dist_pickle['imgpoints'] = imgpoints
pickle.dump( dist_pickle, open( "/home/wil/ros/catkin_ws/src/av_sim/computer_vision/camera_calibration/data/camera_cal_pickle.p", "wb" ) )
#else:
#print("Searching...")
return image
def drawQuad(self, image, points, color=[255, 0, 0], thickness=4):
p1, p2, p3, p4 = points
cv2.line(image, tuple(p1), tuple(p2), color, thickness)
cv2.line(image, tuple(p2), tuple(p3), color, thickness)
cv2.line(image, tuple(p3), tuple(p4), color, thickness)
cv2.line(image, tuple(p4), tuple(p1), color, thickness)
def perspective_transform(self, image, debug=True, size_top=70, size_bottom=370):
height, width = image.shape[0:2]
output_size = height/2
#src = np.float32([[(width/2) - size_top, height*0.65], [(width/2) + size_top, height*0.65], [(width/2) + size_bottom, height-50], [(width/2) - size_bottom, height-50]])
src = np.float32([[512, 450], [675, 454], [707, 560], [347, 568]])
dst = np.float32([[347, height], [707, height], [707, 0], [347, 0]])
#dst = np.float32([[(width/2) - output_size, (height/2) - output_size], [(width/2) + output_size, (height/2) - output_size], [(width/2) + output_size, (height/2) + output_size], [(width/2) - output_size, (height/2) + output_size]])
M = cv2.getPerspectiveTransform(src, dst)
print(M)
warped = cv2.warpPerspective(image, M, (width, height), flags=cv2.INTER_LINEAR)
if debug:
self.drawQuad(image, src, [255, 0, 0])
self.drawQuad(image, dst, [255, 255, 0])
plt.imshow(image)
plt.show()
return warped
def undistort_image(self, image):
return cv2.undistort(image, self.mtx, self.dist, None, self.mtx)
def run(self):
while True:
# Only run loop if we have an image
if self.process:
filename = "/home/wil/ros/catkin_ws/src/av_sim/computer_vision/camera_calibration/data/check_test.png"
image = cv2.imread(filename, flags=cv2.IMREAD_COLOR)
if self.calibrated is not True:
#print("Calibrating...")
cornersImage = self.camera_cal(image)
cvImage = cornersImage
else:
correctedImage = self.undistort_image(self.latestImage) # Distortion Correction Function
transformImage = self.perspective_transform(self.latestImage)
cvImage = transformImage
# Publish Undistorted Image
try:
imgmsg = self.bridge.cv2_to_imgmsg(cvImage, "bgr8") #"mono8" "bgr8"
self.image_pub.publish(imgmsg)
except CvBridgeError as e:
print(e)
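# Illustrative helper sketch (not part of the original node): reload the pickle
# written by camera_cal() above and undistort a single image offline. The
# pickle path is supplied by the caller.
def load_calibration_and_undistort(pickle_path, image):
    with open(pickle_path, 'rb') as f:
        cal = pickle.load(f)
    return cv2.undistort(image, cal["mtx"], cal["dist"], None, cal["mtx"])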
def main(args):
rospy.init_node('camera_calibarion', anonymous=True)
cc = camera_calibarion()
cc.run()
try:
rospy.spin()
except KeyboardInterrupt:
print("Shutting down")
cv2.destroyAllWindows()
if __name__ == '__main__':
main(sys.argv)
| bsd-2-clause |
joshzarrabi/e-mission-server | emission/analysis/classification/inference/mode.py | 2 | 17308 | # Standard imports
from pymongo import MongoClient
import logging
from datetime import datetime
import sys
import os
import numpy as np
import scipy as sp
import time
from datetime import datetime
# Our imports
import emission.analysis.section_features as easf
import emission.core.get_database as edb
# We are not going to use the feature matrix for analysis unless we have at
# least minTrainingSetSize points in the training set (the exact threshold is arbitrary). We could also consider
# combining the old and new training data, but this is really a bootstrapping
# problem, so we don't need to solve it right now.
minTrainingSetSize = 1000
class ModeInferencePipeline:
def __init__(self):
self.featureLabels = ["distance", "duration", "first filter mode", "sectionId", "avg speed",
"speed EV", "speed variance", "max speed", "max accel", "isCommute",
"heading change rate", "stop rate", "velocity change rate",
"start lat", "start lng", "stop lat", "stop lng",
"start hour", "end hour", "close to bus stop", "close to train stop",
"close to airport"]
self.Sections = edb.get_section_db()
def runPipeline(self):
allConfirmedTripsQuery = ModeInferencePipeline.getSectionQueryWithGroundTruth({'$ne': ''})
(self.modeList, self.confirmedSections) = self.loadTrainingDataStep(allConfirmedTripsQuery)
logging.debug("confirmedSections.count() = %s" % (self.confirmedSections.count()))
if (self.confirmedSections.count() < minTrainingSetSize):
logging.info("initial loadTrainingDataStep DONE")
logging.debug("current training set too small, reloading from backup!")
backupSections = MongoClient('localhost').Backup_database.Stage_Sections
(self.modeList, self.confirmedSections) = self.loadTrainingDataStep(allConfirmedTripsQuery, backupSections)
logging.info("loadTrainingDataStep DONE")
(self.bus_cluster, self.train_cluster) = self.generateBusAndTrainStopStep()
logging.info("generateBusAndTrainStopStep DONE")
(self.featureMatrix, self.resultVector) = self.generateFeatureMatrixAndResultVectorStep()
logging.info("generateFeatureMatrixAndResultVectorStep DONE")
(self.cleanedFeatureMatrix, self.cleanedResultVector) = self.cleanDataStep()
logging.info("cleanDataStep DONE")
self.selFeatureIndices = self.selectFeatureIndicesStep()
logging.info("selectFeatureIndicesStep DONE")
self.selFeatureMatrix = self.cleanedFeatureMatrix[:,self.selFeatureIndices]
self.model = self.buildModelStep()
logging.info("buildModelStep DONE")
toPredictTripsQuery = {"$and": [{'type': 'move'},
ModeInferencePipeline.getModeQuery(''),
{'predicted_mode': None}]}
(self.toPredictFeatureMatrix, self.sectionIds, self.sectionUserIds) = self.generateFeatureMatrixAndIDsStep(toPredictTripsQuery)
logging.info("generateFeatureMatrixAndIDsStep DONE")
self.predictedProb = self.predictModesStep()
logging.info("predictModesStep DONE")
self.savePredictionsStep()
logging.info("savePredictionsStep DONE")
# Most of the time, this will be an int, but it can also be a subquery, like
# {'$ne': ''}. This will be used to find the set of entries for the training
# set, for example
@staticmethod
def getModeQuery(groundTruthMode):
# We need the existence check because the corrected mode is not guaranteed to exist,
# and if it doesn't exist, it will end up match the != '' query (since it
# is not '', it is non existent)
correctedModeQuery = lambda mode: {'$and': [{'corrected_mode': {'$exists': True}},
{'corrected_mode': groundTruthMode}]}
return {'$or': [correctedModeQuery(groundTruthMode),
{'confirmed_mode': groundTruthMode}]}
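# For example (illustrative), getModeQuery(4) expands to:
# {'$or': [{'$and': [{'corrected_mode': {'$exists': True}},
#                    {'corrected_mode': 4}]},
#          {'confirmed_mode': 4}]}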
@staticmethod
def getSectionQueryWithGroundTruth(groundTruthMode):
return {"$and": [{'type': 'move'},
ModeInferencePipeline.getModeQuery(groundTruthMode)]}
# TODO: Refactor into generic steps and results
def loadTrainingDataStep(self, sectionQuery, sectionDb = None):
logging.debug("START TRAINING DATA STEP")
if (sectionDb == None):
sectionDb = self.Sections
begin = time.time()
logging.debug("Section data set size = %s" % sectionDb.find({'type': 'move'}).count())
duration = time.time() - begin
logging.debug("Getting dataset size took %s" % (duration))
logging.debug("Querying confirmedSections %s" % (datetime.now()))
begin = time.time()
confirmedSections = sectionDb.find(sectionQuery)
duration = time.time() - begin
logging.debug("Querying confirmedSection took %s" % (duration))
logging.debug("Querying stage modes %s" % (datetime.now()))
begin = time.time()
modeList = []
for mode in edb.get_mode_db().find():
modeList.append(mode)
logging.debug(mode)
duration = time.time() - begin
logging.debug("Querying stage modes took %s" % (duration))
logging.debug("Section query with ground truth %s" % (datetime.now()))
begin = time.time()
logging.debug("Training set total size = %s" %
sectionDb.find(ModeInferencePipeline.getSectionQueryWithGroundTruth({'$ne': ''})).count())
for mode in modeList:
logging.debug("%s: %s" % (mode['mode_name'],
sectionDb.find(ModeInferencePipeline.getSectionQueryWithGroundTruth(mode['mode_id'])).count()))
duration = time.time() - begin
logging.debug("Getting section query with ground truth took %s" % (duration))
duration = time.time() - begin
return (modeList, confirmedSections)
# TODO: Should mode_cluster be in featurecalc or here?
def generateBusAndTrainStopStep(self):
bus_cluster=easf.mode_cluster(5,105,1)
train_cluster=easf.mode_cluster(6,600,1)
air_cluster=easf.mode_cluster(9,600,1)
return (bus_cluster, train_cluster)
# Feature matrix construction
def generateFeatureMatrixAndResultVectorStep(self):
featureMatrix = np.zeros([self.confirmedSections.count(), len(self.featureLabels)])
resultVector = np.zeros(self.confirmedSections.count())
logging.debug("created data structures of size %s" % self.confirmedSections.count())
# There are a couple of additions to the standard confirmedSections cursor here.
# First, we read it in batches of 300 in order to avoid the 10 minute timeout
# Our logging shows that we can process roughly 500 entries in 10 minutes
# Second, it looks like the cursor requeries while iterating. So when we
# first check, we get count of x, but if new entries were read (or in
# this case, classified) while we are iterating over the cursor, we may
# end up processing > x entries.
# This will crash the script because we will try to access a record that
# doesn't exist.
# So we limit the records to the size of the matrix that we have created
for (i, section) in enumerate(self.confirmedSections.limit(featureMatrix.shape[0]).batch_size(300)):
try:
self.updateFeatureMatrixRowWithSection(featureMatrix, i, section)
resultVector[i] = self.getGroundTruthMode(section)
if i % 100 == 0:
logging.debug("Processing record %s " % i)
except Exception as e:
logging.debug("skipping section %s due to error %s " % (section, e))
return (featureMatrix, resultVector)
def getGroundTruthMode(self, section):
# logging.debug("getting ground truth for section %s" % section)
if 'corrected_mode' in section:
# logging.debug("Returning corrected mode %s" % section['corrected_mode'])
return section['corrected_mode']
else:
# logging.debug("Returning confirmed mode %s" % section['confirmed_mode'])
return section['confirmed_mode']
# Features are:
# 0. distance
# 1. duration
# 2. first filter mode
# 3. sectionId
# 4. avg speed
# 5. speed EV
# 6. speed variance
# 7. max speed
# 8. max accel
# 9. isCommute
# 10. heading change rate (currently unfilled)
# 11. stop rate (currently unfilled)
# 12. velocity change rate (currently unfilled)
# 13. start lat
# 14. start lng
# 15. stop lat
# 16. stop lng
# 17. start hour
# 18. end hour
# 19. both start and end close to bus stop
# 20. both start and end close to train station
# 21. both start and end close to airport
def updateFeatureMatrixRowWithSection(self, featureMatrix, i, section):
featureMatrix[i, 0] = section['distance']
featureMatrix[i, 1] = (section['section_end_datetime'] - section['section_start_datetime']).total_seconds()
# Deal with unknown modes like "airplane"
try:
featureMatrix[i, 2] = section['mode']
except ValueError:
featureMatrix[i, 2] = 0
featureMatrix[i, 3] = section['section_id']
featureMatrix[i, 4] = easf.calAvgSpeed(section)
speeds = easf.calSpeeds(section)
if speeds != None and len(speeds) > 0:
featureMatrix[i, 5] = np.mean(speeds)
featureMatrix[i, 6] = np.std(speeds)
featureMatrix[i, 7] = np.max(speeds)
else:
# They will remain zero
pass
accels = easf.calAccels(section)
if accels != None and len(accels) > 0:
featureMatrix[i, 8] = np.max(accels)
else:
# They will remain zero
pass
featureMatrix[i, 9] = ('commute' in section) and (section['commute'] == 'to' or section['commute'] == 'from')
featureMatrix[i, 10] = easf.calHCR(section)
featureMatrix[i, 11] = easf.calSR(section)
featureMatrix[i, 12] = easf.calVCR(section)
if 'section_start_point' in section and section['section_start_point'] != None:
startCoords = section['section_start_point']['coordinates']
featureMatrix[i, 13] = startCoords[0]
featureMatrix[i, 14] = startCoords[1]
if 'section_end_point' in section and section['section_end_point'] != None:
endCoords = section['section_end_point']['coordinates']
featureMatrix[i, 15] = endCoords[0]
featureMatrix[i, 16] = endCoords[1]
featureMatrix[i, 17] = section['section_start_datetime'].time().hour
featureMatrix[i, 18] = section['section_end_datetime'].time().hour
if (hasattr(self, "bus_cluster")):
featureMatrix[i, 19] = easf.mode_start_end_coverage(section, self.bus_cluster,105)
if (hasattr(self, "train_cluster")):
featureMatrix[i, 20] = easf.mode_start_end_coverage(section, self.train_cluster,600)
if (hasattr(self, "air_cluster")):
featureMatrix[i, 21] = easf.mode_start_end_coverage(section, self.air_cluster,600)
# Replace NaN and inf by zeros so that it doesn't crash later
featureMatrix[i] = np.nan_to_num(featureMatrix[i])
def cleanDataStep(self):
runIndices = self.resultVector == 2
transportIndices = self.resultVector == 4
mixedIndices = self.resultVector == 8
airIndices = self.resultVector == 9
unknownIndices = self.resultVector == 0
strippedIndices = np.logical_not(runIndices | transportIndices | mixedIndices | unknownIndices)
logging.debug("Stripped trips with mode: run %s, transport %s, mixed %s, unknown %s unstripped %s" %
(np.count_nonzero(runIndices), np.count_nonzero(transportIndices),
np.count_nonzero(mixedIndices), np.count_nonzero(unknownIndices),
np.count_nonzero(strippedIndices)))
strippedFeatureMatrix = self.featureMatrix[strippedIndices]
strippedResultVector = self.resultVector[strippedIndices]
# In spite of stripping out the values, we see that there are clear
# outliers. This is almost certainly a mis-classified trip, because the
# distance and speed are both really large, but the mode is walking. Let's
# manually filter out this outlier.
distanceOutliers = strippedFeatureMatrix[:,0] > 500000
speedOutliers = strippedFeatureMatrix[:,4] > 100
speedMeanOutliers = strippedFeatureMatrix[:,5] > 80
speedVarianceOutliers = strippedFeatureMatrix[:,6] > 70
maxSpeedOutliers = strippedFeatureMatrix[:,7] > 160
logging.debug("Stripping out distanceOutliers %s, speedOutliers %s, speedMeanOutliers %s, speedVarianceOutliers %s, maxSpeedOutliers %s" %
(np.nonzero(distanceOutliers), np.nonzero(speedOutliers),
np.nonzero(speedMeanOutliers), np.nonzero(speedVarianceOutliers),
np.nonzero(maxSpeedOutliers)))
nonOutlierIndices = np.logical_not(distanceOutliers | speedOutliers | speedMeanOutliers | speedVarianceOutliers | maxSpeedOutliers)
logging.debug("nonOutlierIndices.shape = %s" % nonOutlierIndices.shape)
return (strippedFeatureMatrix[nonOutlierIndices],
strippedResultVector[nonOutlierIndices])
# Feature Indices
def selectFeatureIndicesStep(self):
genericFeatureIndices = list(xrange(0,10))
AdvancedFeatureIndices = list(xrange(10,13))
LocationFeatureIndices = list(xrange(13,17))
TimeFeatureIndices = list(xrange(17,19))
BusTrainFeatureIndices = list(xrange(19,22))
logging.debug("generic features = %s" % genericFeatureIndices)
logging.debug("advanced features = %s" % AdvancedFeatureIndices)
logging.debug("location features = %s" % LocationFeatureIndices)
logging.debug("time features = %s" % TimeFeatureIndices)
logging.debug("bus train features = %s" % BusTrainFeatureIndices)
return genericFeatureIndices + BusTrainFeatureIndices
def buildModelStep(self):
from sklearn import ensemble
forestClf = ensemble.RandomForestClassifier()
model = forestClf.fit(self.selFeatureMatrix, self.cleanedResultVector)
return model
def generateFeatureMatrixAndIDsStep(self, sectionQuery):
toPredictSections = self.Sections.find(sectionQuery)
logging.debug("Predicting values for %d sections" % toPredictSections.count())
featureMatrix = np.zeros([toPredictSections.count(), len(self.featureLabels)])
sectionIds = []
sectionUserIds = []
for (i, section) in enumerate(toPredictSections.limit(featureMatrix.shape[0]).batch_size(300)):
if i % 50 == 0:
logging.debug("Processing test record %s " % i)
self.updateFeatureMatrixRowWithSection(featureMatrix, i, section)
sectionIds.append(section['_id'])
sectionUserIds.append(section['user_id'])
return (featureMatrix[:,self.selFeatureIndices], sectionIds, sectionUserIds)
def predictModesStep(self):
return self.model.predict_proba(self.toPredictFeatureMatrix)
# The current probability will only have results for values from the set of
# unique values in the resultVector. This means that the location of the
# highest probability is not a 1:1 mapping to the mode, which will probably
# have issues down the road. We are going to fix this here by storing the
# non-zero probabilities in a map instead of in a list. We used to have an
# list here, but we move to a map instead because we plan to support lots of
# different modes, and having an giant array consisting primarily of zeros
# doesn't sound like a great option.
# In other words, uniqueModes = [1, 5]
# predictedProb = [[1,0], [0,1]]
# allModes has length 8
# returns [{'walking': 1}, {'bus': 1}]
def convertPredictedProbToMap(self, allModeList, uniqueModes, predictedProbArr):
currProbMap = {}
uniqueModesInt = [int(um) for um in uniqueModes]
logging.debug("predictedProbArr has %s non-zero elements" % np.count_nonzero(predictedProbArr))
logging.debug("uniqueModes are %s " % uniqueModesInt)
for (j, uniqueMode) in enumerate(uniqueModesInt):
if predictedProbArr[j] != 0:
# Modes start from 1, but allModeList indices start from 0
# so walking (mode id 1) -> modeList[0]
modeName = allModeList[uniqueMode-1]['mode_name']
logging.debug("Setting probability of mode %s (%s) to %s" %
(uniqueMode, modeName, predictedProbArr[j]))
currProbMap[modeName] = predictedProbArr[j]
return currProbMap
def savePredictionsStep(self):
from emission.core.wrapper.user import User
from emission.core.wrapper.client import Client
uniqueModes = sorted(set(self.cleanedResultVector))
for i in range(self.predictedProb.shape[0]):
currSectionId = self.sectionIds[i]
currProb = self.convertPredictedProbToMap(self.modeList, uniqueModes, self.predictedProb[i])
logging.debug("Updating probability for section with id = %s" % currSectionId)
self.Sections.update({'_id': currSectionId}, {"$set": {"predicted_mode": currProb}})
currUser = User.fromUUID(self.sectionUserIds[i])
clientSpecificUpdate = Client(currUser.getFirstStudy()).clientSpecificSetters(currUser.uuid, currSectionId, currProb)
if clientSpecificUpdate != None:
self.Sections.update({'_id': currSectionId}, clientSpecificUpdate)
if __name__ == "__main__":
import json
config_data = json.load(open('config.json'))
log_base_dir = config_data['paths']['log_base_dir']
logging.basicConfig(format='%(asctime)s:%(levelname)s:%(message)s',
filename="%s/pipeline.log" % log_base_dir, level=logging.DEBUG)
modeInferPipeline = ModeInferencePipeline()
modeInferPipeline.runPipeline()
| bsd-3-clause |
numenta/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/colorbar.py | 69 | 27260 | '''
Colorbar toolkit with two classes and a function:
:class:`ColorbarBase`
the base class with full colorbar drawing functionality.
It can be used as-is to make a colorbar for a given colormap;
a mappable object (e.g., image) is not needed.
:class:`Colorbar`
the derived class for use with images or contour plots.
:func:`make_axes`
a function for resizing an axes and adding a second axes
suitable for a colorbar
The :meth:`~matplotlib.figure.Figure.colorbar` method uses :func:`make_axes`
and :class:`Colorbar`; the :func:`~matplotlib.pyplot.colorbar` function
is a thin wrapper over :meth:`~matplotlib.figure.Figure.colorbar`.
'''
import numpy as np
import matplotlib as mpl
import matplotlib.colors as colors
import matplotlib.cm as cm
import matplotlib.ticker as ticker
import matplotlib.cbook as cbook
import matplotlib.lines as lines
import matplotlib.patches as patches
import matplotlib.collections as collections
import matplotlib.contour as contour
make_axes_kw_doc = '''
========== ====================================================
Property Description
========== ====================================================
*fraction* 0.15; fraction of original axes to use for colorbar
*pad* 0.05 if vertical, 0.15 if horizontal; fraction
of original axes between colorbar and new image axes
*shrink* 1.0; fraction by which to shrink the colorbar
*aspect* 20; ratio of long to short dimensions
========== ====================================================
'''
colormap_kw_doc = '''
=========== ====================================================
Property Description
=========== ====================================================
*extend* [ 'neither' | 'both' | 'min' | 'max' ]
If not 'neither', make pointed end(s) for out-of-
range values. These are set for a given colormap
using the colormap set_under and set_over methods.
*spacing* [ 'uniform' | 'proportional' ]
Uniform spacing gives each discrete color the same
space; proportional makes the space proportional to
the data interval.
*ticks* [ None | list of ticks | Locator object ]
If None, ticks are determined automatically from the
input.
*format* [ None | format string | Formatter object ]
If None, the
:class:`~matplotlib.ticker.ScalarFormatter` is used.
If a format string is given, e.g. '%.3f', that is
used. An alternative
:class:`~matplotlib.ticker.Formatter` object may be
given instead.
*drawedges* [ False | True ] If true, draw lines at color
boundaries.
=========== ====================================================
The following will probably be useful only in the context of
indexed colors (that is, when the mappable has norm=NoNorm()),
or other unusual circumstances.
============ ===================================================
Property Description
============ ===================================================
*boundaries* None or a sequence
*values* None or a sequence which must be of length 1 less
than the sequence of *boundaries*. For each region
delimited by adjacent entries in *boundaries*, the
color mapped to the corresponding value in values
will be used.
============ ===================================================
'''
colorbar_doc = '''
Add a colorbar to a plot.
Function signatures for the :mod:`~matplotlib.pyplot` interface; all
but the first are also method signatures for the
:meth:`~matplotlib.figure.Figure.colorbar` method::
colorbar(**kwargs)
colorbar(mappable, **kwargs)
colorbar(mappable, cax=cax, **kwargs)
colorbar(mappable, ax=ax, **kwargs)
arguments:
*mappable*
the :class:`~matplotlib.image.Image`,
:class:`~matplotlib.contour.ContourSet`, etc. to
which the colorbar applies; this argument is mandatory for the
:meth:`~matplotlib.figure.Figure.colorbar` method but optional for the
:func:`~matplotlib.pyplot.colorbar` function, which sets the
default to the current image.
keyword arguments:
*cax*
None | axes object into which the colorbar will be drawn
*ax*
None | parent axes object from which space for a new
colorbar axes will be stolen
Additional keyword arguments are of two kinds:
axes properties:
%s
colorbar properties:
%s
If *mappable* is a :class:`~matplotlib.contour.ContourSet`, its *extend*
kwarg is included automatically.
Note that the *shrink* kwarg provides a simple way to keep a vertical
colorbar, for example, from being taller than the axes of the mappable
to which the colorbar is attached; but it is a manual method requiring
some trial and error. If the colorbar is too tall (or a horizontal
colorbar is too wide) use a smaller value of *shrink*.
For more precise control, you can manually specify the positions of
the axes objects in which the mappable and the colorbar are drawn. In
this case, do not use any of the axes properties kwargs.
returns:
:class:`~matplotlib.colorbar.Colorbar` instance; see also its base class,
:class:`~matplotlib.colorbar.ColorbarBase`. Call the
:meth:`~matplotlib.colorbar.ColorbarBase.set_label` method
to label the colorbar.
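A minimal example (values are illustrative)::

    import matplotlib.pyplot as plt
    import numpy as np
    im = plt.imshow(np.random.rand(10, 10))
    cbar = plt.colorbar(im, shrink=0.8)
    cbar.set_label('intensity')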
''' % (make_axes_kw_doc, colormap_kw_doc)
class ColorbarBase(cm.ScalarMappable):
'''
Draw a colorbar in an existing axes.
This is a base class for the :class:`Colorbar` class, which is the
basis for the :func:`~matplotlib.pyplot.colorbar` method and pylab
function.
It is also useful by itself for showing a colormap. If the *cmap*
kwarg is given but *boundaries* and *values* are left as None,
then the colormap will be displayed on a 0-1 scale. To show the
under- and over-value colors, specify the *norm* as::
colors.Normalize(clip=False)
To show the colors versus index instead of on the 0-1 scale,
use::
norm=colors.NoNorm.
Useful attributes:
:attr:`ax`
the Axes instance in which the colorbar is drawn
:attr:`lines`
a LineCollection if lines were drawn, otherwise None
:attr:`dividers`
a LineCollection if *drawedges* is True, otherwise None
Useful public methods are :meth:`set_label` and :meth:`add_lines`.
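A minimal standalone example (values are illustrative)::

    import matplotlib.pyplot as plt
    import matplotlib.cm as cm
    import matplotlib.colors as colors
    fig = plt.figure()
    ax = fig.add_axes([0.05, 0.45, 0.9, 0.1])
    cb = ColorbarBase(ax, cmap=cm.jet,
                      norm=colors.Normalize(vmin=0, vmax=10),
                      orientation='horizontal')
    cb.set_label('Some units')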
'''
_slice_dict = {'neither': slice(0,1000000),
'both': slice(1,-1),
'min': slice(1,1000000),
'max': slice(0,-1)}
def __init__(self, ax, cmap=None,
norm=None,
alpha=1.0,
values=None,
boundaries=None,
orientation='vertical',
extend='neither',
spacing='uniform', # uniform or proportional
ticks=None,
format=None,
drawedges=False,
filled=True,
):
self.ax = ax
if cmap is None: cmap = cm.get_cmap()
if norm is None: norm = colors.Normalize()
self.alpha = alpha
cm.ScalarMappable.__init__(self, cmap=cmap, norm=norm)
self.values = values
self.boundaries = boundaries
self.extend = extend
self._inside = self._slice_dict[extend]
self.spacing = spacing
self.orientation = orientation
self.drawedges = drawedges
self.filled = filled
self.solids = None
self.lines = None
self.dividers = None
self.set_label('')
if cbook.iterable(ticks):
self.locator = ticker.FixedLocator(ticks, nbins=len(ticks))
else:
self.locator = ticks # Handle default in _ticker()
if format is None:
if isinstance(self.norm, colors.LogNorm):
self.formatter = ticker.LogFormatter()
else:
self.formatter = ticker.ScalarFormatter()
elif cbook.is_string_like(format):
self.formatter = ticker.FormatStrFormatter(format)
else:
self.formatter = format # Assume it is a Formatter
# The rest is in a method so we can recalculate when clim changes.
self.draw_all()
def draw_all(self):
'''
Calculate any free parameters based on the current cmap and norm,
and do all the drawing.
'''
self._process_values()
self._find_range()
X, Y = self._mesh()
C = self._values[:,np.newaxis]
self._config_axes(X, Y)
if self.filled:
self._add_solids(X, Y, C)
self._set_label()
def _config_axes(self, X, Y):
'''
Make an axes patch and outline.
'''
ax = self.ax
ax.set_frame_on(False)
ax.set_navigate(False)
xy = self._outline(X, Y)
ax.update_datalim(xy)
ax.set_xlim(*ax.dataLim.intervalx)
ax.set_ylim(*ax.dataLim.intervaly)
self.outline = lines.Line2D(xy[:, 0], xy[:, 1], color=mpl.rcParams['axes.edgecolor'],
linewidth=mpl.rcParams['axes.linewidth'])
ax.add_artist(self.outline)
self.outline.set_clip_box(None)
self.outline.set_clip_path(None)
c = mpl.rcParams['axes.facecolor']
self.patch = patches.Polygon(xy, edgecolor=c,
facecolor=c,
linewidth=0.01,
zorder=-1)
ax.add_artist(self.patch)
ticks, ticklabels, offset_string = self._ticker()
if self.orientation == 'vertical':
ax.set_xticks([])
ax.yaxis.set_label_position('right')
ax.yaxis.set_ticks_position('right')
ax.set_yticks(ticks)
ax.set_yticklabels(ticklabels)
ax.yaxis.get_major_formatter().set_offset_string(offset_string)
else:
ax.set_yticks([])
ax.xaxis.set_label_position('bottom')
ax.set_xticks(ticks)
ax.set_xticklabels(ticklabels)
ax.xaxis.get_major_formatter().set_offset_string(offset_string)
def _set_label(self):
if self.orientation == 'vertical':
self.ax.set_ylabel(self._label, **self._labelkw)
else:
self.ax.set_xlabel(self._label, **self._labelkw)
def set_label(self, label, **kw):
'''
Label the long axis of the colorbar
'''
self._label = label
self._labelkw = kw
self._set_label()
def _outline(self, X, Y):
'''
Return *x*, *y* arrays of colorbar bounding polygon,
taking orientation into account.
'''
N = X.shape[0]
ii = [0, 1, N-2, N-1, 2*N-1, 2*N-2, N+1, N, 0]
x = np.take(np.ravel(np.transpose(X)), ii)
y = np.take(np.ravel(np.transpose(Y)), ii)
x = x.reshape((len(x), 1))
y = y.reshape((len(y), 1))
if self.orientation == 'horizontal':
return np.hstack((y, x))
return np.hstack((x, y))
def _edges(self, X, Y):
'''
Return the separator line segments; helper for _add_solids.
'''
N = X.shape[0]
# Using the non-array form of these line segments is much
# simpler than making them into arrays.
if self.orientation == 'vertical':
return [zip(X[i], Y[i]) for i in range(1, N-1)]
else:
return [zip(Y[i], X[i]) for i in range(1, N-1)]
def _add_solids(self, X, Y, C):
'''
Draw the colors using :meth:`~matplotlib.axes.Axes.pcolor`;
optionally add separators.
'''
## Change to pcolorfast after fixing bugs in some backends...
if self.orientation == 'vertical':
args = (X, Y, C)
else:
args = (np.transpose(Y), np.transpose(X), np.transpose(C))
kw = {'cmap':self.cmap, 'norm':self.norm,
'shading':'flat', 'alpha':self.alpha}
# Save, set, and restore hold state to keep pcolor from
# clearing the axes. Ordinarily this will not be needed,
# since the axes object should already have hold set.
_hold = self.ax.ishold()
self.ax.hold(True)
col = self.ax.pcolor(*args, **kw)
self.ax.hold(_hold)
#self.add_observer(col) # We should observe, not be observed...
self.solids = col
if self.drawedges:
self.dividers = collections.LineCollection(self._edges(X,Y),
colors=(mpl.rcParams['axes.edgecolor'],),
linewidths=(0.5*mpl.rcParams['axes.linewidth'],)
)
self.ax.add_collection(self.dividers)
def add_lines(self, levels, colors, linewidths):
'''
Draw lines on the colorbar.
'''
N = len(levels)
dummy, y = self._locate(levels)
if len(y) != N:
raise ValueError("levels are outside colorbar range")
x = np.array([0.0, 1.0])
X, Y = np.meshgrid(x,y)
if self.orientation == 'vertical':
xy = [zip(X[i], Y[i]) for i in range(N)]
else:
xy = [zip(Y[i], X[i]) for i in range(N)]
col = collections.LineCollection(xy, linewidths=linewidths)
self.lines = col
col.set_color(colors)
self.ax.add_collection(col)
def _ticker(self):
'''
Return two sequences: ticks (colorbar data locations)
and ticklabels (strings).
'''
locator = self.locator
formatter = self.formatter
if locator is None:
if self.boundaries is None:
if isinstance(self.norm, colors.NoNorm):
nv = len(self._values)
base = 1 + int(nv/10)
locator = ticker.IndexLocator(base=base, offset=0)
elif isinstance(self.norm, colors.BoundaryNorm):
b = self.norm.boundaries
locator = ticker.FixedLocator(b, nbins=10)
elif isinstance(self.norm, colors.LogNorm):
locator = ticker.LogLocator()
else:
locator = ticker.MaxNLocator()
else:
b = self._boundaries[self._inside]
locator = ticker.FixedLocator(b, nbins=10)
if isinstance(self.norm, colors.NoNorm):
intv = self._values[0], self._values[-1]
else:
intv = self.vmin, self.vmax
locator.create_dummy_axis()
formatter.create_dummy_axis()
locator.set_view_interval(*intv)
locator.set_data_interval(*intv)
formatter.set_view_interval(*intv)
formatter.set_data_interval(*intv)
b = np.array(locator())
b, ticks = self._locate(b)
formatter.set_locs(b)
ticklabels = [formatter(t, i) for i, t in enumerate(b)]
offset_string = formatter.get_offset()
return ticks, ticklabels, offset_string
def _process_values(self, b=None):
'''
Set the :attr:`_boundaries` and :attr:`_values` attributes
based on the input boundaries and values. Input boundaries
can be *self.boundaries* or the argument *b*.
'''
if b is None:
b = self.boundaries
if b is not None:
self._boundaries = np.asarray(b, dtype=float)
if self.values is None:
self._values = 0.5*(self._boundaries[:-1]
+ self._boundaries[1:])
if isinstance(self.norm, colors.NoNorm):
self._values = (self._values + 0.00001).astype(np.int16)
return
self._values = np.array(self.values)
return
if self.values is not None:
self._values = np.array(self.values)
if self.boundaries is None:
b = np.zeros(len(self.values)+1, 'd')
b[1:-1] = 0.5*(self._values[:-1] + self._values[1:])
b[0] = 2.0*b[1] - b[2]
b[-1] = 2.0*b[-2] - b[-3]
self._boundaries = b
return
self._boundaries = np.array(self.boundaries)
return
# Neither boundaries nor values are specified;
# make reasonable ones based on cmap and norm.
if isinstance(self.norm, colors.NoNorm):
b = self._uniform_y(self.cmap.N+1) * self.cmap.N - 0.5
v = np.zeros((len(b)-1,), dtype=np.int16)
v[self._inside] = np.arange(self.cmap.N, dtype=np.int16)
if self.extend in ('both', 'min'):
v[0] = -1
if self.extend in ('both', 'max'):
v[-1] = self.cmap.N
self._boundaries = b
self._values = v
return
elif isinstance(self.norm, colors.BoundaryNorm):
b = list(self.norm.boundaries)
if self.extend in ('both', 'min'):
b = [b[0]-1] + b
if self.extend in ('both', 'max'):
b = b + [b[-1] + 1]
b = np.array(b)
v = np.zeros((len(b)-1,), dtype=float)
bi = self.norm.boundaries
v[self._inside] = 0.5*(bi[:-1] + bi[1:])
if self.extend in ('both', 'min'):
v[0] = b[0] - 1
if self.extend in ('both', 'max'):
v[-1] = b[-1] + 1
self._boundaries = b
self._values = v
return
else:
if not self.norm.scaled():
self.norm.vmin = 0
self.norm.vmax = 1
b = self.norm.inverse(self._uniform_y(self.cmap.N+1))
if self.extend in ('both', 'min'):
b[0] = b[0] - 1
if self.extend in ('both', 'max'):
b[-1] = b[-1] + 1
self._process_values(b)
def _find_range(self):
'''
Set :attr:`vmin` and :attr:`vmax` attributes to the first and
last boundary excluding extended end boundaries.
'''
b = self._boundaries[self._inside]
self.vmin = b[0]
self.vmax = b[-1]
def _central_N(self):
'''number of boundaries **before** extension of ends'''
nb = len(self._boundaries)
if self.extend == 'both':
nb -= 2
elif self.extend in ('min', 'max'):
nb -= 1
return nb
def _extended_N(self):
'''
Based on the colormap and extend variable, return the
number of boundaries.
'''
N = self.cmap.N + 1
if self.extend == 'both':
N += 2
elif self.extend in ('min', 'max'):
N += 1
return N
def _uniform_y(self, N):
'''
Return colorbar data coordinates for *N* uniformly
spaced boundaries, plus ends if required.
'''
if self.extend == 'neither':
y = np.linspace(0, 1, N)
else:
if self.extend == 'both':
y = np.zeros(N + 2, 'd')
y[0] = -0.05
y[-1] = 1.05
elif self.extend == 'min':
y = np.zeros(N + 1, 'd')
y[0] = -0.05
else:
y = np.zeros(N + 1, 'd')
y[-1] = 1.05
y[self._inside] = np.linspace(0, 1, N)
return y
def _proportional_y(self):
'''
Return colorbar data coordinates for the boundaries of
a proportional colorbar.
'''
if isinstance(self.norm, colors.BoundaryNorm):
b = self._boundaries[self._inside]
y = (self._boundaries - self._boundaries[0])
y = y / (self._boundaries[-1] - self._boundaries[0])
else:
y = self.norm(self._boundaries.copy())
if self.extend in ('both', 'min'):
y[0] = -0.05
if self.extend in ('both', 'max'):
y[-1] = 1.05
yi = y[self._inside]
norm = colors.Normalize(yi[0], yi[-1])
y[self._inside] = norm(yi)
return y
def _mesh(self):
'''
Return X,Y, the coordinate arrays for the colorbar pcolormesh.
These are suitable for a vertical colorbar; swapping and
transposition for a horizontal colorbar are done outside
this function.
'''
x = np.array([0.0, 1.0])
if self.spacing == 'uniform':
y = self._uniform_y(self._central_N())
else:
y = self._proportional_y()
self._y = y
X, Y = np.meshgrid(x,y)
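        # descriptive note: collapsing the first/last row of X to 0.5 pinches the
        # out-of-range extension cells so they are drawn as the pointed colorbar ends.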
if self.extend in ('min', 'both'):
X[0,:] = 0.5
if self.extend in ('max', 'both'):
X[-1,:] = 0.5
return X, Y
def _locate(self, x):
'''
Given a possible set of color data values, return the ones
within range, together with their corresponding colorbar
data coordinates.
'''
if isinstance(self.norm, (colors.NoNorm, colors.BoundaryNorm)):
b = self._boundaries
xn = x
xout = x
else:
# Do calculations using normalized coordinates so
# as to make the interpolation more accurate.
b = self.norm(self._boundaries, clip=False).filled()
# We do our own clipping so that we can allow a tiny
# bit of slop in the end point ticks to allow for
# floating point errors.
xn = self.norm(x, clip=False).filled()
in_cond = (xn > -0.001) & (xn < 1.001)
xn = np.compress(in_cond, xn)
xout = np.compress(in_cond, x)
# The rest is linear interpolation with clipping.
y = self._y
N = len(b)
ii = np.minimum(np.searchsorted(b, xn), N-1)
i0 = np.maximum(ii - 1, 0)
#db = b[ii] - b[i0]
db = np.take(b, ii) - np.take(b, i0)
db = np.where(i0==ii, 1.0, db)
#dy = y[ii] - y[i0]
dy = np.take(y, ii) - np.take(y, i0)
z = np.take(y, i0) + (xn-np.take(b,i0))*dy/db
return xout, z
def set_alpha(self, alpha):
self.alpha = alpha
class Colorbar(ColorbarBase):
def __init__(self, ax, mappable, **kw):
mappable.autoscale_None() # Ensure mappable.norm.vmin, vmax
# are set when colorbar is called,
# even if mappable.draw has not yet
# been called. This will not change
# vmin, vmax if they are already set.
self.mappable = mappable
kw['cmap'] = mappable.cmap
kw['norm'] = mappable.norm
kw['alpha'] = mappable.get_alpha()
if isinstance(mappable, contour.ContourSet):
CS = mappable
kw['boundaries'] = CS._levels
kw['values'] = CS.cvalues
kw['extend'] = CS.extend
#kw['ticks'] = CS._levels
kw.setdefault('ticks', ticker.FixedLocator(CS.levels, nbins=10))
kw['filled'] = CS.filled
ColorbarBase.__init__(self, ax, **kw)
if not CS.filled:
self.add_lines(CS)
else:
ColorbarBase.__init__(self, ax, **kw)
def add_lines(self, CS):
'''
Add the lines from a non-filled
:class:`~matplotlib.contour.ContourSet` to the colorbar.
'''
if not isinstance(CS, contour.ContourSet) or CS.filled:
raise ValueError('add_lines is only for a ContourSet of lines')
tcolors = [c[0] for c in CS.tcolors]
tlinewidths = [t[0] for t in CS.tlinewidths]
# The following was an attempt to get the colorbar lines
# to follow subsequent changes in the contour lines,
# but more work is needed: specifically, a careful
# look at event sequences, and at how
# to make one object track another automatically.
#tcolors = [col.get_colors()[0] for col in CS.collections]
#tlinewidths = [col.get_linewidth()[0] for lw in CS.collections]
#print 'tlinewidths:', tlinewidths
ColorbarBase.add_lines(self, CS.levels, tcolors, tlinewidths)
def update_bruteforce(self, mappable):
'''
Manually change any contour line colors. This is called
when the image or contour plot to which this colorbar belongs
is changed.
'''
# We are using an ugly brute-force method: clearing and
# redrawing the whole thing. The problem is that if any
# properties have been changed by methods other than the
# colorbar methods, those changes will be lost.
self.ax.cla()
self.draw_all()
#if self.vmin != self.norm.vmin or self.vmax != self.norm.vmax:
# self.ax.cla()
# self.draw_all()
if isinstance(self.mappable, contour.ContourSet):
CS = self.mappable
if not CS.filled:
self.add_lines(CS)
#if self.lines is not None:
# tcolors = [c[0] for c in CS.tcolors]
# self.lines.set_color(tcolors)
#Fixme? Recalculate boundaries, ticks if vmin, vmax have changed.
#Fixme: Some refactoring may be needed; we should not
# be recalculating everything if there was a simple alpha
# change.
def make_axes(parent, **kw):
orientation = kw.setdefault('orientation', 'vertical')
fraction = kw.pop('fraction', 0.15)
shrink = kw.pop('shrink', 1.0)
aspect = kw.pop('aspect', 20)
#pb = transforms.PBox(parent.get_position())
pb = parent.get_position(original=True).frozen()
if orientation == 'vertical':
pad = kw.pop('pad', 0.05)
x1 = 1.0-fraction
pb1, pbx, pbcb = pb.splitx(x1-pad, x1)
pbcb = pbcb.shrunk(1.0, shrink).anchored('C', pbcb)
anchor = (0.0, 0.5)
panchor = (1.0, 0.5)
else:
pad = kw.pop('pad', 0.15)
pbcb, pbx, pb1 = pb.splity(fraction, fraction+pad)
pbcb = pbcb.shrunk(shrink, 1.0).anchored('C', pbcb)
aspect = 1.0/aspect
anchor = (0.5, 1.0)
panchor = (0.5, 0.0)
parent.set_position(pb1)
parent.set_anchor(panchor)
fig = parent.get_figure()
cax = fig.add_axes(pbcb)
cax.set_aspect(aspect, anchor=anchor, adjustable='box')
return cax, kw
make_axes.__doc__ ='''
Resize and reposition a parent axes, and return a child
axes suitable for a colorbar::
cax, kw = make_axes(parent, **kw)
Keyword arguments may include the following (with defaults):
*orientation*
'vertical' or 'horizontal'
%s
All but the first of these are stripped from the input kw set.
Returns (cax, kw), the child axes and the reduced kw dictionary.
''' % make_axes_kw_doc
| agpl-3.0 |
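A minimal illustrative sketch (not part of the matplotlib source above; the function name and example values are assumptions): ``ColorbarBase._locate`` maps tick values onto colorbar coordinates by normalising them, keeping those inside [0, 1] with a small tolerance, and linearly interpolating between boundary positions. The same mapping restated standalone:

import numpy as np

def locate_on_colorbar(x, boundaries, y, vmin, vmax, tol=1e-3):
    """Map data values `x` onto colorbar coordinates `y` sampled at `boundaries`."""
    span = float(vmax - vmin)
    b = (np.asarray(boundaries, dtype=float) - vmin) / span   # normalised boundaries
    xn = (np.asarray(x, dtype=float) - vmin) / span           # normalised tick values
    keep = (xn > -tol) & (xn < 1.0 + tol)                     # tiny slop at the end points
    xout = np.asarray(x, dtype=float)[keep]
    xn = xn[keep]
    y = np.asarray(y, dtype=float)
    ii = np.minimum(np.searchsorted(b, xn), len(b) - 1)       # right-hand boundary index
    i0 = np.maximum(ii - 1, 0)                                # left-hand boundary index
    db = np.where(i0 == ii, 1.0, b[ii] - b[i0])               # guard against zero width
    z = y[i0] + (xn - b[i0]) * (y[ii] - y[i0]) / db           # linear interpolation
    return xout, z

# Uniform boundaries mapped onto a uniform [0, 1] colorbar axis.
bounds = np.linspace(0.0, 10.0, 11)
ycoords = np.linspace(0.0, 1.0, 11)
print(locate_on_colorbar([2.5, 7.5, 12.0], bounds, ycoords, vmin=0.0, vmax=10.0))

The two in-range values are placed proportionally along the bar and the out-of-range one is dropped, which is exactly the behaviour the ticking code above relies on.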
jeffery-do/Vizdoombot | doom/lib/python3.5/site-packages/scipy/stats/_stats_mstats_common.py | 12 | 8157 | from collections import namedtuple
import numpy as np
from . import distributions
__all__ = ['_find_repeats', 'linregress', 'theilslopes']
def linregress(x, y=None):
"""
Calculate a linear least-squares regression for two sets of measurements.
Parameters
----------
x, y : array_like
Two sets of measurements. Both arrays should have the same length.
If only x is given (and y=None), then it must be a two-dimensional
array where one dimension has length 2. The two sets of measurements
are then found by splitting the array along the length-2 dimension.
Returns
-------
slope : float
slope of the regression line
intercept : float
intercept of the regression line
rvalue : float
correlation coefficient
pvalue : float
two-sided p-value for a hypothesis test whose null hypothesis is
that the slope is zero.
stderr : float
Standard error of the estimated gradient.
See also
--------
optimize.curve_fit : Use non-linear least squares to fit a function to data.
optimize.leastsq : Minimize the sum of squares of a set of equations.
Examples
--------
>>> from scipy import stats
>>> np.random.seed(12345678)
>>> x = np.random.random(10)
>>> y = np.random.random(10)
>>> slope, intercept, r_value, p_value, std_err = stats.linregress(x,y)
# To get coefficient of determination (r_squared)
>>> print("r-squared:", r_value**2)
('r-squared:', 0.080402268539028335)
"""
TINY = 1.0e-20
if y is None: # x is a (2, N) or (N, 2) shaped array_like
x = np.asarray(x)
if x.shape[0] == 2:
x, y = x
elif x.shape[1] == 2:
x, y = x.T
else:
msg = ("If only `x` is given as input, it has to be of shape "
"(2, N) or (N, 2), provided shape was %s" % str(x.shape))
raise ValueError(msg)
else:
x = np.asarray(x)
y = np.asarray(y)
if x.size == 0 or y.size == 0:
raise ValueError("Inputs must not be empty.")
n = len(x)
xmean = np.mean(x, None)
ymean = np.mean(y, None)
# average sum of squares:
ssxm, ssxym, ssyxm, ssym = np.cov(x, y, bias=1).flat
r_num = ssxym
r_den = np.sqrt(ssxm * ssym)
if r_den == 0.0:
r = 0.0
else:
r = r_num / r_den
# test for numerical error propagation
if r > 1.0:
r = 1.0
elif r < -1.0:
r = -1.0
df = n - 2
t = r * np.sqrt(df / ((1.0 - r + TINY)*(1.0 + r + TINY)))
prob = 2 * distributions.t.sf(np.abs(t), df)
slope = r_num / ssxm
intercept = ymean - slope*xmean
sterrest = np.sqrt((1 - r**2) * ssym / ssxm / df)
LinregressResult = namedtuple('LinregressResult', ('slope', 'intercept',
'rvalue', 'pvalue',
'stderr'))
return LinregressResult(slope, intercept, r, prob, sterrest)
def theilslopes(y, x=None, alpha=0.95):
r"""
Computes the Theil-Sen estimator for a set of points (x, y).
`theilslopes` implements a method for robust linear regression. It
computes the slope as the median of all slopes between paired values.
Parameters
----------
y : array_like
Dependent variable.
x : array_like or None, optional
Independent variable. If None, use ``arange(len(y))`` instead.
alpha : float, optional
Confidence degree between 0 and 1. Default is 95% confidence.
Note that `alpha` is symmetric around 0.5, i.e. both 0.1 and 0.9 are
interpreted as "find the 90% confidence interval".
Returns
-------
medslope : float
Theil slope.
medintercept : float
Intercept of the Theil line, as ``median(y) - medslope*median(x)``.
lo_slope : float
Lower bound of the confidence interval on `medslope`.
up_slope : float
Upper bound of the confidence interval on `medslope`.
Notes
-----
The implementation of `theilslopes` follows [1]_. The intercept is
not defined in [1]_, and here it is defined as ``median(y) -
medslope*median(x)``, which is given in [3]_. Other definitions of
the intercept exist in the literature. A confidence interval for
the intercept is not given as this question is not addressed in
[1]_.
References
----------
.. [1] P.K. Sen, "Estimates of the regression coefficient based on Kendall's tau",
J. Am. Stat. Assoc., Vol. 63, pp. 1379-1389, 1968.
.. [2] H. Theil, "A rank-invariant method of linear and polynomial
regression analysis I, II and III", Nederl. Akad. Wetensch., Proc.
53:, pp. 386-392, pp. 521-525, pp. 1397-1412, 1950.
.. [3] W.L. Conover, "Practical nonparametric statistics", 2nd ed.,
John Wiley and Sons, New York, pp. 493.
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-5, 5, num=150)
>>> y = x + np.random.normal(size=x.size)
>>> y[11:15] += 10 # add outliers
>>> y[-5:] -= 7
Compute the slope, intercept and 90% confidence interval. For comparison,
also compute the least-squares fit with `linregress`:
>>> res = stats.theilslopes(y, x, 0.90)
>>> lsq_res = stats.linregress(x, y)
Plot the results. The Theil-Sen regression line is shown in red, with the
dashed red lines illustrating the confidence interval of the slope (note
that the dashed red lines are not the confidence interval of the regression
as the confidence interval of the intercept is not included). The green
line shows the least-squares fit for comparison.
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, y, 'b.')
>>> ax.plot(x, res[1] + res[0] * x, 'r-')
>>> ax.plot(x, res[1] + res[2] * x, 'r--')
>>> ax.plot(x, res[1] + res[3] * x, 'r--')
>>> ax.plot(x, lsq_res[1] + lsq_res[0] * x, 'g-')
>>> plt.show()
"""
# We copy both x and y so we can use _find_repeats.
y = np.array(y).flatten()
if x is None:
x = np.arange(len(y), dtype=float)
else:
x = np.array(x, dtype=float).flatten()
if len(x) != len(y):
raise ValueError("Incompatible lengths ! (%s<>%s)" % (len(y), len(x)))
# Compute sorted slopes only when deltax > 0
deltax = x[:, np.newaxis] - x
deltay = y[:, np.newaxis] - y
slopes = deltay[deltax > 0] / deltax[deltax > 0]
slopes.sort()
medslope = np.median(slopes)
medinter = np.median(y) - medslope * np.median(x)
# Now compute confidence intervals
if alpha > 0.5:
alpha = 1. - alpha
z = distributions.norm.ppf(alpha / 2.)
# This implements (2.6) from Sen (1968)
_, nxreps = _find_repeats(x)
_, nyreps = _find_repeats(y)
nt = len(slopes) # N in Sen (1968)
ny = len(y) # n in Sen (1968)
# Equation 2.6 in Sen (1968):
sigsq = 1/18. * (ny * (ny-1) * (2*ny+5) -
np.sum(k * (k-1) * (2*k + 5) for k in nxreps) -
np.sum(k * (k-1) * (2*k + 5) for k in nyreps))
# Find the confidence interval indices in `slopes`
sigma = np.sqrt(sigsq)
Ru = min(int(np.round((nt - z*sigma)/2.)), len(slopes)-1)
Rl = max(int(np.round((nt + z*sigma)/2.)) - 1, 0)
delta = slopes[[Rl, Ru]]
return medslope, medinter, delta[0], delta[1]
def _find_repeats(arr):
# This function assumes it may clobber its input.
if len(arr) == 0:
return np.array(0, np.float64), np.array(0, np.intp)
# XXX This cast was previously needed for the Fortran implementation,
# should we ditch it?
arr = np.asarray(arr, np.float64).ravel()
arr.sort()
# Taken from NumPy 1.9's np.unique.
change = np.concatenate(([True], arr[1:] != arr[:-1]))
unique = arr[change]
change_idx = np.concatenate(np.nonzero(change) + ([arr.size],))
freq = np.diff(change_idx)
atleast2 = freq > 1
return unique[atleast2], freq[atleast2]
| mit |
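A minimal illustrative sketch (not part of the scipy source above; the data are synthetic): ``theilslopes`` takes the slope as the median of all pairwise slopes and the intercept as ``median(y) - medslope*median(x)``. The core of that computation in a few NumPy lines:

import numpy as np

rng = np.random.RandomState(0)
x = np.linspace(0.0, 10.0, 50)
y = 2.0 * x + 1.0 + rng.normal(scale=0.5, size=x.size)
y[:3] += 15.0                        # gross outliers barely move the median slope

dx = x[:, np.newaxis] - x            # all pairwise x differences
dy = y[:, np.newaxis] - y            # all pairwise y differences
slopes = dy[dx > 0] / dx[dx > 0]     # each unordered pair contributes once
slope = np.median(slopes)
intercept = np.median(y) - slope * np.median(x)
print(slope, intercept)              # close to 2 and 1 despite the outliers

Taking the median rather than a least-squares average is what gives the estimator the robustness the docstring refers to.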
jayflo/scikit-learn | examples/cluster/plot_birch_vs_minibatchkmeans.py | 333 | 3694 | """
=================================
Compare BIRCH and MiniBatchKMeans
=================================
This example compares the timing of Birch (with and without the global
clustering step) and MiniBatchKMeans on a synthetic dataset having
100,000 samples and 2 features generated using make_blobs.
If ``n_clusters`` is set to None, the data is reduced from 100,000
samples to a set of 158 clusters. This can be viewed as a preprocessing
step before the final (global) clustering step that further reduces these
158 clusters to 100 clusters.
"""
# Authors: Manoj Kumar <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
from itertools import cycle
from time import time
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import Birch, MiniBatchKMeans
from sklearn.datasets.samples_generator import make_blobs
# Generate centers for the blobs so that it forms a 10 X 10 grid.
xx = np.linspace(-22, 22, 10)
yy = np.linspace(-22, 22, 10)
xx, yy = np.meshgrid(xx, yy)
n_centres = np.hstack((np.ravel(xx)[:, np.newaxis],
np.ravel(yy)[:, np.newaxis]))
# Generate blobs to do a comparison between MiniBatchKMeans and Birch.
X, y = make_blobs(n_samples=100000, centers=n_centres, random_state=0)
# Use all colors that matplotlib provides by default.
colors_ = cycle(colors.cnames.keys())
fig = plt.figure(figsize=(12, 4))
fig.subplots_adjust(left=0.04, right=0.98, bottom=0.1, top=0.9)
# Compute clustering with Birch with and without the final clustering step
# and plot.
birch_models = [Birch(threshold=1.7, n_clusters=None),
Birch(threshold=1.7, n_clusters=100)]
final_step = ['without global clustering', 'with global clustering']
for ind, (birch_model, info) in enumerate(zip(birch_models, final_step)):
t = time()
birch_model.fit(X)
    time_ = time() - t
    print("Birch %s as the final step took %0.2f seconds" % (
        info, time_))
# Plot result
labels = birch_model.labels_
centroids = birch_model.subcluster_centers_
n_clusters = np.unique(labels).size
print("n_clusters : %d" % n_clusters)
ax = fig.add_subplot(1, 3, ind + 1)
for this_centroid, k, col in zip(centroids, range(n_clusters), colors_):
mask = labels == k
ax.plot(X[mask, 0], X[mask, 1], 'w',
markerfacecolor=col, marker='.')
if birch_model.n_clusters is None:
ax.plot(this_centroid[0], this_centroid[1], '+', markerfacecolor=col,
markeredgecolor='k', markersize=5)
ax.set_ylim([-25, 25])
ax.set_xlim([-25, 25])
ax.set_autoscaley_on(False)
ax.set_title('Birch %s' % info)
# Compute clustering with MiniBatchKMeans.
mbk = MiniBatchKMeans(init='k-means++', n_clusters=100, batch_size=100,
n_init=10, max_no_improvement=10, verbose=0,
random_state=0)
t0 = time()
mbk.fit(X)
t_mini_batch = time() - t0
print("Time taken to run MiniBatchKMeans %0.2f seconds" % t_mini_batch)
mbk_means_labels_unique = np.unique(mbk.labels_)
ax = fig.add_subplot(1, 3, 3)
for this_centroid, k, col in zip(mbk.cluster_centers_,
range(n_clusters), colors_):
mask = mbk.labels_ == k
ax.plot(X[mask, 0], X[mask, 1], 'w', markerfacecolor=col, marker='.')
ax.plot(this_centroid[0], this_centroid[1], '+', markeredgecolor='k',
markersize=5)
ax.set_xlim([-25, 25])
ax.set_ylim([-25, 25])
ax.set_title("MiniBatchKMeans")
ax.set_autoscaley_on(False)
plt.show()
| bsd-3-clause |
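A small illustrative follow-up sketch (not part of the scikit-learn example above; dataset and parameter values are arbitrary assumptions): with ``n_clusters=None``, Birch acts purely as a data-reduction step, and the resulting subcluster centers can be handed to any global clusterer, which is the two-stage idea the example header describes.

from sklearn.cluster import Birch, MiniBatchKMeans
from sklearn.datasets import make_blobs

X, _ = make_blobs(n_samples=20000, centers=25, random_state=0)

reducer = Birch(threshold=1.7, n_clusters=None).fit(X)    # CF-tree reduction only
centers = reducer.subcluster_centers_
print("reduced %d samples to %d subclusters" % (len(X), len(centers)))

global_step = MiniBatchKMeans(n_clusters=25, random_state=0).fit(centers)
print("global step produced %d clusters" % len(global_step.cluster_centers_))

Running the global step on a few hundred centers instead of the full dataset is what makes the reduction worthwhile.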
bachiraoun/fullrmc | Constraints/StructureFactorConstraints.py | 1 | 64342 | """
StructureFactorConstraints contains classes for all constraints related experimental static structure factor functions.
.. inheritance-diagram:: fullrmc.Constraints.StructureFactorConstraints
:parts: 1
"""
# standard libraries imports
from __future__ import print_function
import itertools, re
# external libraries imports
import numpy as np
from pdbparser.Utilities.Database import is_element_property, get_element_property
from pdbparser.Utilities.Collection import get_normalized_weighting
# fullrmc imports
from ..Globals import INT_TYPE, FLOAT_TYPE, PI, PRECISION, LOGGER
from ..Globals import str, long, unicode, bytes, basestring, range, xrange, maxint
from ..Core.Collection import is_number, is_integer, get_path
from ..Core.Collection import reset_if_collected_out_of_date, get_real_elements_weight
from ..Core.Collection import get_caller_frames
from ..Core.Constraint import Constraint, ExperimentalConstraint
from ..Core.pairs_histograms import multiple_pairs_histograms_coords, full_pairs_histograms_coords
class StructureFactorConstraint(ExperimentalConstraint):
"""
Controls the Structure Factor noted as S(Q) and also called
total-scattering structure function or Static Structure Factor.
    S(Q) is a dimensionless quantity, normalized such that the average
value :math:`<S(Q)>=1`.
    It is worth mentioning that S(Q) is nothing other than the normalized and
    corrected powder diffraction pattern once all experimental artefacts are removed.
The computation of S(Q) is done through an inverse Sine Fourier transform
of the computed pair distribution function G(r).
.. math::
S(Q) = 1+ \\frac{1}{Q} \\int_{0}^{\\infty} G(r) sin(Qr) dr
From an atomistic model and histogram point of view, G(r) is computed as
the following:
.. math::
G(r) = 4 \\pi r (\\rho_{r} - \\rho_{0})
= 4 \\pi \\rho_{0} r (g(r)-1)
= \\frac{R(r)}{r} - 4 \\pi \\rho_{0}
g(r) is calculated after binning all pair atomic distances into a
    weighted histogram as the following:
.. math::
g(r) = \\sum \\limits_{i,j}^{N} w_{i,j} \\frac{\\rho_{i,j}(r)}{\\rho_{0}}
= \\sum \\limits_{i,j}^{N} w_{i,j} \\frac{n_{i,j}(r) / v(r)}{N_{i,j} / V}
Where:\n
:math:`Q` is the momentum transfer. \n
:math:`r` is the distance between two atoms. \n
:math:`\\rho_{i,j}(r)` is the pair density function of atoms i and j. \n
:math:`\\rho_{0}` is the average number density of the system. \n
:math:`w_{i,j}` is the relative weighting of atom types i and j. \n
:math:`R(r)` is the radial distribution function (rdf). \n
:math:`N` is the total number of atoms. \n
:math:`V` is the volume of the system. \n
:math:`n_{i,j}(r)` is the number of atoms i neighbouring j at a distance r. \n
:math:`v(r)` is the annulus volume at distance r and of thickness dr. \n
:math:`N_{i,j}` is the total number of atoms i and j in the system. \n
+----------------------------------------------------------------------+
|.. figure:: reduced_structure_factor_constraint_plot_method.png |
| :width: 530px |
| :height: 400px |
| :align: left |
| |
| Reduced structure factor of memory shape Nickel-Titanium alloy. |
+----------------------------------------------------------------------+
:Parameters:
#. experimentalData (numpy.ndarray, string): Experimental data as
numpy.ndarray or string path to load data using numpy.loadtxt
method.
        #. dataWeights (None, numpy.ndarray): Weights array with the same number
           of points as experimentalData, used in the constraint's standard
           error computation. Therefore particular fitting emphasis can be
           put on different data points that might be considered more or less
           important in order to get a reasonable and plausible model.\n
If None is given, all data points are considered of the same
importance in the computation of the constraint's standard error.\n
           If numpy.ndarray is given, all weights must be positive and all
           zero-weighted data points won't contribute to the total
           constraint's standard error. At least a single weight point is
           required to be non-zero and the weights array will be automatically
           scaled upon setting such that the sum of all the weights
is equal to the number of data points.
#. weighting (string): The elements weighting scheme. It must be any
atomic attribute (atomicNumber, neutronCohb, neutronIncohb,
neutronCohXs, neutronIncohXs, atomicWeight, covalentRadius) defined
in pdbparser database. In case of xrays or neutrons experimental
weights, one can simply set weighting to 'xrays' or 'neutrons'
and the value will be automatically adjusted to respectively
'atomicNumber' and 'neutronCohb'. If attribute values are
missing in the pdbparser database, atomic weights must be
given in atomsWeight dictionary argument.
#. atomsWeight (None, dict): Atoms weight dictionary where keys are
           atom elements and values are custom weights. If None is given
           or partially given, missing element weightings will be fully set
           using the given weighting scheme.
#. rmin (None, number): The minimum distance value to compute G(r)
histogram. If None is given, rmin is computed as
:math:`2 \\pi / Q_{max}`.
#. rmax (None, number): The maximum distance value to compute G(r)
histogram. If None is given, rmax is computed as
:math:`2 \\pi / dQ`.
#. dr (None, number): The distance bin value to compute G(r)
histogram. If None is given, bin is computed as
:math:`2 \\pi / (Q_{max}-Q_{min})`.
#. scaleFactor (number): A normalization scale factor used to normalize
the computed data to the experimental ones.
#. adjustScaleFactor (list, tuple): Used to adjust fit or guess
the best scale factor during stochastic engine runtime.
It must be a list of exactly three entries.\n
#. The frequency in number of generated moves of finding the best
scale factor. If 0 frequency is given, it means that the scale
factor is fixed.
#. The minimum allowed scale factor value.
#. The maximum allowed scale factor value.
#. windowFunction (None, numpy.ndarray): The window function to
convolute with the computed pair distribution function of the
system prior to comparing it with the experimental data. In
general, the experimental pair distribution function G(r) shows
artificial wrinkles, among others the main reason is because
G(r) is computed by applying a sine Fourier transform to the
experimental structure factor S(q). Therefore window function is
used to best imitate the numerical artefacts in the experimental
data.
#. limits (None, tuple, list): The distance limits to compute the
histograms. If None is given, the limits will be automatically
set the the min and max distance of the experimental data.
Otherwise, a tuple of exactly two items where the first is the
minimum distance or None and the second is the maximum distance
or None.
**NB**: If adjustScaleFactor first item (frequency) is 0, the scale factor
will remain untouched and the limits minimum and maximum won't be checked.
.. code-block:: python
# import fullrmc modules
from fullrmc.Engine import Engine
from fullrmc.Constraints.StructureFactorConstraints import StructureFactorConstraint
# create engine
ENGINE = Engine(path='my_engine.rmc')
# set pdb file
ENGINE.set_pdb('system.pdb')
# create and add constraint
SFC = StructureFactorConstraint(experimentalData="sq.dat", weighting="atomicNumber")
ENGINE.add_constraints(SFC)
"""
def __init__(self, experimentalData, dataWeights=None,
weighting="atomicNumber", atomsWeight=None,
rmin=None, rmax=None, dr=None,
scaleFactor=1.0, adjustScaleFactor=(0, 0.8, 1.2),
windowFunction=None, limits=None):
# initialize variables
self.__experimentalQValues = None
self.__experimentalSF = None
self.__rmin = None
self.__rmax = None
self.__dr = None
self.__minimumDistance = None
self.__maximumDistance = None
self.__bin = None
self.__shellCenters = None
self.__histogramSize = None
self.__shellVolumes = None
self.__Gr2SqMatrix = None
# initialize constraint
super(StructureFactorConstraint, self).__init__( experimentalData=experimentalData, dataWeights=dataWeights, scaleFactor=scaleFactor, adjustScaleFactor=adjustScaleFactor)
# set atomsWeight
self.set_atoms_weight(atomsWeight)
# set elements weighting
self.set_weighting(weighting)
self.__set_weighting_scheme()
# set window function
self.set_window_function(windowFunction)
# set r parameters
self.set_rmin(rmin)
self.set_rmax(rmax)
self.set_dr(dr)
# set frame data
FRAME_DATA = [d for d in self.FRAME_DATA]
FRAME_DATA.extend(['_StructureFactorConstraint__experimentalQValues',
'_StructureFactorConstraint__experimentalSF',
'_StructureFactorConstraint__elementsPairs',
'_StructureFactorConstraint__weightingScheme',
'_StructureFactorConstraint__atomsWeight',
'_StructureFactorConstraint__qmin',
'_StructureFactorConstraint__qmax',
'_StructureFactorConstraint__rmin',
'_StructureFactorConstraint__rmax',
'_StructureFactorConstraint__dr',
'_StructureFactorConstraint__minimumDistance',
'_StructureFactorConstraint__maximumDistance',
'_StructureFactorConstraint__bin',
'_StructureFactorConstraint__shellCenters',
'_StructureFactorConstraint__histogramSize',
'_StructureFactorConstraint__shellVolumes',
'_StructureFactorConstraint__Gr2SqMatrix',
'_StructureFactorConstraint__windowFunction',
'_elementsWeight',] )
RUNTIME_DATA = [d for d in self.RUNTIME_DATA]
RUNTIME_DATA.extend( [] )
object.__setattr__(self, 'FRAME_DATA', tuple(FRAME_DATA) )
object.__setattr__(self, 'RUNTIME_DATA', tuple(RUNTIME_DATA) )
def _codify_update__(self, name='constraint', addDependencies=True):
dependencies = []
code = []
if addDependencies:
code.extend(dependencies)
dw = self.dataWeights
if dw is not None:
dw = list(dw)
code.append("dw = {dw}".format(dw=dw))
wf = self.windowFunction
if isinstance(wf, np.ndarray):
code.append("wf = np.array({wf})".format(wf=list(wf)))
else:
code.append("wf = {wf}".format(wf=wf))
code.append("{name}.set_used({val})".format(name=name, val=self.used))
code.append("{name}.set_scale_factor({val})".format(name=name, val=self.scaleFactor))
code.append("{name}.set_adjust_scale_factor({val})".format(name=name, val=self.adjustScaleFactor))
code.append("{name}.set_data_weights(dw)".format(name=name))
code.append("{name}.set_atoms_weight({val})".format(name=name, val=self.atomsWeight))
code.append("{name}.set_window_function(wf)".format(name=name))
code.append("{name}.set_rmin({val})".format(name=name, val=self.rmin))
code.append("{name}.set_rmax({val})".format(name=name, val=self.rmax))
code.append("{name}.set_dr({val})".format(name=name, val=self.dr))
code.append("{name}.set_limits({val})".format(name=name, val=self.limits))
# return
return dependencies, '\n'.join(code)
def _codify__(self, engine, name='constraint', addDependencies=True):
assert isinstance(name, basestring), LOGGER.error("name must be a string")
assert re.match('[a-zA-Z_][a-zA-Z0-9_]*$', name) is not None, LOGGER.error("given name '%s' can't be used as a variable name"%name)
klass = self.__class__.__name__
dependencies = ['import numpy as np','from fullrmc.Constraints import StructureFactorConstraints']
code = []
if addDependencies:
code.extend(dependencies)
x = list(self.experimentalData[:,0])
y = list(self.experimentalData[:,1])
code.append("x = {x}".format(x=x))
code.append("y = {y}".format(y=y))
code.append("d = np.transpose([x,y]).astype(np.float32)")
dw = self.dataWeights
if dw is not None:
dw = list(dw)
code.append("dw = {dw}".format(dw=dw))
wf = self.windowFunction
if isinstance(wf, np.ndarray):
code.append("wf = np.array({wf})".format(wf=list(wf)))
else:
code.append("wf = {wf}".format(wf=wf))
code.append("{name} = {klass}s.{klass}\
(experimentalData=d, dataWeights=dw, weighting='{weighting}', atomsWeight={atomsWeight}, \
rmin={rmin}, rmax={rmax}, dr={dr}, scaleFactor={scaleFactor}, adjustScaleFactor={adjustScaleFactor}, \
windowFunction=wf, limits={limits})".format(name=name, klass=klass,
weighting=self.weighting, atomsWeight=self.atomsWeight, rmin=self.rmin,
rmax=self.rmax, dr=self.dr, scaleFactor=self.scaleFactor,
adjustScaleFactor=self.adjustScaleFactor, limits=self.limits))
code.append("{engine}.add_constraints([{name}])".format(engine=engine, name=name))
# return
return dependencies, '\n'.join(code)
#def __getstate__(self):
# # make sure that __Gr2SqMatrix is not pickled but saved to the disk as None
# state = super(StructureFactorConstraint, self).__getstate__()
# state["_StructureFactorConstraint__Gr2SqMatrix"] = None
# return state
#
#def __setstate__(self, state):
# # make sure to regenerate G(r) to S(q) matrix at loading time
# self.__dict__.update( state )
# self.__set_Gr_2_Sq_matrix()
#
def __set_Gr_2_Sq_matrix(self):
if self.__experimentalQValues is None or self.__shellCenters is None:
self.__Gr2SqMatrix = None
else:
Qs = self.__experimentalQValues
Rs = self.__shellCenters
dr = self.__shellCenters[1]-self.__shellCenters[0]
qr = Rs.reshape((-1,1))*(np.ones((len(Rs),1), dtype=FLOAT_TYPE)*Qs)
sinqr = np.sin(qr)
sinqr_q = sinqr/Qs
self.__Gr2SqMatrix = dr*sinqr_q
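    # Descriptive note: __Gr2SqMatrix is the discretised sine Fourier kernel of
    # the class docstring, matrix[r, q] = dr * sin(q*r) / q, so that
    # S(q) = 1 + sum_r G(r) * matrix[r, q]  (see _get_Sq_from_Gr below).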
def __set_weighting_scheme(self):
if self.engine is not None:
self.__elementsPairs = sorted(itertools.combinations_with_replacement(self.engine.elements,2))
#elementsWeight = dict([(el,float(get_element_property(el,self.__weighting))) for el in self.engine.elements])
#self._elementsWeight = dict([(el,self.__atomsWeight.get(el, float(get_element_property(el,self.__weighting)))) for el in self.engine.elements])
self._elementsWeight = get_real_elements_weight(elements=self.engine.elements, weightsDict=self.__atomsWeight, weighting=self.__weighting)
self.__weightingScheme = get_normalized_weighting(numbers=self.engine.numberOfAtomsPerElement, weights=self._elementsWeight)
for k in self.__weightingScheme:
self.__weightingScheme[k] = FLOAT_TYPE(self.__weightingScheme[k])
else:
self.__elementsPairs = None
self.__weightingScheme = None
# dump to repository
self._dump_to_repository({'_StructureFactorConstraint__elementsPairs' : self.__elementsPairs,
'_StructureFactorConstraint__weightingScheme': self.__weightingScheme})
def __set_histogram(self):
if self.__minimumDistance is None or self.__maximumDistance is None or self.__bin is None:
self.__shellCenters = None
self.__histogramSize = None
self.__shellVolumes = None
else:
# compute edges
if self.engine is not None and self.rmax is None:
minHalfBox = np.min( [np.linalg.norm(v)/2. for v in self.engine.basisVectors])
self.__edges = np.arange(self.__minimumDistance,minHalfBox, self.__bin).astype(FLOAT_TYPE)
else:
self.__edges = np.arange(self.__minimumDistance, self.__maximumDistance+self.__bin, self.__bin).astype(FLOAT_TYPE)
# adjust rmin and rmax
self.__minimumDistance = self.__edges[0]
self.__maximumDistance = self.__edges[-1]
# compute shellCenters
self.__shellCenters = (self.__edges[0:-1]+self.__edges[1:])/FLOAT_TYPE(2.)
# set histogram size
self.__histogramSize = INT_TYPE( len(self.__edges)-1 )
# set shell centers and volumes
self.__shellVolumes = FLOAT_TYPE(4.0/3.)*PI*((self.__edges[1:])**3 - self.__edges[0:-1]**3)
# dump to repository
self._dump_to_repository({'_StructureFactorConstraint__minimumDistance': self.__minimumDistance,
'_StructureFactorConstraint__maximumDistance': self.__maximumDistance,
'_StructureFactorConstraint__shellCenters' : self.__shellCenters,
'_StructureFactorConstraint__histogramSize' : self.__histogramSize,
'_StructureFactorConstraint__shellVolumes' : self.__shellVolumes})
# reset constraint
self.reset_constraint()
# reset sq matrix
self.__set_Gr_2_Sq_matrix()
def _on_collector_reset(self):
pass
@property
def rmin(self):
""" Histogram minimum distance. """
return self.__rmin
@property
def rmax(self):
""" Histogram maximum distance. """
return self.__rmax
@property
def dr(self):
""" Histogram bin size."""
return self.__dr
@property
def bin(self):
""" Computed histogram distance bin size."""
return self.__bin
@property
def minimumDistance(self):
""" Computed histogram minimum distance. """
return self.__minimumDistance
@property
def maximumDistance(self):
""" Computed histogram maximum distance. """
return self.__maximumDistance
@property
def qmin(self):
""" Experimental data reciprocal distances minimum. """
return self.__qmin
@property
def qmax(self):
""" Experimental data reciprocal distances maximum. """
return self.__qmax
@property
def dq(self):
""" Experimental data reciprocal distances bin size. """
return self.__experimentalQValues[1]-self.__experimentalQValues[0]
@property
def experimentalQValues(self):
""" Experimental data used q values. """
return self.__experimentalQValues
@property
def histogramSize(self):
""" Histogram size"""
return self.__histogramSize
@property
def shellCenters(self):
""" Shells center array"""
return self.__shellCenters
@property
def shellVolumes(self):
""" Shells volume array"""
return self.__shellVolumes
@property
def experimentalSF(self):
""" Experimental Structure Factor or S(q)"""
return self.__experimentalSF
@property
def elementsPairs(self):
""" Elements pairs """
return self.__elementsPairs
@property
def atomsWeight(self):
"""Custom atoms weight"""
return self.__atomsWeight
@property
def weighting(self):
""" Elements weighting definition. """
return self.__weighting
@property
def weightingScheme(self):
""" Elements weighting scheme. """
return self.__weightingScheme
@property
def windowFunction(self):
""" Convolution window function. """
return self.__windowFunction
@property
def Gr2SqMatrix(self):
""" G(r) to S(q) transformation matrix."""
return self.__Gr2SqMatrix
@property
def _experimentalX(self):
"""For internal use only to interface
ExperimentalConstraint.get_constraints_properties"""
return self.__experimentalQValues
@property
def _experimentalY(self):
"""For internal use only to interface
ExperimentalConstraint.get_constraints_properties"""
return self.__experimentalSF
@property
def _modelX(self):
"""For internal use only to interface
ExperimentalConstraint.get_constraints_properties"""
return self.__experimentalQValues
def listen(self, message, argument=None):
"""
Listens to any message sent from the Broadcaster.
:Parameters:
#. message (object): Any python object to send to constraint's
listen method.
#. argument (object): Any type of argument to pass to the
listeners.
"""
if message in ("engine set","update pdb","update molecules indexes","update elements indexes","update names indexes"):
self.__set_weighting_scheme()
# reset histogram
if self.engine is not None:
self.__set_histogram()
self.reset_constraint() # ADDED 2017-JAN-08
elif message in("update boundary conditions",):
self.reset_constraint()
def set_rmin(self, rmin):
"""
Set rmin value.
:parameters:
#. rmin (None, number): The minimum distance value to compute G(r)
histogram. If None is given, rmin is computed as
:math:`2 \\pi / Q_{max}`.
"""
if rmin is None:
minimumDistance = FLOAT_TYPE( 2.*PI/self.__qmax )
else:
assert is_number(rmin), LOGGER.error("rmin must be None or a number")
minimumDistance = FLOAT_TYPE(rmin)
if self.__maximumDistance is not None:
assert minimumDistance<self.__maximumDistance, LOGGER.error("rmin must be smaller than rmax %s"%self.__maximumDistance)
self.__rmin = rmin
self.__minimumDistance = minimumDistance
# dump to repository
self._dump_to_repository({'_StructureFactorConstraint__rmin': self.__rmin,
'_StructureFactorConstraint__minimumDistance': self.__minimumDistance})
# reset histogram
self.__set_histogram()
def set_rmax(self, rmax):
"""
Set rmax value.
:Parameters:
#. rmax (None, number): The maximum distance value to compute G(r)
histogram. If None is given, rmax is computed as
:math:`2 \\pi / dQ`.
"""
if rmax is None:
dq = self.__experimentalQValues[1]-self.__experimentalQValues[0]
maximumDistance = FLOAT_TYPE( 2.*PI/dq )
else:
assert is_number(rmax), LOGGER.error("rmax must be None or a number")
maximumDistance = FLOAT_TYPE(rmax)
if self.__minimumDistance is not None:
assert maximumDistance>self.__minimumDistance, LOGGER.error("rmax must be bigger than rmin %s"%self.__minimumDistance)
self.__rmax = rmax
self.__maximumDistance = maximumDistance
# dump to repository
self._dump_to_repository({'_StructureFactorConstraint__rmax': self.__rmax,
'_StructureFactorConstraint__maximumDistance': self.__maximumDistance})
# reset histogram
self.__set_histogram()
def set_dr(self, dr):
"""
Set dr value.
:Parameters:
#. dr (None, number): The distance bin value to compute G(r)
histogram. If None is given, bin is computed as
:math:`2 \\pi / (Q_{max}-Q_{min})`.
"""
if dr is None:
bin = 2.*PI/self.__qmax
rbin = round(bin,1)
if rbin>bin:
rbin -= 0.1
bin = FLOAT_TYPE( rbin )
else:
assert is_number(dr), LOGGER.error("dr must be None or a number")
bin = FLOAT_TYPE(dr)
self.__dr = dr
self.__bin = bin
# dump to repository
self._dump_to_repository({'_StructureFactorConstraint__dr': self.__dr,
'_StructureFactorConstraint__bin': self.__bin})
# reset histogram
self.__set_histogram()
def set_weighting(self, weighting):
"""
        Set elements weighting. It must be a valid entry of the pdbparser atoms
        database.
:Parameters:
#. weighting (string): The elements weighting scheme. It must be
any atomic attribute (atomicNumber, neutronCohb, neutronIncohb,
neutronCohXs, neutronIncohXs, atomicWeight, covalentRadius)
defined in pdbparser database. In case of xrays or neutrons
experimental weights, one can simply set weighting to 'xrays'
or 'neutrons' and the value will be automatically adjusted to
respectively 'atomicNumber' and 'neutronCohb'. If attribute
values are missing in the pdbparser database, atomic weights
must be given in atomsWeight dictionary argument.
"""
if weighting.lower() in ["xrays","x-rays","xray","x-ray"]:
LOGGER.fixed("'%s' weighting is set to atomicNumber"%weighting)
weighting = "atomicNumber"
elif weighting.lower() in ["neutron","neutrons"]:
LOGGER.fixed("'%s' weighting is set to neutronCohb"%weighting)
weighting = "neutronCohb"
assert is_element_property(weighting),LOGGER.error( "weighting is not a valid pdbparser atoms database entry")
assert weighting != "atomicFormFactor", LOGGER.error("atomicFormFactor weighting is not allowed")
self.__weighting = weighting
# dump to repository
self._dump_to_repository({'_StructureFactorConstraint__weighting': self.__weighting})
def set_atoms_weight(self, atomsWeight):
"""
        Set custom atom weights. This is the way to set atom weights
        different from those of the given weighting scheme.
:Parameters:
#. atomsWeight (None, dict): Atoms weight dictionary where keys are
               atom elements and values are custom weights. If None is given
               or partially given, missing element weightings will be fully set
               using the given weighting scheme.
"""
if atomsWeight is None:
AW = {}
else:
assert isinstance(atomsWeight, dict),LOGGER.error("atomsWeight must be None or a dictionary")
AW = {}
for k in atomsWeight:
assert isinstance(k, basestring),LOGGER.error("atomsWeight keys must be strings")
try:
val = float(atomsWeight[k])
except:
raise LOGGER.error( "atomsWeight values must be numerical")
AW[k]=val
# set atomsWeight
self.__atomsWeight = AW
# dump to repository
self._dump_to_repository({'_StructureFactorConstraint__atomsWeight': self.__atomsWeight})
def set_window_function(self, windowFunction):
"""
Set convolution window function.
:Parameters:
#. windowFunction (None, numpy.ndarray): The window function to
convolute with the computed pair distribution function of the
system prior to comparing it with the experimental data. In
general, the experimental pair distribution function G(r) shows
artificial wrinkles, among others the main reason is because
G(r) is computed by applying a sine Fourier transform to the
experimental structure factor S(q). Therefore window function is
used to best imitate the numerical artefacts in the experimental
data.
"""
if windowFunction is not None:
assert isinstance(windowFunction, np.ndarray), LOGGER.error("windowFunction must be a numpy.ndarray")
assert windowFunction.dtype.type is FLOAT_TYPE, LOGGER.error("windowFunction type must be %s"%FLOAT_TYPE)
assert len(windowFunction.shape) == 1, LOGGER.error("windowFunction must be of dimension 1")
assert len(windowFunction) <= self.experimentalData.shape[0], LOGGER.error("windowFunction length must be smaller than experimental data")
# normalize window function
windowFunction /= np.sum(windowFunction)
# check window size
# set windowFunction
self.__windowFunction = windowFunction
# dump to repository
self._dump_to_repository({'_StructureFactorConstraint__windowFunction': self.__windowFunction})
def set_experimental_data(self, experimentalData):
"""
Set constraint's experimental data.
:Parameters:
#. experimentalData (numpy.ndarray, string): The experimental
data as numpy.ndarray or string path to load data using
numpy.loadtxt function.
"""
# get experimental data
super(StructureFactorConstraint, self).set_experimental_data(experimentalData=experimentalData)
# set limits
self.set_limits(self.limits)
def set_limits(self, limits):
"""
Set the reciprocal distance limits (qmin, qmax).
:Parameters:
#. limits (None, tuple, list): Distance limits to bound
experimental data and compute histograms.
If None is given, the limits will be automatically set to
min and max reciprocal distance recorded in experimental data.
If given, a tuple of minimum reciprocal distance (qmin) or None
and maximum reciprocal distance (qmax) or None should be given.
"""
self._ExperimentalConstraint__set_limits(limits)
# set qvalues
self.__experimentalQValues = self.experimentalData[self.limitsIndexStart:self.limitsIndexEnd+1,0].astype(FLOAT_TYPE)
self.__experimentalSF = self.experimentalData[self.limitsIndexStart:self.limitsIndexEnd+1,1].astype(FLOAT_TYPE)
# set qmin and qmax
self.__qmin = self.__experimentalQValues[0]
self.__qmax = self.__experimentalQValues[-1]
        assert self.__qmin>0, LOGGER.error("qmin must be bigger than 0. Experimental null q values are ambiguous. Try setting limits.")
# dump to repository
self._dump_to_repository({'_StructureFactorConstraint__experimentalQValues': self.__experimentalQValues,
'_StructureFactorConstraint__experimentalSF' : self.__experimentalSF,
'_StructureFactorConstraint__qmin' : self.__qmin,
'_StructureFactorConstraint__qmax' : self.__qmax})
# set used dataWeights
self._set_used_data_weights(limitsIndexStart=self.limitsIndexStart, limitsIndexEnd=self.limitsIndexEnd)
# reset constraint
self.reset_constraint()
# reset sq matrix
self.__set_Gr_2_Sq_matrix()
def update_standard_error(self):
""" Compute and set constraint's standardError."""
# set standardError
totalSQ = self.get_constraint_value()["total_no_window"]
self.set_standard_error(self.compute_standard_error(modelData = totalSQ))
def check_experimental_data(self, experimentalData):
"""
Check whether experimental data is correct.
:Parameters:
#. experimentalData (object): The experimental data to check.
:Returns:
#. result (boolean): Whether it is correct or not.
#. message (str): Checking message that explains whats's wrong
with the given data
"""
if not isinstance(experimentalData, np.ndarray):
return False, "experimentalData must be a numpy.ndarray"
if experimentalData.dtype.type is not FLOAT_TYPE:
return False, "experimentalData type must be %s"%FLOAT_TYPE
if len(experimentalData.shape) !=2:
return False, "experimentalData must be of dimension 2"
if experimentalData.shape[1] !=2:
return False, "experimentalData must have only 2 columns"
# check distances order
inOrder = (np.array(sorted(experimentalData[:,0]), dtype=FLOAT_TYPE)-experimentalData[:,0])<=PRECISION
if not np.all(inOrder):
return False, "experimentalData distances are not sorted in order"
if experimentalData[0][0]<0:
return False, "experimentalData distances min value is found negative"
# data format is correct
return True, ""
def compute_standard_error(self, modelData):
"""
Compute the standard error (StdErr) as the squared deviations
between model computed data and the experimental ones.
.. math::
StdErr = \\sum \\limits_{i}^{N} W_{i}(Y(X_{i})-F(X_{i}))^{2}
Where:\n
:math:`N` is the total number of experimental data points. \n
:math:`W_{i}` is the data point weight. It becomes equivalent to 1 when dataWeights is set to None. \n
:math:`Y(X_{i})` is the experimental data point :math:`X_{i}`. \n
:math:`F(X_{i})` is the computed from the model data :math:`X_{i}`. \n
:Parameters:
#. modelData (numpy.ndarray): The data to compare with the
experimental one and compute the squared deviation.
:Returns:
#. standardError (number): The calculated constraint's
standardError.
"""
# compute difference
diff = self.__experimentalSF-modelData
# return standard error
if self._usedDataWeights is None:
return np.add.reduce((diff)**2)
else:
return np.add.reduce(self._usedDataWeights*((diff)**2))
def _get_Sq_from_Gr(self, Gr):
return np.sum(Gr.reshape((-1,1))*self.__Gr2SqMatrix, axis=0)+1
def _apply_scale_factor(self, Sq, scaleFactor):
if scaleFactor != 1:
Sq = scaleFactor*(Sq-1) + 1
return Sq
def __get_total_Sq(self, data, rho0):
"""This method is created just to speed up the computation of
the total Sq upon fitting."""
Gr = np.zeros(self.__histogramSize, dtype=FLOAT_TYPE)
for pair in self.__elementsPairs:
# get weighting scheme
wij = self.__weightingScheme.get(pair[0]+"-"+pair[1], None)
if wij is None:
wij = self.__weightingScheme[pair[1]+"-"+pair[0]]
# get number of atoms per element
ni = self.engine.numberOfAtomsPerElement[pair[0]]
nj = self.engine.numberOfAtomsPerElement[pair[1]]
# get index of element
idi = self.engine.elements.index(pair[0])
idj = self.engine.elements.index(pair[1])
# get Nij
if idi == idj:
Nij = ni*(ni-1)/2.0
Dij = Nij/self.engine.volume
nij = data["intra"][idi,idj,:]+data["inter"][idi,idj,:]
Gr += wij*nij/Dij
else:
Nij = ni*nj
Dij = Nij/self.engine.volume
nij = data["intra"][idi,idj,:]+data["intra"][idj,idi,:] + data["inter"][idi,idj,:]+data["inter"][idj,idi,:]
Gr += wij*nij/Dij
        # Divide by shell volumes
Gr /= self.shellVolumes
# compute total G(r)
#rho0 = (self.engine.numberOfAtoms/self.engine.volume).astype(FLOAT_TYPE)
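        # descriptive note: this converts the weighted g(r) accumulated above into
        # the reduced pair distribution function G(r) = 4*pi*rho0*r*(g(r)-1)
        # defined in the class docstring, prior to the sine transform to S(q).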
Gr = (FLOAT_TYPE(4.)*PI*self.__shellCenters*rho0)*(Gr-1)
# Compute S(q) from G(r)
Sq = self._get_Sq_from_Gr(Gr)
# Multiply by scale factor
self._fittedScaleFactor = self.get_adjusted_scale_factor(self.__experimentalSF, Sq, self._usedDataWeights)
# apply scale factor
Sq = self._apply_scale_factor(Sq, self._fittedScaleFactor)
# apply multiframe prior and weight
Sq = self._apply_multiframe_prior(Sq)
# convolve total with window function
if self.__windowFunction is not None:
Sq = np.convolve(Sq, self.__windowFunction, 'same')
return Sq
def get_adjusted_scale_factor(self, experimentalData, modelData, dataWeights):
"""Overload to reduce S(q) prior to fitting scale factor.
        S(q) -> 1 at high q and this would otherwise bias the fitted scale factor.
        Overloading can be avoided but it is done here for performance reasons.
"""
SF = self.scaleFactor
# check to update scaleFactor
if self.adjustScaleFactorFrequency:
if not self.engine.accepted%self.adjustScaleFactorFrequency:
SF = self.fit_scale_factor(experimentalData-1, modelData-1, dataWeights)
return SF
def _get_constraint_value(self, data, applyMultiframePrior=True):
# http://erice2011.docking.org/upload/Other/Billinge_PDF/03-ReadingMaterial/BillingePDF2011.pdf page 6
#import time
#startTime = time.clock()
output = {}
for pair in self.__elementsPairs:
output["sf_intra_%s-%s" % pair] = np.zeros(self.__histogramSize, dtype=FLOAT_TYPE)
output["sf_inter_%s-%s" % pair] = np.zeros(self.__histogramSize, dtype=FLOAT_TYPE)
output["sf_total_%s-%s" % pair] = np.zeros(self.__histogramSize, dtype=FLOAT_TYPE)
gr = np.zeros(self.__histogramSize, dtype=FLOAT_TYPE)
for pair in self.__elementsPairs:
# get weighting scheme
wij = self.__weightingScheme.get(pair[0]+"-"+pair[1], None)
if wij is None:
wij = self.__weightingScheme[pair[1]+"-"+pair[0]]
# get number of atoms per element
ni = self.engine.numberOfAtomsPerElement[pair[0]]
nj = self.engine.numberOfAtomsPerElement[pair[1]]
# get index of element
idi = self.engine.elements.index(pair[0])
idj = self.engine.elements.index(pair[1])
# get Nij
if idi == idj:
Nij = ni*(ni-1)/2.0
output["sf_intra_%s-%s" % pair] += data["intra"][idi,idj,:]
output["sf_inter_%s-%s" % pair] += data["inter"][idi,idj,:]
else:
Nij = ni*nj
output["sf_intra_%s-%s" % pair] += data["intra"][idi,idj,:] + data["intra"][idj,idi,:]
output["sf_inter_%s-%s" % pair] += data["inter"][idi,idj,:] + data["inter"][idj,idi,:]
# compute g(r)
nij = output["sf_intra_%s-%s" % pair] + output["sf_inter_%s-%s" % pair]
dij = nij/self.__shellVolumes
Dij = Nij/self.engine.volume
gr += wij*dij/Dij
# calculate intensityFactor
intensityFactor = (self.engine.volume*wij)/(Nij*self.__shellVolumes)
# divide by factor
output["sf_intra_%s-%s" % pair] *= intensityFactor
output["sf_inter_%s-%s" % pair] *= intensityFactor
output["sf_total_%s-%s" % pair] = output["sf_intra_%s-%s" % pair] + output["sf_inter_%s-%s" % pair]
# Compute S(q) from G(r)
output["sf_intra_%s-%s" % pair] = self._get_Sq_from_Gr(output["sf_intra_%s-%s" % pair])
output["sf_inter_%s-%s" % pair] = self._get_Sq_from_Gr(output["sf_inter_%s-%s" % pair])
output["sf_total_%s-%s" % pair] = self._get_Sq_from_Gr(output["sf_total_%s-%s" % pair])
# compute total G(r)
rho0 = (self.engine.numberOfAtoms/self.engine.volume).astype(FLOAT_TYPE)
Gr = (FLOAT_TYPE(4.)*PI*self.__shellCenters*rho0) * (gr-1)
# Compute S(q) from G(r)
Sq = self._get_Sq_from_Gr(Gr)
# multiply by scale factor
output["total_no_window"] = self._apply_scale_factor(Sq, self._fittedScaleFactor)
# apply multiframe prior and weight
if applyMultiframePrior:
output["total_no_window"] = self._apply_multiframe_prior(output["total_no_window"])
# convolve total with window function
if self.__windowFunction is not None:
output["total"] = np.convolve(output["total_no_window"], self.__windowFunction, 'same').astype(FLOAT_TYPE)
else:
output["total"] = output["total_no_window"]
return output
def get_constraint_value(self, applyMultiframePrior=True):
"""
        Compute all partial Structure Factors (SQs).
:Parameters:
#. applyMultiframePrior (boolean): Whether to apply subframe weight
               and prior to the total. This will only have an effect when the used
               frame is a subframe and a subframe weight and prior are
               defined.
:Returns:
            #. SQs (dictionary): The SQs dictionary, where keys are the
               element-wise intra and inter molecular SQs and values are
the computed SQs.
"""
if self.data is None:
LOGGER.warn("data must be computed first using 'compute_data' method.")
return {}
return self._get_constraint_value(self.data, applyMultiframePrior=applyMultiframePrior)
def get_constraint_original_value(self):
"""
        Compute all partial Structure Factors (SQs) using the constraint's
        original data.
        :Returns:
            #. SQs (dictionary): The SQs dictionary, where keys are the
               element-wise intra and inter molecular SQs and values are the
               computed SQs.
"""
if self.originalData is None:
LOGGER.warn("originalData must be computed first using 'compute_data' method.")
return {}
return self._get_constraint_value(self.originalData)
@reset_if_collected_out_of_date
def compute_data(self, update=True):
""" Compute constraint's data.
:Parameters:
#. update (boolean): whether to update constraint data and
standard error with new computation. If data is computed and
updated by another thread or process while the stochastic
engine is running, this might lead to a state alteration of
the constraint which will lead to a no additional accepted
moves in the run
:Returns:
#. data (dict): constraint data dictionary
#. standardError (float): constraint standard error
"""
intra,inter = full_pairs_histograms_coords( boxCoords = self.engine.boxCoordinates,
basis = self.engine.basisVectors,
isPBC = self.engine.isPBC,
moleculeIndex = self.engine.moleculesIndex,
elementIndex = self.engine.elementsIndex,
numberOfElements = self.engine.numberOfElements,
minDistance = self.__minimumDistance,
maxDistance = self.__maximumDistance,
histSize = self.__histogramSize,
bin = self.__bin,
ncores = self.engine._runtime_ncores )
# create data and compute standard error
data = {"intra":intra, "inter":inter}
totalSQ = self.__get_total_Sq(data, rho0=self.engine.numberDensity)
stdError = self.compute_standard_error(modelData = totalSQ)
# update
if update:
self.set_data(data)
self.set_active_atoms_data_before_move(None)
self.set_active_atoms_data_after_move(None)
self.set_standard_error(stdError)
# set original data
if self.originalData is None:
self._set_original_data(self.data)
# return
return data, stdError
def compute_before_move(self, realIndexes, relativeIndexes):
"""
Compute constraint before move is executed
:Parameters:
#. realIndexes (numpy.ndarray): Not used here.
#. relativeIndexes (numpy.ndarray): Group atoms relative index
the move will be applied to.
"""
intraM,interM = multiple_pairs_histograms_coords( indexes = relativeIndexes,
boxCoords = self.engine.boxCoordinates,
basis = self.engine.basisVectors,
isPBC = self.engine.isPBC,
moleculeIndex = self.engine.moleculesIndex,
elementIndex = self.engine.elementsIndex,
numberOfElements = self.engine.numberOfElements,
minDistance = self.__minimumDistance,
maxDistance = self.__maximumDistance,
histSize = self.__histogramSize,
bin = self.__bin,
allAtoms = True,
ncores = self.engine._runtime_ncores )
intraF,interF = full_pairs_histograms_coords( boxCoords = self.engine.boxCoordinates[relativeIndexes],
basis = self.engine.basisVectors,
isPBC = self.engine.isPBC,
moleculeIndex = self.engine.moleculesIndex[relativeIndexes],
elementIndex = self.engine.elementsIndex[relativeIndexes],
numberOfElements = self.engine.numberOfElements,
minDistance = self.__minimumDistance,
maxDistance = self.__maximumDistance,
histSize = self.__histogramSize,
bin = self.__bin,
ncores = self.engine._runtime_ncores )
self.set_active_atoms_data_before_move( {"intra":intraM-intraF, "inter":interM-interF} )
self.set_active_atoms_data_after_move(None)
def compute_after_move(self, realIndexes, relativeIndexes, movedBoxCoordinates):
"""
Compute constraint after move is executed
:Parameters:
#. realIndexes (numpy.ndarray): Not used here.
#. relativeIndexes (numpy.ndarray): Group atoms relative index
the move will be applied to.
#. movedBoxCoordinates (numpy.ndarray): The moved atoms new coordinates.
"""
# change coordinates temporarily
boxData = np.array(self.engine.boxCoordinates[relativeIndexes], dtype=FLOAT_TYPE)
self.engine.boxCoordinates[relativeIndexes] = movedBoxCoordinates
# calculate pair distribution function
intraM,interM = multiple_pairs_histograms_coords( indexes = relativeIndexes,
boxCoords = self.engine.boxCoordinates,
basis = self.engine.basisVectors,
isPBC = self.engine.isPBC,
moleculeIndex = self.engine.moleculesIndex,
elementIndex = self.engine.elementsIndex,
numberOfElements = self.engine.numberOfElements,
minDistance = self.__minimumDistance,
maxDistance = self.__maximumDistance,
histSize = self.__histogramSize,
bin = self.__bin,
allAtoms = True,
ncores = self.engine._runtime_ncores )
intraF,interF = full_pairs_histograms_coords( boxCoords = self.engine.boxCoordinates[relativeIndexes],
basis = self.engine.basisVectors,
isPBC = self.engine.isPBC,
moleculeIndex = self.engine.moleculesIndex[relativeIndexes],
elementIndex = self.engine.elementsIndex[relativeIndexes],
numberOfElements = self.engine.numberOfElements,
minDistance = self.__minimumDistance,
maxDistance = self.__maximumDistance,
histSize = self.__histogramSize,
bin = self.__bin,
ncores = self.engine._runtime_ncores )
# set active atoms data
self.set_active_atoms_data_after_move( {"intra":intraM-intraF, "inter":interM-interF} )
# reset coordinates
self.engine.boxCoordinates[relativeIndexes] = boxData
# compute standardError after move
dataIntra = self.data["intra"]-self.activeAtomsDataBeforeMove["intra"]+self.activeAtomsDataAfterMove["intra"]
dataInter = self.data["inter"]-self.activeAtomsDataBeforeMove["inter"]+self.activeAtomsDataAfterMove["inter"]
totalSQ = self.__get_total_Sq({"intra":dataIntra, "inter":dataInter}, rho0=self.engine.numberDensity)
self.set_after_move_standard_error( self.compute_standard_error(modelData = totalSQ) )
# increment tried
self.increment_tried()
def accept_move(self, realIndexes, relativeIndexes):
"""
Accept move
:Parameters:
#. realIndexes (numpy.ndarray): Not used here.
#. relativeIndexes (numpy.ndarray): Not used here.
"""
dataIntra = self.data["intra"]-self.activeAtomsDataBeforeMove["intra"]+self.activeAtomsDataAfterMove["intra"]
dataInter = self.data["inter"]-self.activeAtomsDataBeforeMove["inter"]+self.activeAtomsDataAfterMove["inter"]
# change permanently _data
self.set_data( {"intra":dataIntra, "inter":dataInter} )
# reset activeAtoms data
self.set_active_atoms_data_before_move(None)
self.set_active_atoms_data_after_move(None)
# update standardError
self.set_standard_error( self.afterMoveStandardError )
self.set_after_move_standard_error( None )
# set new scale factor
self._set_fitted_scale_factor_value(self._fittedScaleFactor)
# increment accepted
self.increment_accepted()
def reject_move(self, realIndexes, relativeIndexes):
"""
Reject move
:Parameters:
#. realIndexes (numpy.ndarray): Not used here.
#. relativeIndexes (numpy.ndarray): Not used here.
"""
# reset activeAtoms data
self.set_active_atoms_data_before_move(None)
self.set_active_atoms_data_after_move(None)
# update standardError
self.set_after_move_standard_error( None )
def compute_as_if_amputated(self, realIndex, relativeIndex):
"""
Compute and return constraint's data and standard error as if
given atom is amputated.
:Parameters:
#. realIndex (numpy.ndarray): Atom's index as a numpy array
of a single element.
#. relativeIndex (numpy.ndarray): Atom's relative index as a
numpy array of a single element.
"""
# compute data
self.compute_before_move(realIndexes=realIndex, relativeIndexes=relativeIndex)
dataIntra = self.data["intra"]-self.activeAtomsDataBeforeMove["intra"]
dataInter = self.data["inter"]-self.activeAtomsDataBeforeMove["inter"]
data = {"intra":dataIntra, "inter":dataInter}
# temporarily adjust self.__weightingScheme
weightingScheme = self.__weightingScheme
relativeIndex = relativeIndex[0]
selectedElement = self.engine.allElements[relativeIndex]
self.engine.numberOfAtomsPerElement[selectedElement] -= 1
self.__weightingScheme = get_normalized_weighting(numbers=self.engine.numberOfAtomsPerElement, weights=self._elementsWeight )
for k in self.__weightingScheme:
self.__weightingScheme[k] = FLOAT_TYPE(self.__weightingScheme[k])
## END OF ADDED 08 FEB 2017
# compute standard error
if not self.engine._RT_moveGenerator.allowFittingScaleFactor:
SF = self.adjustScaleFactorFrequency
self._set_adjust_scale_factor_frequency(0)
rho0 = ((self.engine.numberOfAtoms-1)/self.engine.volume).astype(FLOAT_TYPE)
totalSQ = self.__get_total_Sq(data, rho0=rho0)
standardError = self.compute_standard_error(modelData = totalSQ)
if not self.engine._RT_moveGenerator.allowFittingScaleFactor:
self._set_adjust_scale_factor_frequency(SF)
# reset activeAtoms data
self.set_active_atoms_data_before_move(None)
# set amputation
self.set_amputation_data( {'data':data, 'weightingScheme':self.__weightingScheme} )
# compute standard error
self.set_amputation_standard_error( standardError )
# reset weightingScheme and number of atoms per element
self.__weightingScheme = weightingScheme
self.engine.numberOfAtomsPerElement[selectedElement] += 1
def accept_amputation(self, realIndex, relativeIndex):
"""
Accept amputated atom and sets constraints data and standard error accordingly.
:Parameters:
#. realIndex (numpy.ndarray): Not used here.
#. relativeIndex (numpy.ndarray): Not used here.
"""
#self.set_data( self.amputationData ) ## COMMENTED 08 FEB 2017
self.set_data( self.amputationData['data'] )
self.__weightingScheme = self.amputationData['weightingScheme']
self.set_standard_error( self.amputationStandardError )
self.set_amputation_data( None )
self.set_amputation_standard_error( None )
# set new scale factor
self._set_fitted_scale_factor_value(self._fittedScaleFactor)
def reject_amputation(self, realIndex, relativeIndex):
"""
Reject amputated atom and set constraint's data and standard
error accordingly.
:Parameters:
#. realIndex (numpy.ndarray): Not used here.
#. relativeIndex (numpy.ndarray): Not used here.
"""
self.set_amputation_data( None )
self.set_amputation_standard_error( None )
def _on_collector_collect_atom(self, realIndex):
pass
def _on_collector_release_atom(self, realIndex):
pass
def _constraint_copy_needs_lut(self):
return {'_StructureFactorConstraint__elementsPairs' :'_StructureFactorConstraint__elementsPairs',
'_StructureFactorConstraint__histogramSize' :'_StructureFactorConstraint__histogramSize',
'_StructureFactorConstraint__weightingScheme' :'_StructureFactorConstraint__weightingScheme',
'_StructureFactorConstraint__shellVolumes' :'_StructureFactorConstraint__shellVolumes',
'_StructureFactorConstraint__shellCenters' :'_StructureFactorConstraint__shellCenters',
'_StructureFactorConstraint__windowFunction' :'_StructureFactorConstraint__windowFunction',
'_StructureFactorConstraint__experimentalQValues' :'_StructureFactorConstraint__experimentalQValues',
'_StructureFactorConstraint__experimentalSF' :'_StructureFactorConstraint__experimentalSF',
'_StructureFactorConstraint__Gr2SqMatrix' :'_StructureFactorConstraint__Gr2SqMatrix',
'_StructureFactorConstraint__minimumDistance' :'_StructureFactorConstraint__minimumDistance',
'_StructureFactorConstraint__maximumDistance' :'_StructureFactorConstraint__maximumDistance',
'_StructureFactorConstraint__bin' :'_StructureFactorConstraint__bin',
'_ExperimentalConstraint__scaleFactor' :'_ExperimentalConstraint__scaleFactor',
'_ExperimentalConstraint__dataWeights' :'_ExperimentalConstraint__dataWeights',
'_ExperimentalConstraint__multiframePrior' :'_ExperimentalConstraint__multiframePrior',
'_ExperimentalConstraint__multiframeWeight' :'_ExperimentalConstraint__multiframeWeight',
'_ExperimentalConstraint__limits' :'_ExperimentalConstraint__limits',
'_ExperimentalConstraint__limitsIndexStart' :'_ExperimentalConstraint__limitsIndexStart',
'_ExperimentalConstraint__limitsIndexEnd' :'_ExperimentalConstraint__limitsIndexEnd',
'_Constraint__used' :'_Constraint__used',
'_Constraint__data' :'_Constraint__data',
'_Constraint__state' :'_Constraint__state',
'_Constraint__standardError' :'_Constraint__standardError',
'_fittedScaleFactor' :'_fittedScaleFactor',
'_usedDataWeights' :'_usedDataWeights',
'_Engine__state' :'_Engine__state',
'_Engine__boxCoordinates' :'_Engine__boxCoordinates',
'_Engine__basisVectors' :'_Engine__basisVectors',
'_Engine__isPBC' :'_Engine__isPBC',
'_Engine__moleculesIndex' :'_Engine__moleculesIndex',
'_Engine__elementsIndex' :'_Engine__elementsIndex',
'_Engine__numberOfAtomsPerElement' :'_Engine__numberOfAtomsPerElement',
'_Engine__elements' :'_Engine__elements',
'_Engine__numberDensity' :'_Engine__numberDensity',
'_Engine__volume' :'_Engine__volume',
'_Engine__realCoordinates' :'_Engine__realCoordinates',
'_atomsCollector' :'_atomsCollector',
('engine','_atomsCollector') :'_atomsCollector',
}
def plot(self, xlabelParams={'xlabel':'$Q(\\AA^{-1})$', 'size':10},
ylabelParams={'ylabel':'$S(Q)$', 'size':10},
**kwargs):
"""
Alias to ExperimentalConstraint.plot with additional parameters
:Additional/Adjusted Parameters:
#. xlabelParams (None, dict): modified matplotlib.axes.Axes.set_xlabel
parameters.
#. ylabelParams (None, dict): modified matplotlib.axes.Axes.set_ylabel
parameters.
"""
return super(StructureFactorConstraint, self).plot(xlabelParams= xlabelParams,
ylabelParams= ylabelParams,
**kwargs)
class ReducedStructureFactorConstraint(StructureFactorConstraint):
"""
The Reduced Structure Factor, which we will also denote S(Q),
is exactly the same quantity as the Structure Factor, with the
slight difference that it is normalized to 0 rather than 1,
so that :math:`<S(Q)>=0`.
The computation of S(Q) is done through a Sine inverse Fourier transform
of the computed pair distribution function noted as G(r).
.. math::
S(Q) = \\frac{1}{Q} \\int_{0}^{\\infty} G(r) sin(Qr) dr
The Reduced Structure Factor is implemented only because many experimental
data sets are provided in this form, and it is convenient not to have to
transform the experimental data every time.
"""
def _get_Sq_from_Gr(self, Gr):
return np.sum(Gr.reshape((-1,1))*self.Gr2SqMatrix, axis=0)
def _apply_scale_factor(self, Sq, scaleFactor):
if scaleFactor != 1:
Sq = scaleFactor*Sq
return Sq
def get_adjusted_scale_factor(self, experimentalData, modelData, dataWeights):
""" dummy overload that does exactly the same thing
"""
SF = self.scaleFactor
# check to update scaleFactor
if self.adjustScaleFactorFrequency:
if not self.engine.accepted%self.adjustScaleFactorFrequency:
SF = self.fit_scale_factor(experimentalData, modelData, dataWeights)
return SF
def plot(self, xlabelParams={'xlabel':'$Q(\\AA^{-1})$', 'size':10},
ylabelParams={'ylabel':'$S(Q)-1$', 'size':10},
**kwargs):
"""
Alias to ExperimentalConstraint.plot with additional parameters
:Additional/Adjusted Parameters:
#. xlabelParams (None, dict): modified matplotlib.axes.Axes.set_xlabel
parameters.
#. ylabelParams (None, dict): modified matplotlib.axes.Axes.set_ylabel
parameters.
"""
return super(StructureFactorConstraint, self).plot(xlabelParams= xlabelParams,
ylabelParams= ylabelParams,
**kwargs)
| agpl-3.0 |
APMonitor/arduino | 2_Regression/2nd_order_MIMO/GEKKO/tclab_2nd_order_linear.py | 1 | 3283 | import numpy as np
import time
import matplotlib.pyplot as plt
import random
# get gekko package with:
# pip install gekko
from gekko import GEKKO
import pandas as pd
# import data
data = pd.read_csv('data.txt')
tm = data['Time (sec)'].values
Q1s = data[' Heater 1'].values
Q2s = data[' Heater 2'].values
T1s = data[' Temperature 1'].values
T2s = data[' Temperature 2'].values
#########################################################
# Initialize Model as Estimator
#########################################################
m = GEKKO(name='tclab-mhe')
#m.server = 'http://127.0.0.1' # if local server is installed
# time horizon taken from the measurement data file
m.time = tm
# Parameters to Estimate
K1 = m.FV(value=0.5)
K1.STATUS = 1
K1.FSTATUS = 0
K1.LOWER = 0.1
K1.UPPER = 1.0
K2 = m.FV(value=0.3)
K2.STATUS = 1
K2.FSTATUS = 0
K2.LOWER = 0.1
K2.UPPER = 1.0
K3 = m.FV(value=0.1)
K3.STATUS = 1
K3.FSTATUS = 0
K3.LOWER = 0.0001
K3.UPPER = 1.0
tau12 = m.FV(value=150)
tau12.STATUS = 1
tau12.FSTATUS = 0
tau12.LOWER = 50.0
tau12.UPPER = 250
tau3 = m.FV(value=15)
tau3.STATUS = 0
tau3.FSTATUS = 0
tau3.LOWER = 10
tau3.UPPER = 20
# Measured inputs
Q1 = m.MV(value=0)
Q1.FSTATUS = 1 # measured
Q1.value = Q1s
Q2 = m.MV(value=0)
Q2.FSTATUS = 1 # measured
Q2.value = Q2s
# Ambient temperature
Ta = m.Param(value=23.0) # degC
# State variables
TH1 = m.SV(value=T1s[0])
TH2 = m.SV(value=T2s[0])
# Measurements for model alignment
TC1 = m.CV(value=T1s)
TC1.STATUS = 1 # minimize error between simulation and measurement
TC1.FSTATUS = 1 # receive measurement
TC1.MEAS_GAP = 0.1 # measurement deadband gap
TC2 = m.CV(value=T1s[0])
TC2.STATUS = 1 # minimize error between simulation and measurement
TC2.FSTATUS = 1 # receive measurement
TC2.MEAS_GAP = 0.1 # measurement deadband gap
TC2.value = T2s
# Heat transfer between two heaters
DT = m.Intermediate(TH2-TH1)
# Empirical correlations
m.Equation(tau12 * TH1.dt() + (TH1-Ta) == K1*Q1 + K3*DT)
m.Equation(tau12 * TH2.dt() + (TH2-Ta) == K2*Q2 - K3*DT)
m.Equation(tau3 * TC1.dt() + TC1 == TH1)
m.Equation(tau3 * TC2.dt() + TC2 == TH2)
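# Editor's note: the four equations above define a linear 2nd-order model of
# each channel - a first-order heater energy balance (states TH1, TH2, shared
# time constant tau12, gains K1, K2 and a coupling term K3*DT) followed by a
# first-order sensor lag (states TC1, TC2, time constant tau3).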
# Global Options
m.options.IMODE = 5 # MHE
m.options.EV_TYPE = 2 # Objective type
m.options.NODES = 3 # Collocation nodes
m.options.SOLVER = 3 # IPOPT
m.options.COLDSTART = 0 # COLDSTART on first cycle
# Predict Parameters and Temperatures
# use remote=False for local solve
m.solve()
# Create plot
plt.figure(figsize=(10,7))
ax=plt.subplot(2,1,1)
ax.grid()
plt.plot(tm,T1s,'ro',label=r'$T_1$ measured')
plt.plot(tm,TC1.value,'k-',label=r'$T_1$ predicted')
plt.plot(tm,T2s,'bx',label=r'$T_2$ measured')
plt.plot(tm,TC2.value,'k--',label=r'$T_2$ predicted')
plt.ylabel('Temperature (degC)')
plt.legend(loc=2)
ax=plt.subplot(2,1,2)
ax.grid()
plt.plot(tm,Q1s,'r-',label=r'$Q_1$')
plt.plot(tm,Q2s,'b:',label=r'$Q_2$')
plt.ylabel('Heaters')
plt.xlabel('Time (sec)')
plt.legend(loc='best')
# Print optimal values
print('K1: ' + str(K1.newval))
print('K2: ' + str(K2.newval))
print('K3: ' + str(K3.newval))
print('tau12: ' + str(tau12.newval))
print('tau3: ' + str(tau3.newval))
# Save figure
plt.savefig('tclab_estimation.png')
plt.show()
| apache-2.0 |
cython-testbed/pandas | pandas/tests/io/parser/test_textreader.py | 4 | 11387 | # -*- coding: utf-8 -*-
"""
Tests the TextReader class in parsers.pyx, which
is integral to the C engine in parsers.py
"""
import pytest
from pandas.compat import StringIO, BytesIO, map
from pandas import compat
import os
import sys
from numpy import nan
import numpy as np
from pandas import DataFrame
from pandas.io.parsers import (read_csv, TextFileReader)
from pandas.util.testing import assert_frame_equal
import pandas.util.testing as tm
from pandas._libs.parsers import TextReader
import pandas._libs.parsers as parser
class TestTextReader(object):
@pytest.fixture(autouse=True)
def setup_method(self, datapath):
self.dirpath = datapath('io', 'parser', 'data')
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def test_file_handle(self):
with open(self.csv1, 'rb') as f:
reader = TextReader(f)
reader.read()
def test_string_filename(self):
reader = TextReader(self.csv1, header=None)
reader.read()
def test_file_handle_mmap(self):
with open(self.csv1, 'rb') as f:
reader = TextReader(f, memory_map=True, header=None)
reader.read()
def test_StringIO(self):
with open(self.csv1, 'rb') as f:
text = f.read()
src = BytesIO(text)
reader = TextReader(src, header=None)
reader.read()
def test_string_factorize(self):
# should this be optional?
data = 'a\nb\na\nb\na'
reader = TextReader(StringIO(data), header=None)
result = reader.read()
assert len(set(map(id, result[0]))) == 2
def test_skipinitialspace(self):
data = ('a, b\n'
'a, b\n'
'a, b\n'
'a, b')
reader = TextReader(StringIO(data), skipinitialspace=True,
header=None)
result = reader.read()
tm.assert_numpy_array_equal(result[0], np.array(['a', 'a', 'a', 'a'],
dtype=np.object_))
tm.assert_numpy_array_equal(result[1], np.array(['b', 'b', 'b', 'b'],
dtype=np.object_))
def test_parse_booleans(self):
data = 'True\nFalse\nTrue\nTrue'
reader = TextReader(StringIO(data), header=None)
result = reader.read()
assert result[0].dtype == np.bool_
def test_delimit_whitespace(self):
data = 'a b\na\t\t "b"\n"a"\t \t b'
reader = TextReader(StringIO(data), delim_whitespace=True,
header=None)
result = reader.read()
tm.assert_numpy_array_equal(result[0], np.array(['a', 'a', 'a'],
dtype=np.object_))
tm.assert_numpy_array_equal(result[1], np.array(['b', 'b', 'b'],
dtype=np.object_))
def test_embedded_newline(self):
data = 'a\n"hello\nthere"\nthis'
reader = TextReader(StringIO(data), header=None)
result = reader.read()
expected = np.array(['a', 'hello\nthere', 'this'], dtype=np.object_)
tm.assert_numpy_array_equal(result[0], expected)
def test_euro_decimal(self):
data = '12345,67\n345,678'
reader = TextReader(StringIO(data), delimiter=':',
decimal=',', header=None)
result = reader.read()
expected = np.array([12345.67, 345.678])
tm.assert_almost_equal(result[0], expected)
def test_integer_thousands(self):
data = '123,456\n12,500'
reader = TextReader(StringIO(data), delimiter=':',
thousands=',', header=None)
result = reader.read()
expected = np.array([123456, 12500], dtype=np.int64)
tm.assert_almost_equal(result[0], expected)
def test_integer_thousands_alt(self):
data = '123.456\n12.500'
reader = TextFileReader(StringIO(data), delimiter=':',
thousands='.', header=None)
result = reader.read()
expected = DataFrame([123456, 12500])
tm.assert_frame_equal(result, expected)
@tm.capture_stderr
def test_skip_bad_lines(self):
# too many lines, see #2430 for why
data = ('a:b:c\n'
'd:e:f\n'
'g:h:i\n'
'j:k:l:m\n'
'l:m:n\n'
'o:p:q:r')
reader = TextReader(StringIO(data), delimiter=':',
header=None)
pytest.raises(parser.ParserError, reader.read)
reader = TextReader(StringIO(data), delimiter=':',
header=None,
error_bad_lines=False,
warn_bad_lines=False)
result = reader.read()
expected = {0: np.array(['a', 'd', 'g', 'l'], dtype=object),
1: np.array(['b', 'e', 'h', 'm'], dtype=object),
2: np.array(['c', 'f', 'i', 'n'], dtype=object)}
assert_array_dicts_equal(result, expected)
reader = TextReader(StringIO(data), delimiter=':',
header=None,
error_bad_lines=False,
warn_bad_lines=True)
reader.read()
val = sys.stderr.getvalue()
assert 'Skipping line 4' in val
assert 'Skipping line 6' in val
def test_header_not_enough_lines(self):
data = ('skip this\n'
'skip this\n'
'a,b,c\n'
'1,2,3\n'
'4,5,6')
reader = TextReader(StringIO(data), delimiter=',', header=2)
header = reader.header
expected = [['a', 'b', 'c']]
assert header == expected
recs = reader.read()
expected = {0: np.array([1, 4], dtype=np.int64),
1: np.array([2, 5], dtype=np.int64),
2: np.array([3, 6], dtype=np.int64)}
assert_array_dicts_equal(recs, expected)
def test_escapechar(self):
data = ('\\"hello world\"\n'
'\\"hello world\"\n'
'\\"hello world\"')
reader = TextReader(StringIO(data), delimiter=',', header=None,
escapechar='\\')
result = reader.read()
expected = {0: np.array(['"hello world"'] * 3, dtype=object)}
assert_array_dicts_equal(result, expected)
def test_eof_has_eol(self):
# handling of new line at EOF
pass
def test_na_substitution(self):
pass
def test_numpy_string_dtype(self):
data = """\
a,1
aa,2
aaa,3
aaaa,4
aaaaa,5"""
def _make_reader(**kwds):
return TextReader(StringIO(data), delimiter=',', header=None,
**kwds)
reader = _make_reader(dtype='S5,i4')
result = reader.read()
assert result[0].dtype == 'S5'
ex_values = np.array(['a', 'aa', 'aaa', 'aaaa', 'aaaaa'], dtype='S5')
assert (result[0] == ex_values).all()
assert result[1].dtype == 'i4'
reader = _make_reader(dtype='S4')
result = reader.read()
assert result[0].dtype == 'S4'
ex_values = np.array(['a', 'aa', 'aaa', 'aaaa', 'aaaa'], dtype='S4')
assert (result[0] == ex_values).all()
assert result[1].dtype == 'S4'
def test_pass_dtype(self):
data = """\
one,two
1,a
2,b
3,c
4,d"""
def _make_reader(**kwds):
return TextReader(StringIO(data), delimiter=',', **kwds)
reader = _make_reader(dtype={'one': 'u1', 1: 'S1'})
result = reader.read()
assert result[0].dtype == 'u1'
assert result[1].dtype == 'S1'
reader = _make_reader(dtype={'one': np.uint8, 1: object})
result = reader.read()
assert result[0].dtype == 'u1'
assert result[1].dtype == 'O'
reader = _make_reader(dtype={'one': np.dtype('u1'),
1: np.dtype('O')})
result = reader.read()
assert result[0].dtype == 'u1'
assert result[1].dtype == 'O'
def test_usecols(self):
data = """\
a,b,c
1,2,3
4,5,6
7,8,9
10,11,12"""
def _make_reader(**kwds):
return TextReader(StringIO(data), delimiter=',', **kwds)
reader = _make_reader(usecols=(1, 2))
result = reader.read()
exp = _make_reader().read()
assert len(result) == 2
assert (result[1] == exp[1]).all()
assert (result[2] == exp[2]).all()
def test_cr_delimited(self):
def _test(text, **kwargs):
nice_text = text.replace('\r', '\r\n')
result = TextReader(StringIO(text), **kwargs).read()
expected = TextReader(StringIO(nice_text), **kwargs).read()
assert_array_dicts_equal(result, expected)
data = 'a,b,c\r1,2,3\r4,5,6\r7,8,9\r10,11,12'
_test(data, delimiter=',')
data = 'a b c\r1 2 3\r4 5 6\r7 8 9\r10 11 12'
_test(data, delim_whitespace=True)
data = 'a,b,c\r1,2,3\r4,5,6\r,88,9\r10,11,12'
_test(data, delimiter=',')
sample = ('A,B,C,D,E,F,G,H,I,J,K,L,M,N,O\r'
'AAAAA,BBBBB,0,0,0,0,0,0,0,0,0,0,0,0,0\r'
',BBBBB,0,0,0,0,0,0,0,0,0,0,0,0,0')
_test(sample, delimiter=',')
data = 'A B C\r 2 3\r4 5 6'
_test(data, delim_whitespace=True)
data = 'A B C\r2 3\r4 5 6'
_test(data, delim_whitespace=True)
def test_empty_field_eof(self):
data = 'a,b,c\n1,2,3\n4,,'
result = TextReader(StringIO(data), delimiter=',').read()
expected = {0: np.array([1, 4], dtype=np.int64),
1: np.array(['2', ''], dtype=object),
2: np.array(['3', ''], dtype=object)}
assert_array_dicts_equal(result, expected)
# GH5664
a = DataFrame([['b'], [nan]], columns=['a'], index=['a', 'c'])
b = DataFrame([[1, 1, 1, 0], [1, 1, 1, 0]],
columns=list('abcd'),
index=[1, 1])
c = DataFrame([[1, 2, 3, 4], [6, nan, nan, nan],
[8, 9, 10, 11], [13, 14, nan, nan]],
columns=list('abcd'),
index=[0, 5, 7, 12])
for _ in range(100):
df = read_csv(StringIO('a,b\nc\n'), skiprows=0,
names=['a'], engine='c')
assert_frame_equal(df, a)
df = read_csv(StringIO('1,1,1,1,0\n' * 2 + '\n' * 2),
names=list("abcd"), engine='c')
assert_frame_equal(df, b)
df = read_csv(StringIO('0,1,2,3,4\n5,6\n7,8,9,10,11\n12,13,14'),
names=list('abcd'), engine='c')
assert_frame_equal(df, c)
def test_empty_csv_input(self):
# GH14867
df = read_csv(StringIO(), chunksize=20, header=None,
names=['a', 'b', 'c'])
assert isinstance(df, TextFileReader)
def assert_array_dicts_equal(left, right):
for k, v in compat.iteritems(left):
tm.assert_numpy_array_equal(np.asarray(v),
                            np.asarray(right[k]))
| bsd-3-clause |
fyffyt/scikit-learn | examples/ensemble/plot_adaboost_regression.py | 311 | 1529 | """
======================================
Decision Tree Regression with AdaBoost
======================================
A decision tree is boosted using the AdaBoost.R2 [1] algorithm on a 1D
sinusoidal dataset with a small amount of Gaussian noise.
299 boosts (300 decision trees) are compared with a single decision tree
regressor. As the number of boosts is increased, the regressor can fit more
detail.
.. [1] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
"""
print(__doc__)
# Author: Noel Dawe <[email protected]>
#
# License: BSD 3 clause
# importing necessary libraries
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import AdaBoostRegressor
# Create the dataset
rng = np.random.RandomState(1)
X = np.linspace(0, 6, 100)[:, np.newaxis]
y = np.sin(X).ravel() + np.sin(6 * X).ravel() + rng.normal(0, 0.1, X.shape[0])
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=4)
regr_2 = AdaBoostRegressor(DecisionTreeRegressor(max_depth=4),
n_estimators=300, random_state=rng)
regr_1.fit(X, y)
regr_2.fit(X, y)
# Predict
y_1 = regr_1.predict(X)
y_2 = regr_2.predict(X)
# Plot the results
plt.figure()
plt.scatter(X, y, c="k", label="training samples")
plt.plot(X, y_1, c="g", label="n_estimators=1", linewidth=2)
plt.plot(X, y_2, c="r", label="n_estimators=300", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Boosted Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
zuku1985/scikit-learn | sklearn/preprocessing/tests/test_imputation.py | 51 | 12300 |
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_false
from sklearn.preprocessing.imputation import Imputer
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn import tree
from sklearn.random_projection import sparse_random_matrix
def _check_statistics(X, X_true,
strategy, statistics, missing_values):
"""Utility function for testing imputation for a given strategy.
Test:
- along the two axes
- with dense and sparse arrays
Check that:
- the statistics (mean, median, mode) are correct
- the missing values are imputed correctly"""
err_msg = "Parameters: strategy = %s, missing_values = %s, " \
"axis = {0}, sparse = {1}" % (strategy, missing_values)
# Normal matrix, axis = 0
imputer = Imputer(missing_values, strategy=strategy, axis=0)
X_trans = imputer.fit(X).transform(X.copy())
assert_array_equal(imputer.statistics_, statistics,
err_msg.format(0, False))
assert_array_equal(X_trans, X_true, err_msg.format(0, False))
# Normal matrix, axis = 1
imputer = Imputer(missing_values, strategy=strategy, axis=1)
imputer.fit(X.transpose())
if np.isnan(statistics).any():
assert_raises(ValueError, imputer.transform, X.copy().transpose())
else:
X_trans = imputer.transform(X.copy().transpose())
assert_array_equal(X_trans, X_true.transpose(),
err_msg.format(1, False))
# Sparse matrix, axis = 0
imputer = Imputer(missing_values, strategy=strategy, axis=0)
imputer.fit(sparse.csc_matrix(X))
X_trans = imputer.transform(sparse.csc_matrix(X.copy()))
if sparse.issparse(X_trans):
X_trans = X_trans.toarray()
assert_array_equal(imputer.statistics_, statistics,
err_msg.format(0, True))
assert_array_equal(X_trans, X_true, err_msg.format(0, True))
# Sparse matrix, axis = 1
imputer = Imputer(missing_values, strategy=strategy, axis=1)
imputer.fit(sparse.csc_matrix(X.transpose()))
if np.isnan(statistics).any():
assert_raises(ValueError, imputer.transform,
sparse.csc_matrix(X.copy().transpose()))
else:
X_trans = imputer.transform(sparse.csc_matrix(X.copy().transpose()))
if sparse.issparse(X_trans):
X_trans = X_trans.toarray()
assert_array_equal(X_trans, X_true.transpose(),
err_msg.format(1, True))
def test_imputation_shape():
# Verify the shapes of the imputed matrix for different strategies.
X = np.random.randn(10, 2)
X[::2] = np.nan
for strategy in ['mean', 'median', 'most_frequent']:
imputer = Imputer(strategy=strategy)
X_imputed = imputer.fit_transform(X)
assert_equal(X_imputed.shape, (10, 2))
X_imputed = imputer.fit_transform(sparse.csr_matrix(X))
assert_equal(X_imputed.shape, (10, 2))
def test_imputation_mean_median_only_zero():
# Test imputation using the mean and median strategies, when
# missing_values == 0.
X = np.array([
[np.nan, 0, 0, 0, 5],
[np.nan, 1, 0, np.nan, 3],
[np.nan, 2, 0, 0, 0],
[np.nan, 6, 0, 5, 13],
])
X_imputed_mean = np.array([
[3, 5],
[1, 3],
[2, 7],
[6, 13],
])
statistics_mean = [np.nan, 3, np.nan, np.nan, 7]
# Behaviour of median with NaN is undefined, e.g. different results in
# np.median and np.ma.median
X_for_median = X[:, [0, 1, 2, 4]]
X_imputed_median = np.array([
[2, 5],
[1, 3],
[2, 5],
[6, 13],
])
statistics_median = [np.nan, 2, np.nan, 5]
_check_statistics(X, X_imputed_mean, "mean", statistics_mean, 0)
_check_statistics(X_for_median, X_imputed_median, "median",
statistics_median, 0)
def safe_median(arr, *args, **kwargs):
# np.median([]) raises a TypeError for numpy >= 1.10.1
length = arr.size if hasattr(arr, 'size') else len(arr)
return np.nan if length == 0 else np.median(arr, *args, **kwargs)
def safe_mean(arr, *args, **kwargs):
# np.mean([]) raises a RuntimeWarning for numpy >= 1.10.1
length = arr.size if hasattr(arr, 'size') else len(arr)
return np.nan if length == 0 else np.mean(arr, *args, **kwargs)
def test_imputation_mean_median():
# Test imputation using the mean and median strategies, when
# missing_values != 0.
rng = np.random.RandomState(0)
dim = 10
dec = 10
shape = (dim * dim, dim + dec)
zeros = np.zeros(shape[0])
values = np.arange(1, shape[0] + 1)
values[4::2] = - values[4::2]
tests = [("mean", "NaN", lambda z, v, p: safe_mean(np.hstack((z, v)))),
("mean", 0, lambda z, v, p: np.mean(v)),
("median", "NaN", lambda z, v, p: safe_median(np.hstack((z, v)))),
("median", 0, lambda z, v, p: np.median(v))]
for strategy, test_missing_values, true_value_fun in tests:
X = np.empty(shape)
X_true = np.empty(shape)
true_statistics = np.empty(shape[1])
# Create a matrix X with columns
# - with only zeros,
# - with only missing values
# - with zeros, missing values and values
# And a matrix X_true containing all true values
for j in range(shape[1]):
nb_zeros = (j - dec + 1 > 0) * (j - dec + 1) * (j - dec + 1)
nb_missing_values = max(shape[0] + dec * dec
- (j + dec) * (j + dec), 0)
nb_values = shape[0] - nb_zeros - nb_missing_values
z = zeros[:nb_zeros]
p = np.repeat(test_missing_values, nb_missing_values)
v = values[rng.permutation(len(values))[:nb_values]]
true_statistics[j] = true_value_fun(z, v, p)
# Create the columns
X[:, j] = np.hstack((v, z, p))
if 0 == test_missing_values:
X_true[:, j] = np.hstack((v,
np.repeat(
true_statistics[j],
nb_missing_values + nb_zeros)))
else:
X_true[:, j] = np.hstack((v,
z,
np.repeat(true_statistics[j],
nb_missing_values)))
# Shuffle them the same way
np.random.RandomState(j).shuffle(X[:, j])
np.random.RandomState(j).shuffle(X_true[:, j])
# Mean doesn't support columns containing NaNs, median does
if strategy == "median":
cols_to_keep = ~np.isnan(X_true).any(axis=0)
else:
cols_to_keep = ~np.isnan(X_true).all(axis=0)
X_true = X_true[:, cols_to_keep]
_check_statistics(X, X_true, strategy,
true_statistics, test_missing_values)
def test_imputation_median_special_cases():
# Test median imputation with sparse boundary cases
X = np.array([
[0, np.nan, np.nan], # odd: implicit zero
[5, np.nan, np.nan], # odd: explicit nonzero
[0, 0, np.nan], # even: average two zeros
[-5, 0, np.nan], # even: avg zero and neg
[0, 5, np.nan], # even: avg zero and pos
[4, 5, np.nan], # even: avg nonzeros
[-4, -5, np.nan], # even: avg negatives
[-1, 2, np.nan], # even: crossing neg and pos
]).transpose()
X_imputed_median = np.array([
[0, 0, 0],
[5, 5, 5],
[0, 0, 0],
[-5, 0, -2.5],
[0, 5, 2.5],
[4, 5, 4.5],
[-4, -5, -4.5],
[-1, 2, .5],
]).transpose()
statistics_median = [0, 5, 0, -2.5, 2.5, 4.5, -4.5, .5]
_check_statistics(X, X_imputed_median, "median",
statistics_median, 'NaN')
def test_imputation_most_frequent():
# Test imputation using the most-frequent strategy.
X = np.array([
[-1, -1, 0, 5],
[-1, 2, -1, 3],
[-1, 1, 3, -1],
[-1, 2, 3, 7],
])
X_true = np.array([
[2, 0, 5],
[2, 3, 3],
[1, 3, 3],
[2, 3, 7],
])
# scipy.stats.mode, used in Imputer, doesn't return the first most
# frequent as promised in the doc but the lowest most frequent. When this
# test will fail after an update of scipy, Imputer will need to be updated
# to be consistent with the new (correct) behaviour
_check_statistics(X, X_true, "most_frequent", [np.nan, 2, 3, 3], -1)
def test_imputation_pipeline_grid_search():
# Test imputation within a pipeline + gridsearch.
pipeline = Pipeline([('imputer', Imputer(missing_values=0)),
('tree', tree.DecisionTreeRegressor(random_state=0))])
parameters = {
'imputer__strategy': ["mean", "median", "most_frequent"],
'imputer__axis': [0, 1]
}
l = 100
X = sparse_random_matrix(l, l, density=0.10)
Y = sparse_random_matrix(l, 1, density=0.10).toarray()
gs = GridSearchCV(pipeline, parameters)
gs.fit(X, Y)
def test_imputation_pickle():
# Test for pickling imputers.
import pickle
l = 100
X = sparse_random_matrix(l, l, density=0.10)
for strategy in ["mean", "median", "most_frequent"]:
imputer = Imputer(missing_values=0, strategy=strategy)
imputer.fit(X)
imputer_pickled = pickle.loads(pickle.dumps(imputer))
assert_array_equal(imputer.transform(X.copy()),
imputer_pickled.transform(X.copy()),
"Fail to transform the data after pickling "
"(strategy = %s)" % (strategy))
def test_imputation_copy():
# Test imputation with copy
X_orig = sparse_random_matrix(5, 5, density=0.75, random_state=0)
# copy=True, dense => copy
X = X_orig.copy().toarray()
imputer = Imputer(missing_values=0, strategy="mean", copy=True)
Xt = imputer.fit(X).transform(X)
Xt[0, 0] = -1
assert_false(np.all(X == Xt))
# copy=True, sparse csr => copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean", copy=True)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, dense => no copy
X = X_orig.copy().toarray()
imputer = Imputer(missing_values=0, strategy="mean", copy=False)
Xt = imputer.fit(X).transform(X)
Xt[0, 0] = -1
assert_array_equal(X, Xt)
# copy=False, sparse csr, axis=1 => no copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_array_equal(X.data, Xt.data)
# copy=False, sparse csc, axis=0 => no copy
X = X_orig.copy().tocsc()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=0)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_array_equal(X.data, Xt.data)
# copy=False, sparse csr, axis=0 => copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=0)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, sparse csc, axis=1 => copy
X = X_orig.copy().tocsc()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, sparse csr, axis=1, missing_values=0 => copy
X = X_orig.copy()
imputer = Imputer(missing_values=0, strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
assert_false(sparse.issparse(Xt))
# Note: If X is sparse and if missing_values=0, then a (dense) copy of X is
# made, even if copy=False.
| bsd-3-clause |
xwolf12/scikit-learn | benchmarks/bench_glm.py | 297 | 1493 | """
A comparison of different methods in GLM
Data comes from a random square matrix.
"""
from datetime import datetime
import numpy as np
from sklearn import linear_model
from sklearn.utils.bench import total_seconds
if __name__ == '__main__':
import pylab as pl
n_iter = 40
time_ridge = np.empty(n_iter)
time_ols = np.empty(n_iter)
time_lasso = np.empty(n_iter)
dimensions = 500 * np.arange(1, n_iter + 1)
for i in range(n_iter):
print('Iteration %s of %s' % (i, n_iter))
n_samples, n_features = 10 * i + 3, 10 * i + 3
X = np.random.randn(n_samples, n_features)
Y = np.random.randn(n_samples)
start = datetime.now()
ridge = linear_model.Ridge(alpha=1.)
ridge.fit(X, Y)
time_ridge[i] = total_seconds(datetime.now() - start)
start = datetime.now()
ols = linear_model.LinearRegression()
ols.fit(X, Y)
time_ols[i] = total_seconds(datetime.now() - start)
start = datetime.now()
lasso = linear_model.LassoLars()
lasso.fit(X, Y)
time_lasso[i] = total_seconds(datetime.now() - start)
pl.figure('scikit-learn GLM benchmark results')
pl.xlabel('Dimensions')
pl.ylabel('Time (s)')
pl.plot(dimensions, time_ridge, color='r')
pl.plot(dimensions, time_ols, color='g')
pl.plot(dimensions, time_lasso, color='b')
pl.legend(['Ridge', 'OLS', 'LassoLars'], loc='upper left')
pl.axis('tight')
pl.show()
| bsd-3-clause |
apdjustino/DRCOG_Urbansim | src/opus_gui/results_manager/run/indicator_framework/visualizer/visualizers/matplotlib_lorenzcurve.py | 1 | 10890 | # Opus/UrbanSim urban simulation software.
# Copyright (C) 2010-2011 University of California, Berkeley, 2005-2009 University of Washington
# See opus_core/LICENSE
import os, re, sys, time, traceback
from copy import copy
from opus_gui.results_manager.run.indicator_framework.visualizer.visualizers.abstract_visualization import Visualization
from opus_core.logger import logger
from numpy import array, arange
from numpy import ones, zeros, hstack, vstack
from numpy import trapz, trim_zeros
from pylab import subplot, plot, show
from pylab import xlabel, ylabel, title, text
from pylab import MultipleLocator, FormatStrFormatter
from pylab import savefig, clf, close
class LorenzCurve(Visualization):
def __init__(self, source_data, dataset_name,
attribute = None,
years = None, operation = None, name = None, scale = None,
storage_location = None):
Visualization.__init__(self, source_data, dataset_name, [attribute],
years, operation, name,
storage_location)
self._values = None
self._ginicoeff = None
def is_single_year_indicator_image_type(self):
return True
def get_file_extension(self):
return 'png'
def get_visualization_shorthand(self):
return 'lorenzcurve'
def get_additional_metadata(self):
return {}
def _create_indicator(self, year):
"""Create a Lorenz Curve for the given indicator,
save it to the cache directory's 'indicators' sub-directory.
"""
attribute_short = self.get_attribute_alias(attribute = self.attributes[0],
year = year)
title = attribute_short + ' ' + str(year)
if self.run_description is not None:
title += '\n' + self.run_description
# Do calculation
# Make fresh copy with dtype float64 to avoid overflows
self._values = array(self._get_indicator(year, wrap = False).astype('float64'))
self._compute_lorenz()
file_path = self.get_file_path(year = year)
self._plot(attribute_short, file_path );
return file_path
def _compute_lorenz(self):
''' Compute the Lorenz curve and the Gini coefficient and store the
results in the corresponding instance attributes.
'''
self._values.sort()
#remove 0 values from array
self._values = trim_zeros(self._values,'f')
num_values = self._values.size
F = arange(1, num_values + 1, 1, "float64")/num_values
L = self._values.cumsum(dtype="float64")/sum(self._values)
# Add (0, 0) as the first point for completeness (e.g. plotting)
origin = array([[0], [0]])
self._values = vstack((F, L))
self._values = hstack((origin, self._values))
# This is the simple form of (0.5 - integral) / 0.5
self._ginicoeff = 1 - 2 * trapz(self._values[1], self._values[0])
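# Editor's worked example (matches skip_test_small_gini below): for incomes
# [1, 1, 2, 3, 4, 5] the stored curve is
#   F = [0, 1/6, 2/6, 3/6, 4/6, 5/6, 1]
#   L = [0, 1/16, 2/16, 4/16, 7/16, 11/16, 1]
# trapz(L, F) = 0.34375, hence the Gini coefficient 1 - 2*0.34375 = 0.3125.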
def _plot(self, attribute_name, file_path=None ):
clf() # Clear existing plot
a = self._values[0] * 100
b = self._values[1] * 100
ax = subplot(111)
plot(a, a, 'k--', a, b, 'r')
ax.set_ylim([0,100])
ax.grid(color='0.5', linestyle=':', linewidth=0.5)
xlabel('population')
ylabel(attribute_name)
title('Lorenz curve')
font = {'fontname' : 'Courier',
'color' : 'r',
'fontweight' : 'bold',
'fontsize' : 11
}
box = { 'pad' : 6,
'facecolor' : 'w',
'linewidth' : 1,
'fill' : True
}
text(5, 90, 'Gini coefficient: %(gini)f' % {'gini' : self._ginicoeff}, font, color='k', bbox=box )
majorLocator = MultipleLocator(20)
majorFormatter = FormatStrFormatter('%d %%')
minorLocator = MultipleLocator(5)
ax.xaxis.set_major_locator( majorLocator )
ax.xaxis.set_major_formatter( majorFormatter)
ax.xaxis.set_minor_locator( minorLocator )
ax.yaxis.set_major_locator( majorLocator )
ax.yaxis.set_major_formatter( majorFormatter)
ax.yaxis.set_minor_locator( minorLocator )
if file_path:
savefig(file_path)
close()
else:
show()
import os
from opus_core.tests import opus_unittest
from numpy import allclose
from opus_gui.results_manager.run.indicator_framework.test_classes.abstract_indicator_test import AbstractIndicatorTest
class Tests(AbstractIndicatorTest):
def skip_test_create_indicator(self):
indicator_path = os.path.join(self.temp_cache_path, 'indicators')
self.assert_(not os.path.exists(indicator_path))
lorenzcurve = LorenzCurve(
source_data = self.source_data,
attribute = 'opus_core.test.attribute',
dataset_name = 'test',
years = None
)
lorenzcurve.create(False)
self.assert_(os.path.exists(indicator_path))
self.assert_(os.path.exists(os.path.join(indicator_path, 'test__lorenzcurve__attribute__1980.png')))
def skip_test_perfect_equality(self):
"""Perfect equality is when everybody has the same amount of something"""
lorenzcurve = LorenzCurve(
source_data = self.source_data,
attribute = 'opus_core.test.attribute',
dataset_name = 'test',
years = None
)
incomes = ones(100)
lorenzcurve._values = incomes
lorenzcurve._compute_lorenz()
wanted_result = vstack((arange(0, 101) / 100., arange(0, 101) / 100.))
self.assert_(allclose(lorenzcurve._values, wanted_result))
def skip_test_perfect_inequality(self):
"""Perfect inequality is when one person has all of something"""
lorenzcurve = LorenzCurve(
source_data = self.source_data,
attribute = 'opus_core.test.attribute',
dataset_name = 'test',
years = None
)
incomes = zeros(100)
incomes[0] = 42
lorenzcurve._values = incomes
lorenzcurve._compute_lorenz()
#We strip all the zero values, so the result consists of only two values
wanted_result = [[0.,1.],[0.,1.]]
self.assert_(allclose(lorenzcurve._values, wanted_result))
def skip_test_small_lorenz(self):
"""Test case for less than 100 people"""
lorenzcurve = LorenzCurve(
source_data = self.source_data,
attribute = 'opus_core.test.attribute',
dataset_name = 'test',
years = None
)
incomes = array([1, 1, 2, 3, 4, 5])
lorenzcurve._values = incomes
lorenzcurve._compute_lorenz()
wanted_result = array(
[[ 0, 1/6., 2/6., 3/6., 4/6., 5/6., 6/6. ],
[ 0, 1/16., 2/16., 4/16., 7/16., 11/16., 16/16. ]])
self.assert_(allclose(lorenzcurve._values, wanted_result))
def skip_test_small_gini(self):
"""Test case for gini coefficient for the small case"""
lorenzcurve = LorenzCurve(
source_data = self.source_data,
attribute = 'opus_core.test.attribute',
dataset_name = 'test',
years = None
)
incomes = array([1, 1, 2, 3, 4, 5])
lorenzcurve._values = incomes
lorenzcurve._compute_lorenz()
self.assertAlmostEqual(lorenzcurve._ginicoeff, 0.3125)
def skip_test_large_lorenz(self):
"""Test case for more than 100 people"""
lorenzcurve = LorenzCurve(
source_data = self.source_data,
attribute = 'opus_core.test.attribute',
dataset_name = 'test',
years = None
)
incomes = array([731, 700, 619, 450, 419, 512, 232, 266, 131, 188,
498, 293, 935, 177, 160, 380, 538, 783, 256, 280,
731, 362, 870, 970, 674, 211, 524, 207, 513, 461,
280, 275, 410, 282, 144, 682, 573, 252, 382, 909,
719, 666, 236, 636, 628, 542, 630, 484, 629, 974,
747, 509, 281, 725, 377, 565, 495, 840, 391, 191,
929, 679, 217, 179, 336, 562, 293, 881, 271, 172,
426, 697, 293, 576, 203, 390, 522, 948, 312, 491,
531, 959, 646, 495, 306, 631, 722, 322, 876, 586,
316, 124, 796, 250, 456, 112, 661, 294, 749, 619,
134, 582, 996, 413, 421, 219, 796, 923, 832, 557])
lorenzcurve._values = incomes
lorenzcurve._compute_lorenz()
wanted_result_F = arange(0, 111) / 110.
wanted_result_L = array([ 0, 0.00202803, 0.00427335, 0.00664542, 0.00907181, 0.01167928,
0.01457647, 0.01769094, 0.02089595, 0.02413718, 0.02754138,
0.03099989, 0.0346757 , 0.03842393, 0.04224459, 0.0461739 ,
0.05013943, 0.05434035, 0.0586137 , 0.06314055, 0.06770362,
0.07233912, 0.07715569, 0.0820628 , 0.08704234, 0.09211241,
0.09718249, 0.10227067, 0.10737696, 0.11268243, 0.1179879 ,
0.12329338, 0.12861696, 0.13415782, 0.13980734, 0.14552928,
0.15135987, 0.15744396, 0.16399884, 0.17082534, 0.17770615,
0.18462318, 0.19168508, 0.19876507, 0.20618911, 0.21366748,
0.22125448, 0.2288777 , 0.23659146, 0.2447398 , 0.25299678,
0.26134429, 0.27010828, 0.27899902, 0.28796219, 0.29692536,
0.30594285, 0.31515953, 0.32443052, 0.33371962, 0.34317169,
0.35265998, 0.36227502, 0.3720168 , 0.38183102, 0.39191685,
0.40209322, 0.41232391, 0.42269945, 0.43312932, 0.44366784,
0.45427878, 0.46548727, 0.47669576, 0.48806721, 0.49945678,
0.51086445, 0.52229023, 0.53380654, 0.54550393, 0.55747293,
0.56953247, 0.58173686, 0.5940318 , 0.60638105, 0.61900192,
0.63167711, 0.64469634, 0.65776989, 0.67089777, 0.68413428,
0.6973708 , 0.71089704, 0.72445949, 0.7386376 , 0.7530511 ,
0.7674646 , 0.78252997, 0.79774019, 0.81349364, 0.82935574,
0.84530837, 0.86176801, 0.87848115, 0.89530294, 0.91223337,
0.9293992 , 0.94676421, 0.9643284 , 0.98196502, 1. ])
self.assert_(allclose(lorenzcurve._values, vstack((wanted_result_F, wanted_result_L))))
if __name__ == '__main__':
try:
import matplotlib
except ImportError:
print 'could not import matplotlib'
else:
opus_unittest.main()
| agpl-3.0 |
aavanian/bokeh | bokeh/sampledata/tests/test_world_cities.py | 2 | 1963 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2017, Anaconda, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
import pandas as pd
# Bokeh imports
from bokeh.util.testing import verify_all
# Module under test
#import bokeh.sampledata.world_cities as bsw
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
ALL = (
'data',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
Test___all__ = pytest.mark.sampledata(verify_all("bokeh.sampledata.world_cities", ALL))
@pytest.mark.sampledata
def test_data():
import bokeh.sampledata.world_cities as bsw
assert isinstance(bsw.data, pd.DataFrame)
# don't check detail for external data
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
| bsd-3-clause |
anacode/anacode-toolkit | anacode/api/writers.py | 1 | 20217 | # -*- coding: utf-8 -*-
import os
import csv
import datetime
import pandas as pd
from itertools import chain
from functools import partial
from anacode import codes
def backup(root, files):
"""Backs up `files` from `root` directory and return list of backed up
file names. Backed up files will have datetime suffix appended to original
file name.
:param root: Absolute path to folder where files to backup are located
:type root: str
:param files: Names of files that need backing up
:type files: iterable of str
:return: list -- List of backed up file names
"""
backed_up = []
join = os.path.join
root_contents = os.listdir(root)
dt_str = datetime.datetime.utcnow().strftime('%Y%m%d%H%M%S')
for file_name in files:
if file_name not in root_contents:
continue
new_name = file_name + '_' + dt_str
os.rename(join(root, file_name), join(root, new_name))
backed_up.append(new_name)
return backed_up
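# Editor's usage sketch (hypothetical paths):
#
#     backup('/tmp/out', ['concepts.csv', 'missing.csv'])
#
# renames /tmp/out/concepts.csv to 'concepts.csv_<UTC %Y%m%d%H%M%S>' if it
# exists, silently skips names not found in the folder, and returns the list
# of new names, e.g. ['concepts.csv_20170301120000'].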
HEADERS = {
'categories': [u'doc_id', u'text_order', u'category', u'probability'],
'concepts': [u'doc_id', u'text_order', u'concept', u'freq',
u'relevance_score', u'concept_type'],
'concepts_surface_strings': [u'doc_id', u'text_order', u'concept',
u'surface_string', u'text_span'],
'sentiments': [u'doc_id', u'text_order', u'sentiment_value'],
'absa_entities': [u'doc_id', u'text_order', u'entity_name', u'entity_type',
u'surface_string', u'text_span'],
'absa_normalized_texts': [u'doc_id', u'text_order', u'normalized_text'],
'absa_relations': [u'doc_id', u'text_order', u'relation_id',
u'opinion_holder', u'restriction', u'sentiment_value',
u'is_external', u'surface_string', u'text_span'],
'absa_relations_entities': [u'doc_id', u'text_order', u'relation_id',
u'entity_type', u'entity_name'],
'absa_evaluations': [u'doc_id', u'text_order', u'evaluation_id',
u'sentiment_value', u'surface_string', u'text_span'],
'absa_evaluations_entities': [u'doc_id', u'text_order', u'evaluation_id',
u'entity_type', u'entity_name'],
}
# `anacode.agg.aggregations.ApiDataset.from_path` depends
# on ordering of files defined in values here
CSV_FILES = {
'categories': ['categories.csv'],
'concepts': ['concepts.csv', 'concepts_surface_strings.csv'],
'sentiments': ['sentiments.csv'],
'absa': [
'absa_entities.csv', 'absa_normalized_texts.csv',
'absa_relations.csv', 'absa_relations_entities.csv',
'absa_evaluations.csv', 'absa_evaluations_entities.csv'
]
}
def categories_to_list(doc_id, analyzed, single_document=False):
"""Converts categories response to flat list with doc_id included.
:param doc_id: Will be inserted to each row as first element
:param analyzed: Response json from anacode api for categories call
:type analyzed: list
:param single_document: Is analysis describing just one document
:type single_document: bool
:return: dict -- Dictionary with one key 'categories' pointing to flat list
of categories
"""
cat_list = []
for order, text_analyzed in enumerate(analyzed):
for result_dict in text_analyzed:
row = [doc_id, 0, result_dict.get('label'),
result_dict.get('probability')]
if single_document:
row[1] += order
else:
row[0] += order
cat_list.append(row)
return {'categories': cat_list}
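# Editor's sketch (hypothetical API values):
#
#     categories_to_list(5, [[{'label': 'camera', 'probability': 0.7}],
#                            [{'label': 'travel', 'probability': 0.1}]])
#
# returns {'categories': [[5, 0, 'camera', 0.7], [6, 0, 'travel', 0.1]]};
# with single_document=True the doc_id stays 5 and text_order becomes 0 and 1.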
def concepts_to_list(doc_id, analyzed, single_document=False):
"""Converts concepts response to flat lists with doc_id included
:param doc_id: Will be inserted to each row as first element
:param analyzed: Response json from anacode api for concepts call
:type analyzed: list
:param single_document: Is analysis describing just one document
:type single_document: bool
:return: dict -- Dictionary with two keys: 'concepts' pointing to flat list
of found concepts and their metadata and 'concepts_surface_strings'
pointing to flat list of strings realizing found concepts
"""
con_list, exp_list = [], []
for order, text_analyzed in enumerate(analyzed):
for concept in text_analyzed or []:
row = [doc_id, 0, concept.get('concept'),
concept.get('freq'), concept.get('relevance_score'),
concept.get('type')]
if single_document:
row[1] += order
else:
row[0] += order
con_list.append(row)
for string in concept.get('surface', []):
surface_str, span = string['surface_string'], string['span']
exp_list.append([row[0], row[1], concept.get('concept'),
surface_str, '-'.join(map(str, span))])
return {'concepts': con_list, 'concepts_surface_strings': exp_list}
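# Editor's sketch (hypothetical API values): for doc_id=0, a concept entry
#
#     {'concept': 'Lenovo', 'freq': 2, 'relevance_score': 1.0, 'type': 'brand',
#      'surface': [{'surface_string': 'lenovo', 'span': [0, 6]}]}
#
# yields a 'concepts' row [0, 0, 'Lenovo', 2, 1.0, 'brand'] and a
# 'concepts_surface_strings' row [0, 0, 'Lenovo', 'lenovo', '0-6'].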
def sentiments_to_list(doc_id, analyzed, single_document=False):
"""Converts sentiments response to flat lists with doc_id included
:param doc_id: Will be inserted to each row as first element
:param analyzed: Response json from anacode api for sentiment call
:type analyzed: list
:param single_document: Is analysis describing just one document
:type single_document: bool
:return: dict -- Dictionary with one key 'sentiments' pointing to flat list
of sentiment probabilities
"""
sen_list = []
for order, sentiment in enumerate(analyzed):
row = [doc_id, 0, sentiment['sentiment_value']]
if single_document:
# this should not happen
row[1] += order
else:
row[0] += order
sen_list.append(row)
return {'sentiments': sen_list}
def _absa_entities_to_list(doc_id, order, entities):
ent_list = []
for entity_dict in entities:
text_span = '-'.join(map(str, entity_dict['surface']['span']))
surface_string = entity_dict['surface']['surface_string']
for semantics in entity_dict['semantics']:
row = [doc_id, order, semantics['value'], semantics['type'],
surface_string, text_span]
ent_list.append(row)
return ent_list
def _absa_normalized_text_to_list(doc_id, order, normalized_text):
return [[doc_id, order, normalized_text]]
def _absa_relations_to_list(doc_id, order, relations):
rel_list, ent_list = [], []
for rel_index, rel in enumerate(relations):
rel_row = [doc_id, order, rel_index,
rel['semantics']['opinion_holder'],
rel['semantics']['restriction'],
rel['semantics']['sentiment_value'],
rel['external_entity'],
rel['surface']['surface_string'],
'-'.join(map(str, rel['surface']['span']))]
rel_list.append(rel_row)
for ent in rel['semantics'].get('entity', []):
ent_row = [doc_id, order, rel_index, ent['type'], ent['value']]
ent_list.append(ent_row)
return rel_list, ent_list
def _absa_evaluations_to_list(doc_id, order, evaluations):
eval_list, ent_list = [], []
for eval_index, evaluation in enumerate(evaluations):
eval_row = [doc_id, order, eval_index,
evaluation['semantics']['sentiment_value'],
evaluation['surface']['surface_string'],
'-'.join(map(str, evaluation['surface']['span']))]
eval_list.append(eval_row)
for ent in evaluation['semantics'].get('entity', []):
ent_row = [doc_id, order, eval_index, ent['type'], ent['value']]
ent_list.append(ent_row)
return eval_list, ent_list
def absa_to_list(doc_id, analyzed, single_document=False):
"""Converts ABSA response to flat lists with doc_id included
:param doc_id: Will be inserted to each row as first element
:param analyzed: Response json from anacode api for ABSA call
:type analyzed: list
:param single_document: Is analysis describing just one document
:type single_document: bool
:return: dict -- Dictionary with six keys: 'absa_entities' pointing to flat
list of found entities with metadata, 'absa_normalized_texts' pointing to
flat list of normalized chinese texts, 'absa_relations' pointing to found
entity relations with metadata, 'absa_relations_entities' pointing to flat
list of entities that belong to absa relations, 'absa_evaluations'
pointing to flat list of entity evaluations with metadata and
'absa_evaluations_entities' specifying entities in absa_evaluations
"""
absa = {
'absa_entities': [],
'absa_normalized_texts': [],
'absa_relations': [],
'absa_relations_entities': [],
'absa_evaluations': [],
'absa_evaluations_entities': []
}
for order, text_analyzed in enumerate(analyzed):
if single_document:
current_id = doc_id
text_order = order
else:
current_id = doc_id + order
text_order = 0
entities = text_analyzed['entities']
ents = _absa_entities_to_list(current_id, text_order, entities)
text = text_analyzed['normalized_text']
texts = _absa_normalized_text_to_list(current_id, text_order, text)
relations = text_analyzed['relations']
rels, rel_ents = _absa_relations_to_list(current_id, text_order,
relations)
evaluations = text_analyzed['evaluations']
evals, eval_ents = _absa_evaluations_to_list(current_id, text_order,
evaluations)
absa['absa_entities'].extend(ents)
absa['absa_normalized_texts'].extend(texts)
absa['absa_relations'].extend(rels)
absa['absa_relations_entities'].extend(rel_ents)
absa['absa_evaluations'].extend(evals)
absa['absa_evaluations_entities'].extend(eval_ents)
return absa
class Writer(object):
"""Base "abstract" class containing common methods that are
needed by all implementations of Writer interface.
The writer interface consists of init, close and write_bulk methods.
"""
def __init__(self):
self.ids = {'scrape': 0, 'analyze': 0}
def write_row(self, call_type, call_result):
"""Decides what kind of data it got and calls appropriate write method.
:param call_type: Library's ID of anacode call
:type call_type: int
:param call_result: JSON response from Anacode API
:type call_result: list
"""
if call_type == codes.SCRAPE:
self.write_scrape(call_result)
if call_type == codes.ANALYZE:
self.write_analysis(call_result)
def _add_new_data_from_dict(self, new_data):
"""Not implemented here!
Used by write methods to submit new Anacode API response data for storage.
:param new_data: dict; keys are data set names and values are
flat lists of rows
:type new_data: dict
"""
pass
def write_scrape(self, scraped):
self.ids['scrape'] += 1
def write_analysis(self, analyzed):
"""Inspects analysis result for performed analysis and delegates
persisting of results to appropriate write methods.
:param analyzed: JSON object analysis response
:type: dict
"""
single_document = analyzed.get('single_document', False)
analyzed_length = 1
if 'categories' in analyzed:
categories = analyzed['categories']
self.write_categories(categories, single_document=single_document)
if not single_document:
analyzed_length = len(categories)
if 'concepts' in analyzed:
concepts = analyzed['concepts']
self.write_concepts(concepts, single_document=single_document)
if not single_document:
analyzed_length = len(concepts)
if 'sentiment' in analyzed:
sentiment = analyzed['sentiment']
self.write_sentiment(sentiment, single_document=single_document)
if not single_document:
analyzed_length = len(sentiment)
if 'absa' in analyzed:
absa = analyzed['absa']
self.write_absa(analyzed['absa'], single_document=single_document)
if not single_document:
analyzed_length = len(absa)
self.ids['analyze'] += analyzed_length
def write_categories(self, analyzed, single_document=False):
"""Converts categories analysis result to flat lists and stores them.
:param analyzed: JSON categories analysis result
:type analyzed: list
:param single_document: Is analysis describing just one document
:type single_document: bool
"""
doc_id = self.ids['analyze']
new_data = categories_to_list(doc_id, analyzed, single_document)
self._add_new_data_from_dict(new_data)
def write_concepts(self, analyzed, single_document=False):
"""Converts concepts analysis result to flat lists and stores them.
:param analyzed: JSON concepts analysis result
:type analyzed: list
:param single_document: Is analysis describing just one document
:type single_document: bool
"""
doc_id = self.ids['analyze']
new_data = concepts_to_list(doc_id, analyzed, single_document)
self._add_new_data_from_dict(new_data)
def write_sentiment(self, analyzed, single_document=False):
"""Converts sentiment analysis result to flat lists and stores them.
:param analyzed: JSON sentiment analysis result
:type analyzed: list
:param single_document: Is analysis describing just one document
:type single_document: bool
"""
doc_id = self.ids['analyze']
new_data = sentiments_to_list(doc_id, analyzed, single_document)
self._add_new_data_from_dict(new_data)
def write_absa(self, analyzed, single_document=False):
"""Converts absa analysis result to flat lists and stores them.
:param analyzed: JSON absa analysis result
:type analyzed: list
:param single_document: Is analysis describing just one document
:type single_document: bool
"""
doc_id = self.ids['analyze']
new_data = absa_to_list(doc_id, analyzed, single_document)
self._add_new_data_from_dict(new_data)
def write_bulk(self, results):
"""Stores multiple anacode api's JSON responses marked with call IDs as
tuples (call_id, call_result). Both scrape and analyze call IDs
are defined in anacode.codes module.
:param results: List of anacode responses with IDs of calls used
:type results: list
"""
for call_type, call_result in results:
self.write_row(call_type, call_result)
def init(self):
"""Not implemented here! Each subclass should decide what to do here."""
pass
def close(self):
"""Not implemented here! Each subclass should decide what to do here."""
pass
def __enter__(self):
self.init()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
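

# --- Illustrative sketch (not part of the original module) ------------------
# A minimal in-memory subclass showing the three extension points a concrete
# Writer is expected to provide: init(), close() and _add_new_data_from_dict().
# The toy row in the usage comment below is a hypothetical placeholder, not a
# real Anacode API conversion result.
class _ListWriter(Writer):
    """Collects converted rows in plain Python lists keyed by data set name."""
    def __init__(self):
        super(_ListWriter, self).__init__()
        self.rows = {}
    def init(self):
        self.rows = {}
    def close(self):
        pass
    def _add_new_data_from_dict(self, new_data):
        for name, row_list in new_data.items():
            self.rows.setdefault(name, []).extend(row_list)
# Usage sketch:
#     with _ListWriter() as writer:
#         writer._add_new_data_from_dict({'concepts': [['doc-0', 'Lenovo']]})
#         print(writer.rows)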
class DataFrameWriter(Writer):
"""Writes Anacode API output into pandas.DataFrame instances."""
def __init__(self, frames=None):
"""Initializes dictionary of result frames. Alternatively uses given
frames dict for storage.
:param frames: Might be specified to use this instead of new dict
:type frames: dict
"""
super(DataFrameWriter, self).__init__()
self.frames = {} if frames is None else frames
self._row_data = {}
def init(self):
"""Initialized empty lists for each possible data frame."""
self._row_data = {
'categories': [],
'concepts': [],
'concepts_surface_strings': [],
'sentiments': [],
'absa_entities': [],
'absa_normalized_texts': [],
'absa_relations': [],
'absa_relations_entities': [],
'absa_evaluations': [],
'absa_evaluations_entities': [],
}
def close(self):
"""Creates pandas data frames to self.frames dict and clears internal
state.
"""
for name, row in self._row_data.items():
if len(row) > 0:
self.frames[name] = pd.DataFrame(row, columns=HEADERS[name])
self._row_data = {}
def _add_new_data_from_dict(self, new_data):
"""Stores anacode api result converted to flat lists.
        :param new_data: Anacode API result converted to flat lists of rows
        :type new_data: dict
"""
for name, row_list in new_data.items():
self._row_data[name].extend(row_list)
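

# --- Illustrative usage sketch (not part of the original module) ------------
# Typical DataFrameWriter lifecycle: init() prepares per-data-set row buffers
# and close() turns every non-empty buffer into a pandas.DataFrame in
# `frames`. The `response` dict below is a schematic stand-in for a real
# Anacode analyze response; with no analysis keys present it only exercises
# the bookkeeping.
def _example_dataframe_writer():
    response = {'single_document': False}  # hypothetical, minimal payload
    with DataFrameWriter() as writer:
        writer.write_analysis(response)
    # Only data sets that actually received rows end up in writer.frames.
    return writer.frames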
class CSVWriter(Writer):
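    """Writes Anacode API output into csv files in a target directory."""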
def __init__(self, target_dir='.'):
"""Initializes Writer to store Anacode API analysis results in target_dir in
csv files.
:param target_dir: Path to directory where to store csv files
:type target_dir: str
"""
super(CSVWriter, self).__init__()
self.target_dir = os.path.abspath(os.path.expanduser(target_dir))
self._files = {}
self.csv = {}
def _open_csv(self, csv_name):
path = partial(os.path.join, self.target_dir)
try:
return open(path(csv_name), 'w', newline='')
except TypeError:
return open(path(csv_name), 'wb')
def init(self):
"""Opens all csv files for writing and writes headers to them."""
self.close()
backup(self.target_dir, chain.from_iterable(CSV_FILES.values()))
self._files = {
'categories': self._open_csv('categories.csv'),
'concepts': self._open_csv('concepts.csv'),
'concepts_surface_strings': self._open_csv(
'concepts_surface_strings.csv'
),
'sentiments': self._open_csv('sentiments.csv'),
'absa_entities': self._open_csv('absa_entities.csv'),
'absa_normalized_texts': self._open_csv(
'absa_normalized_texts.csv'
),
'absa_relations': self._open_csv('absa_relations.csv'),
'absa_relations_entities': self._open_csv(
'absa_relations_entities.csv'
),
'absa_evaluations': self._open_csv('absa_evaluations.csv'),
'absa_evaluations_entities': self._open_csv(
'absa_evaluations_entities.csv'
),
}
self.csv = {name: csv.writer(fp) for name, fp in self._files.items()}
for name, writer in self.csv.items():
writer.writerow(HEADERS[name])
def _csv_has_content(self, csv_path):
if not os.path.isfile(csv_path):
return False
with open(csv_path) as fp:
for line_count, line in enumerate(fp):
                if line_count == 1 and line.strip() != '':
return True
return False
def close(self):
"""Closes all csv files and removes empty ones."""
for name, file in self._files.items():
try:
file.close()
except (IOError, AttributeError):
print('Problem closing "{}"'.format(name))
for file_list in CSV_FILES.values():
for file_name in file_list:
path = os.path.join(self.target_dir, file_name)
if os.path.isfile(path) and not self._csv_has_content(path):
os.unlink(path)
self._files = {}
self.csv = {}
def _add_new_data_from_dict(self, new_data):
"""Stores anacode api result converted to flat lists.
        :param new_data: Anacode API result converted to flat lists of rows
        :type new_data: dict
"""
for name, row_list in new_data.items():
self.csv[name].writerows(row_list)
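

# --- Illustrative usage sketch (not part of the original module) ------------
# CSVWriter follows the same context-manager protocol: init() backs up and
# re-creates the csv files in target_dir, and close() drops any file that
# ended up holding only a header row. `results` is a list of
# (call_id, response) tuples as accepted by write_bulk; the responses
# themselves would come from the Anacode API and are not shown here.
def _example_csv_writer(results, target_dir='.'):
    with CSVWriter(target_dir=target_dir) as writer:
        writer.write_bulk(results)
    return writer.target_dir  # populated csv files now live in this directory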
| bsd-3-clause |
smblance/ggplot | ggplot/tests/test_chart_components.py | 12 | 1664 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
import pandas as pd
from nose.tools import assert_raises, assert_equal, assert_is_none
from ggplot import *
from ggplot.utils.exceptions import GgplotError
def test_chart_components():
"""
Test invalid arguments to chart components
"""
df = pd.DataFrame({'x': np.arange(10),
'y': np.arange(10)})
gg = ggplot(df, aes(x='x', y='y'))
# test ggtitle
assert_raises(GgplotError, ggtitle, None)
# test xlim
assert_raises(GgplotError, xlim, "foo", 1)
assert_raises(GgplotError, xlim, "foo", "bar")
# test ylim
assert_raises(GgplotError, ylim, "foo", 1)
assert_raises(GgplotError, ylim, "foo", "bar")
    # test xlab
    assert_raises(GgplotError, xlab, None)
# test ylab
assert_raises(GgplotError, ylab, None)
# test labs
test_xlab = 'xlab'
gg_xlab = gg + labs(x=test_xlab)
assert_equal(gg_xlab.xlab, test_xlab)
assert_is_none(gg_xlab.ylab)
assert_is_none(gg_xlab.title)
test_ylab = 'ylab'
gg_ylab = gg + labs(y=test_ylab)
assert_is_none(gg_ylab.xlab)
assert_equal(gg_ylab.ylab, test_ylab)
assert_is_none(gg_ylab.title)
test_title = 'title'
gg_title = gg + labs(title=test_title)
assert_is_none(gg_title.xlab)
assert_is_none(gg_title.ylab)
assert_equal(gg_title.title, test_title)
gg_labs = gg + labs(x=test_xlab, y=test_ylab, title=test_title)
assert_equal(gg_labs.xlab, test_xlab)
assert_equal(gg_labs.ylab, test_ylab)
assert_equal(gg_labs.title, test_title)
| bsd-2-clause |
kikocorreoso/mplutils | mplutils/axes.py | 1 | 8516 | # -*- coding: utf-8 -*-
"""
Created on Sun Feb 21 23:43:37 2016
@author: kiko
"""
from __future__ import division, absolute_import
from .settings import RICH_DISPLAY
import numpy as np
if RICH_DISPLAY:
from IPython.display import display
def axes_set_better_defaults(ax,
axes_color = '#777777',
grid = False,
show = False):
"""
Enter an Axes instance and it will change the defaults to an opinionated
version of how a simple plot should be.
Parameters:
-----------
ax : matplotlib.axes.Axes or matplotlib.axes.Subplot instance
axes_color : str
A string indicating a valid matplotlib color.
grid : bool
If `True` the grid of the axes will be shown, if `False` (default)
        the grid, if active, will be suppressed.
show : bool
if `True` the figure will be shown.
If you are working in a rich display environment like the IPython
qtconsole or the Jupyter notebook it will use
`IPython.display.display` to show the figure.
If you are working otherwise it will call the `show` of the
`Figure` instance.
"""
ax.set_axis_bgcolor((1, 1, 1))
ax.grid(grid)
for key in ax.spines.keys():
if ax.spines[key].get_visible():
ax.spines[key].set_color(axes_color)
ax.tick_params(axis = 'x', colors = axes_color)
ax.tick_params(axis = 'y', colors = axes_color)
ax.figure.set_facecolor('white')
ax.figure.canvas.draw()
if show:
if RICH_DISPLAY:
display(ax.figure)
else:
ax.figure.show()
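

# --- Illustrative usage sketch (not part of the original module) ------------
# Minimal example of applying the opinionated defaults to a fresh Axes. It
# targets the older matplotlib API this module was written against (e.g.
# Axes.set_axis_bgcolor), so it may not run on recent matplotlib releases.
def _example_axes_set_better_defaults():
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    ax.plot([0, 1, 2, 3], [0, 1, 4, 9])
    axes_set_better_defaults(ax, axes_color='#555555', grid=True, show=False)
    return ax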
# http://matplotlib.org/examples/pylab_examples/spine_placement_demo.html
def axes_set_axis_position(ax,
spines = ['bottom', 'left'],
pan = 0,
show = False):
"""
    Enter an Axes instance and, depending on the options, it will display
    only the axis spines you selected.
Parameters:
-----------
ax : matplotlib.axes.Axes or matplotlib.axes.Subplot instance
spines : str or iterable
A string or an iterable of strings with the following valid options:
'bottom' : To active the bottom x-axis.
'top' : To active the top x-axis.
'left' : To active the left y-axis.
'right' : To active the right y-axis.
pan : int or iterable
        An integer value or an iterable of integer values indicating how far
        to pan the axis, in points. It must have the same length and the same
        order as the `spines` input.
show : bool
if `True` the figure will be shown.
If you are working in a rich display environment like the IPython
qtconsole or the Jupyter notebook it will use
`IPython.display.display` to show the figure.
If you are working otherwise it will call the `show` of the
`Figure` instance.
"""
if np.isscalar(spines):
spines = (spines,)
len_spines = 1
else:
len_spines = len(spines)
if np.isscalar(pan):
pan = np.repeat(pan, len_spines)
len_pan = 1
else:
len_pan = len(pan)
if len_pan > 1 and len_pan != len_spines:
        raise ValueError('Length of `spines` and `pan` mismatch. `pan` '
                         'should be a scalar or should have the same length '
                         'as `spines`.')
i = 0
for loc, spine in ax.spines.items():
if loc in spines:
spine.set_position(('outward', pan[i])) # outward by `pan` points
spine.set_smart_bounds(True)
i += 1
else:
#spine.set_color('none') # don't draw spine
spine.set_visible(False)
# turn off ticks where there is no spine
if 'left' in spines:
ax.yaxis.set_ticks_position('left')
ax.tick_params(labelleft = True)
if 'right' in spines:
ax.yaxis.set_ticks_position('right')
ax.tick_params(labelright = True)
if 'left' in spines and 'right' in spines:
ax.yaxis.set_ticks_position('both')
ax.tick_params(labelleft = True, labelright = True)
if 'left' not in spines and 'right' not in spines:
ax.yaxis.set_ticks([])
if 'bottom' in spines:
ax.xaxis.set_ticks_position('bottom')
ax.tick_params(labelbottom = True)
if 'top' in spines:
ax.xaxis.set_ticks_position('top')
ax.tick_params(labeltop = True)
if 'bottom' in spines and 'top' in spines:
ax.xaxis.set_ticks_position('both')
ax.tick_params(labelbottom = True, labeltop = True)
if 'bottom' not in spines and 'top' not in spines:
ax.xaxis.set_ticks([])
ax.figure.canvas.draw()
if show:
if RICH_DISPLAY:
display(ax.figure)
else:
ax.figure.show()
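

# --- Illustrative usage sketch (not part of the original module) ------------
# Keep only the bottom and left spines and pan both outward by 10 points.
# Like the function above, it relies on the older matplotlib spine API
# (set_smart_bounds), so recent matplotlib releases may reject it.
def _example_axes_set_axis_position():
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    ax.plot([-2, -1, 0, 1, 2], [4, 1, 0, 1, 4])
    axes_set_axis_position(ax, spines=['bottom', 'left'], pan=10, show=False)
    return ax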
def axes_set_origin(ax,
x = 0,
y = 0,
xticks_position = 'bottom',
yticks_position = 'left',
xticks_visible = True,
yticks_visible = True,
show = False):
"""
    Function to locate the x-axis and the y-axis at the positions you want.
Parameters:
-----------
ax : matplotlib.axes.Axes or matplotlib.axes.Subplot instance
x : int or float
Value indicating the position on the y-axis where you want the x-axis
to be located.
y : int or float
Value indicating the position on the x-axis where you want the y-axis
to be located.
xticks_position : str
Default value is 'bottom' if you want the ticks to be located below
the x-axis. 'top' if you want the ticks to be located above the x-axis.
yticks_position : str
Default value is 'left' if you want the ticks to be located on the left
side of the y-axis. 'right' if you want the ticks to be located on the
right side of the y-axis.
xticks_visible : bool
Default value is True if you want ticks visible on the x-axis. False
if you don't want to see the ticks on the x-axis.
yticks_visible : bool
Default value is True if you want ticks visible on the y-axis. False
if you don't want to see the ticks on the y-axis.
show : bool
if `True` the figure will be shown.
If you are working in a rich display environment like the IPython
qtconsole or the Jupyter notebook it will use
`IPython.display.display` to show the figure.
If you are working otherwise it will call the `show` of the
`Figure` instance.
"""
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.xaxis.set_ticks_position(xticks_position)
ax.spines['bottom'].set_position(('data', x))
ax.yaxis.set_ticks_position(yticks_position)
ax.spines['left'].set_position(('data', y))
if not xticks_visible:
ax.set_xticks([])
if not yticks_visible:
ax.set_yticks([])
ax.figure.canvas.draw()
if show:
if RICH_DISPLAY:
display(ax.figure)
else:
ax.figure.show()
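

# --- Illustrative usage sketch (not part of the original module) ------------
# Draw a sine curve and move both axes so they cross at the data origin.
def _example_axes_set_origin():
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    x = np.linspace(-np.pi, np.pi, 200)
    ax.plot(x, np.sin(x))
    axes_set_origin(ax, x=0, y=0, show=False)
    return ax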
def axes_set_aspect_ratio(ax, ratio = 'equal', show = True):
"""
    Function that accepts an Axes instance and updates it, setting the aspect
    ratio of the axes to the given value.
Parameters:
-----------
ax : matplotlib.axes.Axes or matplotlib.axes.Subplot instance
ratio : str or int/float
The value can be a string with the following values:
'equal' : (default) same scaling from data to plot units for x and y
'auto' : automatic; fill position rectangle with data
Or a:
number (int or float) : a circle will be stretched such that the
            height is num times the width. aspect=1 is the same as
aspect='equal'.
show : bool
if `True` the figure will be shown.
If you are working in a rich display environment like the IPython
qtconsole or the Jupyter notebook it will use
`IPython.display.display` to show the figure.
If you are working otherwise it will call the `show` of the
`Figure` instance.
"""
ax.set_aspect(ratio, adjustable = None)
if show:
if RICH_DISPLAY:
display(ax.figure)
else:
            ax.figure.show()
| mit |
robcarver17/pysystemtrade | systems/accounts/pandl_calculators/pandl_generic_costs.py | 1 | 3494 | import pandas as pd
from systems.accounts.pandl_calculators.pandl_calculation import pandlCalculation, apply_weighting
curve_types = ['gross', 'net', 'costs']
GROSS_CURVE = 'gross'
NET_CURVE = 'net'
COSTS_CURVE = 'costs'
class pandlCalculationWithGenericCosts(pandlCalculation):
def weight(self, weight: pd.Series):
weighted_capital = apply_weighting(weight, self.capital)
weighted_positions = apply_weighting(weight, self.positions)
return pandlCalculationWithGenericCosts(self.price,
positions = weighted_positions,
fx = self.fx,
capital = weighted_capital,
value_per_point = self.value_per_point,
roundpositions = self.roundpositions,
delayfill = self.delayfill)
def as_pd_series(self, percent = False, curve_type=NET_CURVE):
if curve_type==NET_CURVE:
if percent:
return self.net_percentage_pandl()
else:
return self.net_pandl_in_base_currency()
elif curve_type==GROSS_CURVE:
if percent:
return self.percentage_pandl()
else:
return self.pandl_in_base_currency()
elif curve_type==COSTS_CURVE:
if percent:
return self.costs_percentage_pandl()
else:
return self.costs_pandl_in_base_currency()
else:
raise Exception("Curve type %s not recognised! Must be one of %s" % (curve_type, curve_types))
def net_percentage_pandl(self) -> pd.Series:
gross = self.percentage_pandl()
costs = self.costs_percentage_pandl()
net = _add_gross_and_costs(gross, costs)
return net
def net_pandl_in_base_currency(self) -> pd.Series:
gross = self.pandl_in_base_currency()
costs = self.costs_pandl_in_base_currency()
net = _add_gross_and_costs(gross, costs)
return net
def net_pandl_in_instrument_currency(self) -> pd.Series:
gross = self.pandl_in_instrument_currency()
costs = self.costs_pandl_in_instrument_currency()
net = _add_gross_and_costs(gross, costs)
return net
def net_pandl_in_points(self) -> pd.Series:
gross = self.pandl_in_points()
costs = self.costs_pandl_in_points()
net = _add_gross_and_costs(gross, costs)
return net
def costs_percentage_pandl(self) -> pd.Series:
costs_in_base = self.costs_pandl_in_base_currency()
costs = self._percentage_pandl_given_pandl(costs_in_base)
return costs
def costs_pandl_in_base_currency(self) -> pd.Series:
costs_in_instr_ccy = self.costs_pandl_in_instrument_currency()
costs_in_base = self._base_pandl_given_currency_pandl(costs_in_instr_ccy)
return costs_in_base
def costs_pandl_in_instrument_currency(self) -> pd.Series:
costs_in_points = self.costs_pandl_in_points()
costs_in_instr_ccy = self._pandl_in_instrument_ccy_given_points_pandl(costs_in_points)
return costs_in_instr_ccy
def costs_pandl_in_points(self) -> pd.Series:
raise NotImplementedError
def _add_gross_and_costs(gross: pd.Series,
costs: pd.Series):
cumsum_costs = costs.cumsum()
cumsum_costs_aligned = cumsum_costs.reindex(gross.index, method="ffill")
costs_aligned = cumsum_costs_aligned.diff()
net = gross + costs_aligned
return net
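

# --- Illustrative sketch (not part of the original module) ------------------
# Minimal concrete subclass showing the one method a generic-costs p&l
# calculator must supply: costs expressed in price points, aligned with the
# position series. The flat per-contract cost used here is a made-up
# illustration parameter, not something defined by pysystemtrade.
class pandlCalculationWithFlatCosts(pandlCalculationWithGenericCosts):
    def __init__(self, *args, cost_per_contract_in_points: float = 0.0, **kwargs):
        super().__init__(*args, **kwargs)
        self._cost_per_contract_in_points = cost_per_contract_in_points

    def costs_pandl_in_points(self) -> pd.Series:
        # costs are negative and proportional to the absolute traded quantity
        trades = self.positions.diff().abs()
        return -trades * self._cost_per_contract_in_points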
| gpl-3.0 |