remove newer scipy dir

Former-commit-id: cf5e73888ce42fb15a1858d142499ad5bc207035 [formerly 2812a04dbc09bfa01121082b8fe7e9e977261bb9] [formerly c04d92b25a [formerly 82b9d06051]]
Former-commit-id: c04d92b25a
Former-commit-id: 316f21b41f
This commit is contained in:
mjames-upc 2015-08-24 11:59:55 -06:00
parent 05cd919443
commit f36bde7795
2756 changed files with 0 additions and 1160782 deletions

View file

@ -1,3 +0,0 @@
[run]
branch = True
include = */scipy/*

View file

@ -1,44 +0,0 @@
* text=auto
tools/win32/build_scripts/nsis_scripts/*.nsi.in eol=crlf
# Don't want to convert line endings of this file, causes test failure on
# Windows
scipy/io/matlab/tests/data/japanese_utf8.txt binary
# Autogenerated files
scipy/special/_ufuncs_cxx.pyx binary
scipy/special/_ufuncs_cxx.pxd binary
scipy/special/_ufuncs.pyx binary
# SWIG-generated files
scipy/sparse/sparsetools/bsr.py binary
scipy/sparse/sparsetools/bsr_wrap.cxx binary
scipy/sparse/sparsetools/coo.py binary
scipy/sparse/sparsetools/coo_wrap.cxx binary
scipy/sparse/sparsetools/csc.py binary
scipy/sparse/sparsetools/csc_wrap.cxx binary
scipy/sparse/sparsetools/csr.py binary
scipy/sparse/sparsetools/csr_wrap.cxx binary
scipy/sparse/sparsetools/dia.py binary
scipy/sparse/sparsetools/dia_wrap.cxx binary
scipy/sparse/sparsetools/csgraph.py binary
scipy/sparse/sparsetools/csgraph_wrap.cxx binary
# Numerical data files
scipy/special/tests/data/*.txt binary
scipy/special/tests/data/*/*.txt binary
scipy/special/tests/data/*/*/*.txt binary
# SWIG-generated files
scipy/sparse/sparsetools/bsr.py binary
scipy/sparse/sparsetools/bsr_wrap.cxx binary
scipy/sparse/sparsetools/coo.py binary
scipy/sparse/sparsetools/coo_wrap.cxx binary
scipy/sparse/sparsetools/csc.py binary
scipy/sparse/sparsetools/csc_wrap.cxx binary
scipy/sparse/sparsetools/csr.py binary
scipy/sparse/sparsetools/csr_wrap.cxx binary
scipy/sparse/sparsetools/dia.py binary
scipy/sparse/sparsetools/dia_wrap.cxx binary
scipy/sparse/sparsetools/csgraph.py binary
scipy/sparse/sparsetools/csgraph_wrap.cxx binary

View file

@ -1,211 +0,0 @@
# Editor temporary/working/backup files #
#########################################
.#*
[#]*#
*~
*$
*.bak
.idea/*
*.kdev4
*.org
.project
.pydevproject
*.rej
.settings/
.*.sw[nop]
.sw[nop]
*.tmp
*.vim
tags
# Compiled source #
###################
*.a
*.com
*.class
*.dll
*.exe
*.l[ao]
*.o
*.py[ocd]
*.so
_configtest.c
# Packages #
############
# it's better to unpack these files and commit the raw source
# git has its own built in compression methods
*.7z
*.bz2
*.bzip2
*.dmg
*.gz
*.iso
*.jar
*.rar
*.tar
*.tbz2
*.tgz
*.zip
# Python files #
################
# setup.py working directory
build
# sphinx build directory
doc/_build
# cython files
cythonize.dat
# setup.py dist directory
dist
# Egg metadata
*.egg-info
# tox testing tool
.tox
# The shelf plugin uses this dir
./.shelf
MANIFEST
# distutils configuration
site.cfg
# other temporary files
.coverage
.deps
.libs
# Paver generated files #
#########################
/release
# Logs and databases #
######################
*.log
*.sql
*.sqlite
# Patches #
###########
*.patch
*.diff
# OS generated files #
######################
.directory
.fseventsd
.DS_Store*
.gdb_history
.VolumeIcon.icns
ehthumbs.db
Icon?
Thumbs.db
# Documentation generated files #
#################################
doc/frontpage/build
doc/source/generated
# Things specific to this project #
###################################
benchmarks/env
benchmarks/results
benchmarks/scipy
benchmarks/html
benchmarks/scipy-benchmarks
scipy/__config__.py
scipy/cluster/_vq.c
scipy/cluster/_hierarchy.c
scipy/fftpack/_fftpackmodule.c
scipy/fftpack/convolvemodule.c
scipy/fftpack/src/dct.c
scipy/fftpack/src/dst.c
scipy/integrate/_dopmodule.c
scipy/integrate/lsodamodule.c
scipy/integrate/vodemodule.c
scipy/interpolate/_ppoly.c
scipy/interpolate/interpnd.c
scipy/interpolate/src/dfitpack-f2pywrappers.f
scipy/interpolate/src/dfitpackmodule.c
scipy/io/matlab/mio5_utils.c
scipy/io/matlab/mio_utils.c
scipy/io/matlab/streams.c
scipy/lib/blas/cblas.pyf
scipy/lib/blas/cblasmodule.c
scipy/lib/blas/fblas-f2pywrappers.f
scipy/lib/blas/fblas.pyf
scipy/lib/blas/fblasmodule.c
scipy/lib/blas/fblaswrap.f
scipy/lib/lapack/clapack.pyf
scipy/lib/lapack/clapackmodule.c
scipy/lib/lapack/flapack.pyf
scipy/lib/lapack/flapackmodule.c
scipy/linalg/_cblasmodule.c
scipy/linalg/_clapackmodule.c
scipy/linalg/_fblas-f2pywrappers.f
scipy/linalg/_fblasmodule.c
scipy/linalg/_flapack-f2pywrappers.f
scipy/linalg/_flapackmodule.c
scipy/linalg/_interpolativemodule.c
scipy/linalg/_solve_toeplitz.c
scipy/linalg/_decomp_update.c
scipy/linalg/_decomp_update.pyx
scipy/linalg/cblas.pyf
scipy/linalg/clapack.pyf
scipy/linalg/cython_blas.c
scipy/linalg/cython_lapack.c
scipy/linalg/fblas.pyf
scipy/linalg/flapack.pyf
scipy/linalg/src/id_dist/src/*_subr_*.f
scipy/ndimage/src/_ni_label.c
scipy/optimize/cobyla/_cobylamodule.c
scipy/optimize/lbfgsb/_lbfgsbmodule.c
scipy/optimize/minpack2/minpack2module.c
scipy/optimize/nnls/_nnlsmodule.c
scipy/optimize/slsqp/_slsqpmodule.c
scipy/signal/_spectral.c
scipy/signal/_max_len_seq_inner.c
scipy/signal/correlate_nd.c
scipy/signal/lfilter.c
scipy/sparse/_csparsetools.c
scipy/sparse/_csparsetools.pyx
scipy/sparse/csgraph/_min_spanning_tree.c
scipy/sparse/csgraph/_shortest_path.c
scipy/sparse/csgraph/_tools.c
scipy/sparse/csgraph/_traversal.c
scipy/sparse/csgraph/_reordering.c
scipy/sparse/linalg/dsolve/umfpack/_umfpack.py
scipy/sparse/linalg/dsolve/umfpack/_umfpack_wrap.c
scipy/sparse/linalg/eigen/arpack/_arpack-f2pywrappers.f
scipy/sparse/linalg/eigen/arpack/_arpackmodule.c
scipy/sparse/linalg/eigen/arpack/arpack.pyf
scipy/sparse/linalg/isolve/iterative/BiCGREVCOM.f
scipy/sparse/linalg/isolve/iterative/BiCGSTABREVCOM.f
scipy/sparse/linalg/isolve/iterative/CGREVCOM.f
scipy/sparse/linalg/isolve/iterative/CGSREVCOM.f
scipy/sparse/linalg/isolve/iterative/GMRESREVCOM.f
scipy/sparse/linalg/isolve/iterative/QMRREVCOM.f
scipy/sparse/linalg/isolve/iterative/STOPTEST2.f
scipy/sparse/linalg/isolve/iterative/_iterative.pyf
scipy/sparse/linalg/isolve/iterative/_iterativemodule.c
scipy/sparse/linalg/isolve/iterative/getbreak.f
scipy/sparse/sparsetools/bsr_impl.h
scipy/sparse/sparsetools/csc_impl.h
scipy/sparse/sparsetools/csr_impl.h
scipy/sparse/sparsetools/other_impl.h
scipy/sparse/sparsetools/sparsetools_impl.h
scipy/spatial/ckdtree/ckdtree.cxx
scipy/spatial/ckdtree/ckdtree.h
scipy/spatial/qhull.c
scipy/special/_ellip_harm_2.c
scipy/special/_ellip_harm_2.h
scipy/special/_logit.c
scipy/special/_ufuncs.c
scipy/special/_ufuncs.h
scipy/special/_ufuncs_cxx.cxx
scipy/special/_ufuncs_cxx.h
scipy/special/specfunmodule.c
scipy/stats/_rank.c
scipy/stats/mvn-f2pywrappers.f
scipy/stats/mvnmodule.c
scipy/stats/statlibmodule.c
scipy/stats/vonmises_cython.c
scipy/version.py
scipy/special/_exprel.c

View file

@ -1,6 +0,0 @@
[submodule "doc/scipy-sphinx-theme"]
path = doc/scipy-sphinx-theme
url = https://github.com/scipy/scipy-sphinx-theme.git
[submodule "doc/sphinxext"]
path = doc/sphinxext
url = https://github.com/numpy/numpydoc.git

View file

@ -1,120 +0,0 @@
# After changing this file, check it on:
# http://lint.travis-ci.org/
language: python
sudo: false
matrix:
include:
- python: 2.7
env:
- PYFLAKES=1
- PEP8=1
- NUMPYSPEC=numpy
before_install:
- pip install pep8==1.5.1
- pip install pyflakes
script:
- PYFLAKES_NODOCTEST=1 pyflakes scipy benchmarks/benchmarks | grep -E -v 'unable to detect undefined names|assigned to but never used|imported but unused|redefinition of unused' > test.out; cat test.out; test \! -s test.out
- pep8 scipy benchmarks/benchmarks
- python: 2.7
env:
- TESTMODE=fast
- REFGUIDE_CHECK=1
- COVERAGE=
- NPY_RELAXED_STRIDES_CHECKING=1
- NUMPYSPEC="--upgrade git+git://github.com/numpy/numpy.git@v1.9.1"
addons:
apt:
packages:
- libatlas-dev
- libatlas-base-dev
- liblapack-dev
- gfortran
- libgmp-dev
- libmpfr-dev
- ccache
- libfreetype6-dev
- libpng-dev
- zlib1g-dev
- texlive-fonts-recommended
- python: 3.4
env:
- TESTMODE=fast
- COVERAGE=
- NUMPYSPEC=numpy
- python: 3.3
env:
- TESTMODE=fast
- COVERAGE=
- NUMPYSPEC=numpy
- python: 3.2
env:
- TESTMODE=fast
- COVERAGE=
- NUMPYSPEC=numpy
- python: 2.7
env:
- TESTMODE=full
- COVERAGE=--coverage
- NUMPYSPEC=numpy
- python: 2.6
env:
- TESTMODE=fast
- OPTIMIZE=-OO
- NUMPYSPEC="numpy==1.6.2"
addons:
apt:
packages:
- libatlas-dev
- libatlas-base-dev
- liblapack-dev
- gfortran
- libgmp-dev
- libmpfr-dev
- ccache
cache:
directories:
- $HOME/.ccache
before_install:
- export PATH=/usr/lib/ccache:$PATH
- uname -a
- free -m
- df -h
- ulimit -a
- mkdir builds
- pushd builds
# Install gmpy2 dependencies
- mkdir -p $HOME/.local
- wget ftp://ftp.gnu.org/gnu/mpc/mpc-1.0.2.tar.gz
- tar xzvf mpc-1.0.2.tar.gz
- pushd mpc-1.0.2
- ./configure --prefix=$HOME/.local
- make
- make install
- popd
- export CPATH=$HOME/.local/include
- export LIBRARY_PATH=$HOME/.local/lib
# End install gmpy2 dependencies
# Speed up install by not compiling Cython
- travis_retry pip install --install-option="--no-cython-compile" Cython==0.22
- travis_retry pip install $NUMPYSPEC
- travis_retry pip install nose mpmath argparse Pillow
- travis_retry pip install gmpy2 # speeds up mpmath (scipy.special tests)
- if [ "${TESTMODE}" == "full" ]; then pip install coverage; fi
- |
if [ "${REFGUIDE_CHECK}" == "1" ]; then
travis_retry pip install matplotlib Sphinx
fi
- python -V
- popd
- set -o pipefail
script:
- python -c 'import numpy as np; print("relaxed strides checking:", np.ones((10,1),order="C").flags.f_contiguous)'
# Make sure that relaxed strides checking is actually in effect; otherwise fail loudly
- if [ "$NPY_RELAXED_STRIDES_CHECKING" = "1" ]; then python -c'import numpy as np; assert np.ones((10,1),order="C").flags.f_contiguous'; fi
- python -u $OPTIMIZE runtests.py -g -m $TESTMODE $COVERAGE |& tee runtests.log
- tools/validate_runtests_log.py $TESTMODE < runtests.log
- if [ "${REFGUIDE_CHECK}" == "1" ]; then python runtests.py --refguide-check; fi
notifications:
# Perhaps we should have status emails sent to the mailing list, but
# let's wait to see what people think before turning that on.
email: false

View file

@ -1,30 +0,0 @@
No-frills version:
* Clone bento::
$ git clone git://github.com/cournape/Bento.git bento
* Bootstrap bento::
$ cd bento && python bootstrap.py
* Clone Waf::
$ git clone https://code.google.com/p/waf/
$ git checkout waf-1.7.13 # waf breaks API regularly, this version works
* Set the WAFDIR environment variable to the base dir of the waf repo you
just created (in your bash_login for example if you're going to build with
Bento often). This is unfortunately needed, Waf is not installable like a
regular Python package::
$ export WAFDIR=ROOT_OF_WAF_REPO
# WAFDIR should be such that $WAFDIR/waflib exists
* Build scipy with Bento::
$ BENTO_ROOT/bentomaker build -j 4 # 4 threads in parallel
# or with progress bar
$ BENTO_ROOT/bentomaker build -p
# or with verbose output
$ BENTO_ROOT/bentomaker build -v

View file

@ -1,481 +0,0 @@
=====================
Contributing to SciPy
=====================
This document aims to give an overview of how to contribute to SciPy. It
tries to answer commonly asked questions, and provide some insight into how the
community process works in practice. Readers who are familiar with the SciPy
community and are experienced Python coders may want to jump straight to the
`git workflow`_ documentation.
.. note::
You may want to check the latest version of this guide, which is
available at:
https://github.com/scipy/scipy/blob/master/HACKING.rst.txt
Contributing new code
=====================
If you have been working with the scientific Python toolstack for a while, you
probably have some code lying around of which you think "this could be useful
for others too". Perhaps it's a good idea then to contribute it to SciPy or
another open source project. The first question to ask is then, where does
this code belong? That question is hard to answer here, so we start with a
more specific one: *what code is suitable for putting into SciPy?*
Almost all of the new code added to scipy has in common that it's potentially
useful in multiple scientific domains and it fits in the scope of existing
scipy submodules. In principle new submodules can be added too, but this is
far less common. For code that is specific to a single application, there may
be an existing project that can use the code. Some scikits (`scikit-learn`_,
`scikit-image`_, `statsmodels`_, etc.) are good examples here; they have a
narrower focus and because of that more domain-specific code than SciPy.
Now if you have code that you would like to see included in SciPy, how do you
go about it? After checking that your code can be distributed in SciPy under a
compatible license (see FAQ for details), the first step is to discuss on the
scipy-dev mailing list. All new features, as well as changes to existing code,
are discussed and decided on there. You can, and probably should, already
start this discussion before your code is finished.
Assuming the outcome of the discussion on the mailing list is positive and you
have a function or piece of code that does what you need it to do, what next?
Before code is added to SciPy, it at least has to have good documentation, unit
tests and correct code style.
1. Unit tests
In principle you should aim to create unit tests that exercise all the code
that you are adding. This gives some degree of confidence that your code
runs correctly, also on Python versions and hardware or OSes that you don't
have available yourself. An extensive description of how to write unit
tests is given in the NumPy `testing guidelines`_.
2. Documentation
Clear and complete documentation is essential in order for users to be able
to find and understand the code. Documentation for individual functions
and classes -- which includes at least a basic description, type and
meaning of all parameters and return values, and usage examples in
`doctest`_ format -- is put in docstrings. Those docstrings can be read
within the interpreter, and are compiled into a reference guide in html and
pdf format. Higher-level documentation for key (areas of) functionality is
provided in tutorial format and/or in module docstrings. A guide on how to
write documentation is given in `how to document`_.
3. Code style
Uniformity of style in which code is written is important to others trying
to understand the code. SciPy follows the standard Python guidelines for
code style, `PEP8`_. In order to check that your code conforms to PEP8,
you can use the `pep8 package`_ style checker. Most IDEs and text editors
have settings that can help you follow PEP8, for example by translating
tabs by four spaces. Using `pyflakes`_ to check your code is also a good
idea.
At the end of this document a checklist is given that may help to check if your
code fulfills all requirements for inclusion in SciPy.
Another question you may have is: *where exactly do I put my code*? To answer
this, it is useful to understand how the SciPy public API (application
programming interface) is defined. For most modules the API is two levels
deep, which means your new function should appear as
``scipy.submodule.my_new_func``. ``my_new_func`` can be put in an existing or
new file under ``/scipy/<submodule>/``, its name is added to the ``__all__``
list in that file (which lists all public functions in the file), and those
public functions are then imported in ``/scipy/<submodule>/__init__.py``. Any
private functions/classes should have a leading underscore (``_``) in their
name. A more detailed description of what the public API of SciPy is, is given
in `SciPy API`_.
Once you think your code is ready for inclusion in SciPy, you can send a pull
request (PR) on Github. We won't go into the details of how to work with git
here, this is described well in the `git workflow`_ section of the NumPy
documentation and on the `Github help pages`_. When you send the PR for a new
feature, be sure to also mention this on the scipy-dev mailing list. This can
prompt interested people to help review your PR. Assuming that you already got
positive feedback before on the general idea of your code/feature, the purpose
of the code review is to ensure that the code is correct, efficient and meets
the requirements outlined above. In many cases the code review happens
relatively quickly, but it's possible that it stalls. If you have addressed
all feedback already given, it's perfectly fine to ask on the mailing list
again for review (after a reasonable amount of time, say a couple of weeks, has
passed). Once the review is completed, the PR is merged into the "master"
branch of SciPy.
The above describes the requirements and process for adding code to SciPy. It
doesn't yet answer the question though how decisions are made exactly. The
basic answer is: decisions are made by consensus, by everyone who chooses to
participate in the discussion on the mailing list. This includes developers,
other users and yourself. Aiming for consensus in the discussion is important
-- SciPy is a project by and for the scientific Python community. In those
rare cases that agreement cannot be reached, the `maintainers`_ of the module
in question can decide the issue.
Contributing by helping maintain existing code
==============================================
The previous section talked specifically about adding new functionality to
SciPy. A large part of that discussion also applies to maintenance of existing
code. Maintenance means fixing bugs, improving code quality or style,
documenting existing functionality better, adding missing unit tests, keeping
build scripts up-to-date, etc. The SciPy `issue list`_ contains all
reported bugs, build/documentation issues, etc. Fixing issues
helps improve the overall quality of SciPy, and is also a good way
of getting familiar with the project. You may also want to fix a bug because
you ran into it and need the function in question to work correctly.
The discussion on code style and unit testing above applies equally to bug
fixes. It is usually best to start by writing a unit test that shows the
problem, i.e. it should pass but doesn't. Once you have that, you can fix the
code so that the test does pass. That should be enough to send a PR for this
issue. Unlike when adding new code, discussing this on the mailing list may
not be necessary - if the old behavior of the code is clearly incorrect, no one
will object to having it fixed. It may be necessary to add some warning or
deprecation message for the changed behavior. This should be part of the
review process.
Other ways to contribute
========================
There are many ways to contribute other than contributing code. Participating
in discussions on the scipy-user and scipy-dev *mailing lists* is a contribution
in itself. The `scipy.org`_ *website* contains a lot of information on the
SciPy community and can always use a new pair of hands.
Recommended development setup
=============================
Since Scipy contains parts written in C, C++, and Fortran that need to be
compiled before use, make sure you have the necessary compilers and Python
development headers installed. Having compiled code also means that importing
Scipy from the development sources needs some additional steps, which are
explained below.
First fork a copy of the main Scipy repository in Github onto your own
account and then create your local repository via::
$ git clone git@github.com:YOURUSERNAME/scipy.git scipy
$ cd scipy
$ git remote add upstream git://github.com/scipy/scipy.git
To build the development version of Scipy and run tests, spawn
interactive shells with the Python import paths properly set up etc.,
do one of::
$ python runtests.py -v
$ python runtests.py -v -s optimize
$ python runtests.py -v -t scipy/special/tests/test_basic.py:test_xlogy
$ python runtests.py --ipython
$ python runtests.py --python somescript.py
$ python runtests.py --bench
This builds Scipy first, so the first time it may take some time. If
you specify ``-n``, the tests are run against the version of Scipy (if
any) found on current PYTHONPATH.
Using ``runtests.py`` is the recommended approach to running tests.
There are also a number of alternatives to it, for example in-place
build or installing to a virtualenv. See the FAQ below for details.
Some of the tests in Scipy are very slow and need to be separately
enabled. See the FAQ below for details.
SciPy structure
===============
All SciPy modules should follow the following conventions. In the
following, a *SciPy module* is defined as a Python package, say
``yyy``, that is located in the scipy/ directory.
* Ideally, each SciPy module should be as self-contained as possible.
That is, it should have minimal dependencies on other packages or
modules. Even dependencies on other SciPy modules should be kept to
a minimum. A dependency on NumPy is of course assumed.
* Directory ``yyy/`` contains:
- A file ``setup.py`` that defines
``configuration(parent_package='',top_path=None)`` function
for `numpy.distutils`.
- A directory ``tests/`` that contains files ``test_<name>.py``
corresponding to modules ``yyy/<name>{.py,.so,/}``.
* Private modules should be prefixed with an underscore ``_``,
for instance ``yyy/_somemodule.py``.
* User-visible functions should have good documentation following
the Numpy documentation style, see `how to document`_
* The ``__init__.py`` of the module should contain the main reference
documentation in its docstring. This is connected to the Sphinx
documentation under ``doc/`` via Sphinx's automodule directive.
The reference documentation should first give a categorized list of
the contents of the module using ``autosummary::`` directives, and
after that explain points essential for understanding the use of the
module.
Tutorial-style documentation with extensive examples should be
separate, and put under ``doc/source/tutorial/``
See the existing Scipy submodules for guidance.
For further details on Numpy distutils, see:
https://github.com/numpy/numpy/blob/master/doc/DISTUTILS.rst.txt
Useful links, FAQ, checklist
============================
Checklist before submitting a PR
--------------------------------
- Are there unit tests with good code coverage?
- Do all public functions have docstrings, including examples?
- Is the code style correct (PEP8, pyflakes)?
- Is the new functionality tagged with ``.. versionadded:: X.Y.Z`` (with
X.Y.Z the version number of the next release - can be found in setup.py)?
- Is the new functionality mentioned in the release notes of the next
release?
- Is the new functionality added to the reference guide?
- In case of larger additions, is there a tutorial or more extensive
module-level description?
- In case compiled code is added, is it integrated correctly via setup.py
(and preferably also Bento configuration files - bento.info and bscript)?
- If you are a first-time contributor, did you add yourself to THANKS.txt?
Please note that this is perfectly normal and desirable - the aim is to
give every single contributor credit, and if you don't add yourself it's
simply extra work for the reviewer (or worse, the reviewer may forget).
- Did you check that the code can be distributed under a BSD license?
Useful SciPy documents
----------------------
- The `how to document`_ guidelines
- NumPy/SciPy `testing guidelines`_
- `SciPy API`_
- SciPy `maintainers`_
- NumPy/SciPy `git workflow`_
FAQ
---
*I based my code on existing Matlab/R/... code I found online, is this OK?*
It depends. SciPy is distributed under a BSD license, so if the code that you
based your code on is also BSD licensed or has a BSD-compatible license (MIT,
Apache, ...) then it's OK. Code which is GPL-licensed, has no clear license,
requires citation or is free for academic use only can't be included in SciPy.
Therefore if you copied existing code with such a license or made a direct
translation to Python of it, your code can't be included. See also `license
compatibility`_.
*Why is SciPy under the BSD license and not, say, the GPL?*
Like Python, SciPy uses a "permissive" open source license, which allows
proprietary re-use. While this allows companies to use and modify the software
without giving anything back, it is felt that the larger user base results in
more contributions overall, and companies often publish their modifications
anyway, without being required to. See John Hunter's `BSD pitch`_.
*How do I set up a development version of SciPy in parallel to a released
version that I use to do my job/research?*
One simple way to achieve this is to install the released version in
site-packages, by using a binary installer or pip for example, and set
up the development version in a virtualenv. First install
`virtualenv`_ (optionally use `virtualenvwrapper`_), then create your
virtualenv (named scipy-dev here) with::
$ virtualenv scipy-dev
Now, whenever you want to switch to the virtual environment, you can use the
command ``source scipy-dev/bin/activate``, and ``deactivate`` to exit from the
virtual environment and back to your previous shell. With scipy-dev
activated, install first Scipy's dependencies::
$ pip install Numpy Nose Cython
After that, you can install a development version of Scipy, for example via::
$ python setup.py install
The installation goes to the virtual environment.
*How do I set up an in-place build for development?*
For development, you can set up an in-place build so that changes made to
``.py`` files have effect without rebuild. First, run::
$ python setup.py build_ext -i
Then you need to point your PYTHONPATH environment variable to this directory.
Some IDEs (Spyder for example) have utilities to manage PYTHONPATH. On Linux
and OSX, you can run the command::
$ export PYTHONPATH=$PWD
and on Windows
$ set PYTHONPATH=/path/to/scipy
Now editing a Python source file in SciPy allows you to immediately
test and use your changes (in ``.py`` files), by simply restarting the
interpreter.
*Can I use a programming language other than Python to speed up my code?*
Yes. The languages used in SciPy are Python, Cython, C, C++ and Fortran. All
of these have their pros and cons. If Python really doesn't offer enough
performance, one of those languages can be used. Important concerns when
using compiled languages are maintainability and portability. For
maintainability, Cython is clearly preferred over C/C++/Fortran. Cython and C
are more portable than C++/Fortran. A lot of the existing C and Fortran code
in SciPy is older, battle-tested code that was only wrapped in (but not
specifically written for) Python/SciPy. Therefore the basic advice is: use
Cython. If there's specific reasons why C/C++/Fortran should be preferred,
please discuss those reasons first.
*How do I debug code written in C/C++/Fortran inside Scipy?*
The easiest way to do this is to first write a Python script that
invokes the C code whose execution you want to debug. For instance
``mytest.py``::
from scipy.special import hyp2f1
print(hyp2f1(5.0, 1.0, -1.8, 0.95))
Now, you can run::
gdb --args python runtests.py -g --python mytest.py
If you didn't compile with debug symbols enabled before, remove the
``build`` directory first. While in the debugger::
(gdb) break cephes_hyp2f1
(gdb) run
The execution will now stop at the corresponding C function and you
can step through it as usual. Instead of plain ``gdb`` you can of
course use your favourite alternative debugger; run it on the
``python`` binary with arguments ``runtests.py -g --python mytest.py``.
*How do I enable additional tests in Scipy?*
Some of the tests in Scipy's test suite are very slow and not enabled
by default. You can run the full suite via::
$ python runtests.py -g -m full
This invokes the test suite ``import scipy; scipy.test("full")``,
enabling also slow tests.
There is an additional level of very slow tests (several minutes),
which are disabled also in this case. They can be enabled by setting
the environment variable ``SCIPY_XSLOW=1`` before running the test
suite.
*How do I write tests with test generators?*
The Nose_ test framework supports so-called test generators, which can come
useful if you need to have multiple tests where just a parameter changes.
Using test generators so that they are more useful than harmful is tricky, and
we recommend the following pattern::
def test_something():
some_array = (...)
def check(some_param):
c = compute_result(some_array, some_param)
known_result = (...)
assert_allclose(c, known_result)
for some_param in ['a', 'b', 'c']:
yield check, some_param
We require the following:
- All asserts and all computation that is tested must only be reached after a
yield. (Rationale: the generator body is part of no test, and a failure in it
will show neither the test name nor for what parameters the test failed.)
- Arrays must not be passed as yield parameters. Either use variables from
outer scope (e.g. with some index passed to yield), or encapsulate test data in
a class with a sensible ``__repr__``. (Rationale: Nose truncates the printed
form of arrays in test output, and this makes it impossible to know for what
parameters a test failed. Arrays are big, and clutter test output
unnecessarily.)
- Test generators cannot be used in test classes inheriting from
unittest.TestCase; either use object as base class, or use standalone test
functions. (Rationale: Nose does not run test generators in
TestCase-inheriting classes.)
If in doubt, do not use test generators. You can track for what parameter
things failed also by passing ``err_msg=repr((param1, param2, ...))`` to the
various assert functions.
.. _scikit-learn: http://scikit-learn.org
.. _scikit-image: http://scikit-image.org/
.. _statsmodels: http://statsmodels.sourceforge.net/
.. _testing guidelines: https://github.com/numpy/numpy/blob/master/doc/TESTS.rst.txt
.. _how to document: https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt
.. _PEP8: http://www.python.org/dev/peps/pep-0008/
.. _pep8 package: http://pypi.python.org/pypi/pep8
.. _pyflakes: http://pypi.python.org/pypi/pyflakes
.. _SciPy API: http://docs.scipy.org/doc/scipy/reference/api.html
.. _git workflow: http://docs.scipy.org/doc/numpy/dev/gitwash/index.html
.. _Github help pages: https://help.github.com/articles/set-up-git/
.. _maintainers: https://github.com/scipy/scipy/blob/master/doc/MAINTAINERS.rst.txt
.. _issue list: https://github.com/scipy/scipy/issues
.. _Github: https://github.com/scipy/scipy
.. _scipy.org: http://scipy.org/
.. _scipy.github.com: http://scipy.github.com/
.. _scipy.org-new: https://github.com/scipy/scipy.org-new
.. _documentation wiki: http://docs.scipy.org/scipy/Front%20Page/
.. _SciPy Central: http://scipy-central.org/
.. _license compatibility: http://www.scipy.org/License_Compatibility
.. _doctest: http://www.doughellmann.com/PyMOTW/doctest/
.. _virtualenv: http://www.virtualenv.org/
.. _virtualenvwrapper: http://www.doughellmann.com/projects/virtualenvwrapper/
.. _bsd pitch: http://nipy.sourceforge.net/nipy/stable/faq/johns_bsd_pitch.html
.. _Nose: http://nose.readthedocs.org/en/latest/

View file

@ -1,401 +0,0 @@
Building and installing SciPy
+++++++++++++++++++++++++++++
See http://www.scipy.org/Installing_SciPy/
for more extensive (and possibly more up-to-date) build instructions.
.. Contents::
INTRODUCTION
============
It is *strongly* recommended that you use either a complete scientific Python
distribution or binary packages on your platform if they are available, in
particular on Windows and Mac OS X. You should not attempt to build SciPy if
you are not familiar with compiling software from sources.
Recommended distributions are:
- Enthought Canopy (https://www.enthought.com/products/canopy/)
- Anaconda (https://store.continuum.io/cshop/anaconda)
- Python(x,y) (http://code.google.com/p/pythonxy/)
- WinPython (http://code.google.com/p/winpython/)
PREREQUISITES
=============
SciPy requires the following software installed for your platform:
1) Python__ 2.6.x or newer
__ http://www.python.org
2) NumPy__ 1.6.2 or newer (note: SciPy trunk at times requires latest NumPy
trunk).
__ http://www.numpy.org/
3) If you want to build the documentation: Sphinx__ 1.1.0 or newer
__ http://sphinx-doc.org/
4) If you want to build SciPy master or other unreleased version from source
(Cython-generated C sources are included in official releases):
Cython__ 0.22 or newer
__ http://cython.org/
Windows
-------
Compilers
~~~~~~~~~
It is recommended to use the mingw__ compilers on Windows: you will need gcc
(C), g++ (C++) and g77 (Fortran) compilers.
__ http://www.mingw.org
Blas/Lapack
~~~~~~~~~~~
Blas/Lapack are core routines for linear algebra (vector/matrix operations).
You should use ATLAS__ with a full LAPACK, or simple BLAS/LAPACK built with g77
from netlib__ sources. Building those libraries on windows may be difficult, as
they assume a unix-style environment. Please use the binaries if you don't feel
comfortable with cygwin, make and similar tools.
__ http://math-atlas.sourceforge.net/
__ http://www.netlib.org/lapack/
Mac OS X
--------
Compilers
~~~~~~~~~
It is recommended to use gcc. gcc is available for free when installing
`Xcode`_, the developer toolsuite on Mac OS X. You also need a fortran compiler,
which is not included with Xcode: you should use gfortran from this page:
http://r.research.att.com/tools/
Please do NOT use gfortran from `hpc.sourceforge.net <http://hpc.sourceforge.net>`_, it is known to generate
buggy scipy binaries.
.. _Xcode: http://developer.apple.com/TOOLS/xcode
Blas/Lapack
~~~~~~~~~~~
Mac OS X includes the Accelerate framework: it should be detected without any
intervention when building SciPy.
Linux
-----
Most common distributions include all the dependencies. Here are some
instructions for the most common ones:
Ubuntu >= 8.10
~~~~~~~~~~~~~~
You can get all the dependencies as follows::
sudo apt-get install python python-dev libatlas-base-dev gcc gfortran g++
Ubuntu < 8.10, Debian
~~~~~~~~~~~~~~~~~~~~~
You can get all the dependencies as follows::
sudo apt-get install python python-dev atlas3-base-dev gcc g77 g++
OpenSuse >= 10
~~~~~~~~~~~~~~
RHEL
~~~~
Fedora Core
~~~~~~~~~~~
GETTING SCIPY
=============
For the latest information, see the web site:
http://www.scipy.org
Development version from Git
----------------------------
Use the command::
git clone https://github.com/scipy/scipy.git
Before building and installing from git, remove the old installation
(e.g. in /usr/lib/python3.4/site-packages/scipy or
$HOME/lib/python3.4/site-packages/scipy). Then type::
cd scipy
git clean -xdf
python setup.py install
Documentation
-------------
Type::
cd scipy
python setup.py build_sphinx
INSTALLATION
============
First make sure that all SciPy prerequisites are installed and working
properly. Then be sure to remove any old SciPy installations (e.g.
/usr/lib/python3.4/site-packages/scipy or $HOME/lib/python3.4/
site-packages/scipy). On windows, if you installed scipy previously from a
binary, use the remove facility from the add/remove software panel, or remove
the scipy directory by hand if you installed from sources (e.g.
C:\Python34\Lib\site-packages\scipy for python 3.4).
From tarballs
-------------
Unpack ``SciPy-<version>.tar.gz``, change to the ``SciPy-<version>/``
directory, and run
::
python setup.py install
This may take several minutes to an hour depending on the speed of your
computer. To install to a user-specific location instead, run::
python setup.py install --prefix=$MYDIR
where $MYDIR is, for example, $HOME or $HOME/usr.
** Note 1: On Unix, you should avoid installing in /usr, but rather in
/usr/local or somewhere else. /usr is generally 'owned' by your package
manager, and you may overwrite a packaged scipy this way.
TESTING
=======
To test SciPy after installation (highly recommended), execute in Python
>>> import scipy
>>> scipy.test()
To run the full test suite use
>>> scipy.test('full')
Please note that you must have version 0.10 or later of the 'nose' test
framework installed in order to run the tests. More information about nose is
available on the website__.
__ http://somethingaboutorange.com/mrl/projects/nose/
COMPILER NOTES
==============
Note that SciPy is developed mainly using GNU compilers. Compilers from
other vendors such as Intel, Absoft, Sun, NAG, Compaq, Vast, Portland,
Lahey, HP, IBM are supported in the form of community feedback.
gcc__ compiler is recommended. gcc 3.x and 4.x are known to work.
If building on OS X, you should use the provided gcc by xcode tools, and the
gfortran compiler available here:
http://r.research.att.com/tools/
You can specify which Fortran compiler to use by using the following
install command::
python setup.py config_fc --fcompiler=<Vendor> install
To see a valid list of <Vendor> names, run::
python setup.py config_fc --help-fcompiler
IMPORTANT: It is highly recommended that all libraries that scipy uses (e.g.
blas and atlas libraries) are built with the same Fortran compiler. In most
cases, if you mix compilers, you will not be able to import scipy at best, or have
crashes and random results at worst.
__ http://gcc.gnu.org/
Using non-GNU Fortran compiler with gcc/g77 compiled Atlas/Lapack libraries
---------------------------------------------------------------------------
When Atlas/Lapack libraries are compiled with GNU compilers but
one wishes to build scipy with some non-GNU Fortran compiler then
linking extension modules may require -lg2c. You can specify it
in installation command line as follows::
python setup.py build build_ext -lg2c install
If using non-GNU C compiler or linker, the location of g2c library can
be specified in a similar manner using -L/path/to/libg2c.a after
build_ext command.
Intel Fortran Compiler
----------------------
Note that code compiled by the Intel Fortran Compiler (IFC) is not
binary compatible with code compiled by g77. Therefore, when using IFC,
all Fortran codes used in SciPy must be compiled with IFC. This also
includes the LAPACK, BLAS, and ATLAS libraries. Using GCC for compiling
C code is OK. IFC version 5.0 is not supported (because it has bugs that
cause SciPy's tests to segfault).
Minimum IFC flags for building LAPACK and ATLAS are
::
-FI -w90 -w95 -cm -O3 -unroll
Also consult 'ifc -help' for additional optimization flags suitable
for your computer's CPU.
If you want to have the LAPACK tests pass the 'ieee' compliancy test, you have to
use the -mp (='more precise') compiler option. Note that there's a significant hit
on the resulting performance though, reducing it to almost GNU level, but not quite.
When finishing LAPACK build, you must recompile ?lamch.f, xerbla.f
with optimization disabled (otherwise infinite loops occur when using
these routines)::
make lapacklib # in /path/to/src/LAPACK/
cd SRC
ifc -FI -w90 -w95 -cm -O0 -c ?lamch.f xerbla.f
cd ..
make lapacklib
KNOWN INSTALLATION PROBLEMS
===========================
BLAS sources shipped with LAPACK are incomplete
-----------------------------------------------
Some distributions (e.g. Redhat Linux 7.1) provide BLAS libraries that
are built from such incomplete sources and therefore cause import
errors like
::
ImportError: .../fblas.so: undefined symbol: srotmg_
Fix:
Use ATLAS or the official release of BLAS libraries.
LAPACK library provided by ATLAS is incomplete
----------------------------------------------
You will notice it when getting import errors like
::
ImportError: .../flapack.so : undefined symbol: sgesdd_
To be sure that SciPy is built against a complete LAPACK, check the
size of the file liblapack.a -- it should be about 6MB. The location
of liblapack.a is shown by executing
::
python /lib/python3.4/site-packages/numpy/distutils/system_info.py
(or the appropriate installation directory).
To fix: follow the instructions in
http://math-atlas.sourceforge.net/errata.html#completelp
to create a complete liblapack.a. Then copy liblapack.a to the same
location where libatlas.a is installed and retry with scipy build.
Using non-GNU Fortran Compiler
------------------------------
If import scipy shows a message
::
ImportError: undefined symbol: s_wsfe
and you are using non-GNU Fortran compiler, then it means that any of
the (may be system provided) Fortran libraries such as LAPACK or BLAS
were compiled with g77. See also compilers notes above.
Recommended fix: Recompile all Fortran libraries with the same Fortran
compiler and rebuild/reinstall scipy.
Another fix: See `Using non-GNU Fortran compiler with gcc/g77 compiled
Atlas/Lapack libraries` section above.
TROUBLESHOOTING
===============
If you experience problems when building/installing/testing SciPy, you
can ask help from scipy-user@scipy.org or scipy-dev@scipy.org mailing
lists. Please include the following information in your message:
NOTE: You can generate some of the following information (items 1-5,7)
in one command::
python -c 'from numpy.f2py.diagnose import run; run()'
1) Platform information::
python -c 'import os, sys; print(os.name, sys.platform)'
uname -a
OS, its distribution name and version information
etc.
2) Information about C,C++,Fortran compilers/linkers as reported by
the compilers when requesting their version information, e.g.,
the output of
::
gcc -v
g77 --version
3) Python version::
python -c 'import sys; print(sys.version)'
4) NumPy version::
python -c 'import numpy; print(numpy.__version__)'
5) ATLAS version, the locations of atlas and lapack libraries, building
information if any. If you have ATLAS version 3.3.6 or newer, then
give the output of the last command in
::
cd scipy/Lib/linalg
python setup_atlas_version.py build_ext --inplace --force
python -c 'import atlas_version'
7) The output of the following commands
::
python INSTALLDIR/numpy/distutils/system_info.py
where INSTALLDIR is, for example, /usr/lib/python3.4/site-packages/.
8) Feel free to add any other relevant information.
For example, the full output (both stdout and stderr) of the SciPy
installation command can be very helpful. Since this output can be
rather large, ask before sending it into the mailing list (or
better yet, to one of the developers, if asked).
9) In case of failing to import extension modules, the output of
::
ldd /path/to/ext_module.so
can be useful.
You may find the following notes useful:
http://www.tuxedo.org/~esr/faqs/smart-questions.html
http://www.chiark.greenend.org.uk/~sgtatham/bugs.html

View file

@ -1,374 +0,0 @@
Building and installing SciPy
+++++++++++++++++++++++++++++
See http://www.scipy.org/scipy/scipy/wiki/GetCode
for updates of this document.
.. Contents::
INTRODUCTION
============
It is *strongly* recommended that you use the binary packages on your platform
if they are available, in particular on Windows and Mac OS X. You should not
attempt to build SciPy if you are not familiar with compiling software from
sources.
PREREQUISITES
=============
SciPy requires the following software installed for your platform:
1) Python__ 2.4.x or newer
__ http://www.python.org
2) NumPy__ 1.4.1 or newer (note: SciPy trunk at times requires latest NumPy
trunk).
__ http://www.numpy.org/
Windows
-------
Compilers
~~~~~~~~~
It is recommended to use the mingw__ compilers on Windows: you will need gcc
(C), g++ (C++) and g77 (Fortran) compilers.
__ http://www.mingw.org
Blas/Lapack
~~~~~~~~~~~
Blas/Lapack are core routines for linear algebra (vector/matrix operations).
You should use ATLAS__ with a full LAPACK, or simple BLAS/LAPACK built with g77
from netlib__ sources. Building those libraries on windows may be difficult, as
they assume a unix-style environment. Please use the binaries if you don't feel
comfortable with cygwin, make and similar tools.
__ http://math-atlas.sourceforge.net/
__ http://www.netlib.org/lapack/
Mac OS X
--------
Compilers
~~~~~~~~~
It is recommended to use gcc. gcc is available for free when installing
Xcode__, the developer toolsuite on Mac OS X. You also need a fortran compiler,
which is not included with Xcode: you should use gfortran from this page:
__ http://r.research.att.com/tools/
Please do NOT use gfortran from hpc.sourceforge.net, it is known to generate
buggy scipy binaries.
.. _Xcode: http://developer.apple.com/TOOLS/xcode
Blas/Lapack
~~~~~~~~~~~
Mac OS X includes the Accelerate framework: it should be detected without any
intervention when building SciPy.
Linux
-----
Most common distributions include all the dependencies. Here are some
instructions for the most common ones:
Ubuntu >= 8.10
~~~~~~~~~~~~~~
You can get all the dependencies as follows::
sudo apt-get install python python-dev libatlas3-base-dev gcc gfortran g++
Ubuntu < 8.10, Debian
~~~~~~~~~~~~~~~~~~~~~
You can get all the dependencies as follows::
sudo apt-get install python python-dev atlas3-base-dev gcc g77 g++
OpenSuse >= 10
~~~~~~~~~~~~~~
RHEL
~~~~
Fedora Core
~~~~~~~~~~~
GETTING SCIPY
=============
For the latest information, see the web site:
http://www.scipy.org
Development version from Subversion (SVN)
-----------------------------------------
Use the command::
svn co http://svn.scipy.org/svn/scipy/trunk scipy
Before building and installing from SVN, remove the old installation
(e.g. in /usr/lib/python2.4/site-packages/scipy or
$HOME/lib/python2.4/site-packages/scipy). Then type::
cd scipy
rm -rf build
python setup.py install
INSTALLATION
============
First make sure that all SciPy prerequisites are installed and working
properly. Then be sure to remove any old SciPy installations (e.g.
/usr/lib/python2.4/site-packages/scipy or $HOME/lib/python2.4/
site-packages/scipy). On windows, if you installed scipy previously from a
binary, use the remove facility from the add/remove software panel, or remove
the scipy directory by hand if you installed from sources (e.g.
C:\Python24\Lib\site-packages\scipy for python 2.4).
From tarballs
-------------
Unpack ``SciPy-<version>.tar.gz``, change to the ``SciPy-<version>/``
directory, and run
::
python setup.py install
This may take several minutes to an hour depending on the speed of your
computer. To install to a user-specific location instead, run::
python setup.py install --prefix=$MYDIR
where $MYDIR is, for example, $HOME or $HOME/usr.
** Note 1: On Unix, you should avoid installing in /usr, but rather in
/usr/local or somewhere else. /usr is generally 'owned' by your package
manager, and you may overwrite a packaged scipy this way.
TESTING
=======
To test SciPy after installation (highly recommended), execute in Python
>>> import scipy
>>> scipy.test()
To run the full test suite use
>>> scipy.test('full')
Please note that you must have version 0.10 or later of the 'nose' test
framework installed in order to run the tests. More information about nose is
available on the website__.
__ http://somethingaboutorange.com/mrl/projects/nose/
COMPILER NOTES
==============
Note that SciPy is developed mainly using GNU compilers. Compilers from
other vendors such as Intel, Absoft, Sun, NAG, Compaq, Vast, Portland,
Lahey, HP, IBM are supported in the form of community feedback.
gcc__ compiler is recommended. gcc 3.x and 4.x are known to work.
If building on OS X, you should use the provided gcc by xcode tools, and the
gfortran compiler available here:
http://r.research.att.com/tools/
You can specify which Fortran compiler to use by using the following
install command::
python setup.py config_fc --fcompiler=<Vendor> install
To see a valid list of <Vendor> names, run::
python setup.py config_fc --help-fcompiler
IMPORTANT: It is highly recommended that all libraries that scipy uses (e.g.
blas and atlas libraries) are built with the same Fortran compiler. In most
cases, if you mix compilers, you will not be able to import scipy at best, or have
crashes and random results at worst.
__ http://gcc.gnu.org/
Using non-GNU Fortran compiler with gcc/g77 compiled Atlas/Lapack libraries
---------------------------------------------------------------------------
When Atlas/Lapack libraries are compiled with GNU compilers but
one wishes to build scipy with some non-GNU Fortran compiler then
linking extension modules may require -lg2c. You can specify it
in installation command line as follows::
python setup.py build build_ext -lg2c install
If using non-GNU C compiler or linker, the location of g2c library can
be specified in a similar manner using -L/path/to/libg2c.a after
build_ext command.
Intel Fortran Compiler
----------------------
Note that code compiled by the Intel Fortran Compiler (IFC) is not
binary compatible with code compiled by g77. Therefore, when using IFC,
all Fortran codes used in SciPy must be compiled with IFC. This also
includes the LAPACK, BLAS, and ATLAS libraries. Using GCC for compiling
C code is OK. IFC version 5.0 is not supported (because it has bugs that
cause SciPy's tests to segfault).
Minimum IFC flags for building LAPACK and ATLAS are
::
-FI -w90 -w95 -cm -O3 -unroll
Also consult 'ifc -help' for additional optimization flags suitable
for your computer's CPU.
When finishing LAPACK build, you must recompile ?lamch.f, xerbla.f
with optimization disabled (otherwise infinite loops occur when using
these routines)::
make lapacklib # in /path/to/src/LAPACK/
cd SRC
ifc -FI -w90 -w95 -cm -O0 -c ?lamch.f xerbla.f
cd ..
make lapacklib
KNOWN INSTALLATION PROBLEMS
===========================
BLAS sources shipped with LAPACK are incomplete
-----------------------------------------------
Some distributions (e.g. Redhat Linux 7.1) provide BLAS libraries that
are built from such incomplete sources and therefore cause import
errors like
::
ImportError: .../fblas.so: undefined symbol: srotmg_
Fix:
Use ATLAS or the official release of BLAS libraries.
LAPACK library provided by ATLAS is incomplete
----------------------------------------------
You will notice it when getting import errors like
::
ImportError: .../flapack.so : undefined symbol: sgesdd_
To be sure that SciPy is built against a complete LAPACK, check the
size of the file liblapack.a -- it should be about 6MB. The location
of liblapack.a is shown by executing
::
python /lib/python2.4/site-packages/numpy/distutils/system_info.py
(or the appropriate installation directory).
To fix: follow the instructions in
http://math-atlas.sourceforge.net/errata.html#completelp
to create a complete liblapack.a. Then copy liblapack.a to the same
location where libatlas.a is installed and retry with scipy build.
Using non-GNU Fortran Compiler
------------------------------
If import scipy shows a message
::
ImportError: undefined symbol: s_wsfe
and you are using non-GNU Fortran compiler, then it means that any of
the (may be system provided) Fortran libraries such as LAPACK or BLAS
were compiled with g77. See also compilers notes above.
Recommended fix: Recompile all Fortran libraries with the same Fortran
compiler and rebuild/reinstall scipy.
Another fix: See `Using non-GNU Fortran compiler with gcc/g77 compiled
Atlas/Lapack libraries` section above.
TROUBLESHOOTING
===============
If you experience problems when building/installing/testing SciPy, you
can ask help from scipy-user@scipy.org or scipy-dev@scipy.org mailing
lists. Please include the following information in your message:
NOTE: You can generate some of the following information (items 1-5,7)
in one command::
python -c 'from numpy.f2py.diagnose import run; run()'
1) Platform information::
python -c 'import os,sys;print os.name,sys.platform'
uname -a
OS, its distribution name and version information
etc.
2) Information about C,C++,Fortran compilers/linkers as reported by
the compilers when requesting their version information, e.g.,
the output of
::
gcc -v
g77 --version
3) Python version::
python -c 'import sys;print sys.version'
4) NumPy version::
python -c 'import numpy;print numpy.__version__'
5) ATLAS version, the locations of atlas and lapack libraries, building
information if any. If you have ATLAS version 3.3.6 or newer, then
give the output of the last command in
::
cd scipy/Lib/linalg
python setup_atlas_version.py build_ext --inplace --force
python -c 'import atlas_version'
7) The output of the following commands
::
python INSTALLDIR/numpy/distutils/system_info.py
where INSTALLDIR is, for example, /usr/lib/python2.4/site-packages/.
8) Feel free to add any other relevant information.
For example, the full output (both stdout and stderr) of the SciPy
installation command can be very helpful. Since this output can be
rather large, ask before sending it into the mailing list (or
better yet, to one of the developers, if asked).
9) In case of failing to import extension modules, the output of
::
ldd /path/to/ext_module.so
can be useful.
You may find the following notes useful:
http://www.tuxedo.org/~esr/faqs/smart-questions.html
http://www.chiark.greenend.org.uk/~sgtatham/bugs.html

View file

@ -1,9 +0,0 @@
The Subversion tree for this distribution contains the latest code.
It can be downloaded using the subversion client as
svn co http://svn.scipy.org/svn/scipy/trunk scipy
which will create a directory named scipy in your current directory
and fill it with the current version of scipy.

View file

@ -1,50 +0,0 @@
Copyright (c) 2001, 2002 Enthought, Inc.
All rights reserved.
<<<<<<< HEAD
Copyright (c) 2003-2009 SciPy Developers.
=======
Copyright (c) 2003-2012 SciPy Developers.
>>>>>>> 85b42d3bbdcef5cbe0fe2390bba8b3ff1608040b
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
a. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
b. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
<<<<<<< HEAD
c. Neither the name of the Enthought nor the names of its contributors
=======
c. Neither the name of Enthought nor the names of the SciPy Developers
>>>>>>> 85b42d3bbdcef5cbe0fe2390bba8b3ff1608040b
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
<<<<<<< HEAD
ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
=======
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS
BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
>>>>>>> 85b42d3bbdcef5cbe0fe2390bba8b3ff1608040b

View file

@ -1,47 +0,0 @@
<<<<<<< HEAD
# Use .add_data_files and .add_data_dir methods in an appropriate
# setup.py files to include non-python files such as documentation,
# data, etc files to distribution. Avoid using MANIFEST.in for that.
#
include MANIFEST.in
include *.txt
include setupscons.py
include setupegg.py
include setup.py
include scipy/*.py
# Adding scons build relateed files not found by distutils
recursive-include scipy SConstruct SConscript
# Add documentation: we don't use add_data_dir since we do not want to include
# this at installation, only for sdist-generated tarballs
include doc/Makefile doc/postprocess.py
recursive-include doc/release *
recursive-include doc/source *
recursive-include doc/sphinxext *
prune scipy/special/tests/data/boost
=======
include MANIFEST.in
include *.txt
# Top-level build scripts
include setup.py setupegg.py bscript bento.info
# All source files
recursive-include scipy *
# All documentation
recursive-include doc *
# Add build and testing tools
include tox.ini
recursive-include tools *
# Cached Cython signatures
include cythonize.dat
# Exclude what we don't want to include
recursive-exclude scipy/linalg/src/id_dist/src *_subr_*.f
prune benchmarks/env
prune benchmarks/results
prune benchmarks/html
prune benchmarks/scipy
prune scipy/special/tests/data/boost
prune scipy/special/tests/data/gsl
prune doc/build
prune doc/source/generated
prune */__pycache__
global-exclude *.pyc *~ *.bak *.swp *.pyo
>>>>>>> 85b42d3bbdcef5cbe0fe2390bba8b3ff1608040b

View file

@ -1,40 +0,0 @@
Metadata-Version: 1.0
Name: scipy
Version: 0.8.0
Summary: SciPy: Scientific Library for Python
Home-page: http://www.scipy.org
Author: SciPy Developers
Author-email: scipy-dev@scipy.org
License: BSD
Download-URL: http://sourceforge.net/project/showfiles.php?group_id=27747&package_id=19531
Description: SciPy (pronounced "Sigh Pie") is open-source software for mathematics,
science, and engineering. The SciPy library
depends on NumPy, which provides convenient and fast N-dimensional
array manipulation. The SciPy library is built to work with NumPy
arrays, and provides many user-friendly and efficient numerical
routines such as routines for numerical integration and optimization.
Together, they run on all popular operating systems, are quick to
install, and are free of charge. NumPy and SciPy are easy to use,
but powerful enough to be depended upon by some of the world's
leading scientists and engineers. If you need to manipulate
numbers on a computer and display or publish the results,
give SciPy a try!
Platform: Windows
Platform: Linux
Platform: Solaris
Platform: Mac OS-X
Platform: Unix
Classifier: Development Status :: 4 - Beta
Classifier: Intended Audience :: Science/Research
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved
Classifier: Programming Language :: C
Classifier: Programming Language :: Python
Classifier: Topic :: Software Development
Classifier: Topic :: Scientific/Engineering
Classifier: Operating System :: Microsoft :: Windows
Classifier: Operating System :: POSIX
Classifier: Operating System :: Unix
Classifier: Operating System :: MacOS

View file

@ -1,95 +0,0 @@
.. image:: https://travis-ci.org/scipy/scipy.png?branch=master
:target: https://travis-ci.org/scipy/scipy/
.. image:: https://coveralls.io/repos/scipy/scipy/badge.png?branch=master
:target: https://coveralls.io/r/scipy/scipy?branch=master
=====
SciPy
=====
.. contents::
What is SciPy?
--------------
SciPy (pronounced "Sigh Pie") is open-source software for mathematics,
science, and engineering. It includes modules for statistics, optimization,
integration, linear algebra, Fourier transforms, signal and image processing,
ODE solvers, and more. It is also the name of a very popular conference on
scientific programming with Python.
The SciPy library depends on NumPy, which provides convenient and fast
N-dimensional array manipulation. The SciPy library is built to work with
NumPy arrays, and provides many user-friendly and efficient numerical routines
such as routines for numerical integration and optimization. Together, they
run on all popular operating systems, are quick to install, and are free of
charge. NumPy and SciPy are easy to use, but powerful enough to be depended
upon by some of the world's leading scientists and engineers. If you need to
manipulate numbers on a computer and display or publish the results, give
SciPy a try!
Installation
------------
For installation instructions, see ``INSTALL.txt``.
Documentation
-------------
Scipy documentation is available on the web:
http://docs.scipy.org
How to generate the HTML documentation, see ``doc/README.txt``.
Web sites
---------
The user's site is:
http://www.scipy.org/
Mailing Lists
-------------
Please see the developer's list here:
http://projects.scipy.org/mailman/listinfo/scipy-dev
Latest source code
------------------
The latest development version of Scipy's sources are always available at:
https://github.com/scipy/scipy
They can be downloaded as a zip file or using the Git client.
Bug reports
-----------
To search for bugs or report them, please use the Scipy Bug Tracker at:
https://github.com/scipy/scipy/issues
Developer information
---------------------
If you would like to take part in SciPy development, take a look
at ``HACKING.rst.txt``.
License information
-------------------
See the file ``LICENSE.txt`` for information on the history of this
software, terms & conditions for usage, and a DISCLAIMER OF ALL
WARRANTIES.

View file

@ -1,135 +0,0 @@
=================================================
Developing SciPy
=================================================
.. Contents::
What is SciPy?
--------------
SciPy (pronounced "Sigh Pie") is open-source software for mathematics,
science, and engineering. It includes modules for statistics, optimization,
integration, linear algebra, Fourier transforms, signal and image processing,
ODE solvers, and more. It is also the name of a very popular conference on
scientific programming with Python.
The SciPy library depends on NumPy, which provides convenient and fast
N-dimensional array manipulation. The SciPy library is built to work with
NumPy arrays, and provides many user-friendly and efficient numerical routines
such as routines for numerical integration and optimization. Together, they
run on all popular operating systems, are quick to install, and are free of
charge. NumPy and SciPy are easy to use, but powerful enough to be depended
upon by some of the world's leading scientists and engineers. If you need to
manipulate numbers on a computer and display or publish the results, give
SciPy a try!
SciPy structure
---------------
SciPy aims at being a robust and efficient "super-package" of a number
of modules, each of a non-trivial size and complexity. In order for
"SciPy integration" to work flawlessly, all SciPy modules must follow
certain rules that are described in this document. Hopefully this
document will be helpful for SciPy contributors and developers as a
basic reference about the structure of the SciPy package.
Currently SciPy consists of the following files and directories:
INSTALL.txt
SciPy prerequisites, installation, testing, and troubleshooting.
THANKS.txt
SciPy developers and contributors. Please keep it up to date!!
README.txt
SciPy structure (this document).
setup.py
Script for building and installing SciPy.
MANIFEST.in
Additions to distutils-generated SciPy tar-balls. Its usage is
deprecated.
scipy/
Contains SciPy __init__.py and the directories of SciPy modules.
SciPy modules
+++++++++++++
In the following, a *SciPy module* is defined as a Python package, say
xxx, that is located in the scipy/ directory. All SciPy modules should
follow the following conventions:
* Ideally, each SciPy module should be as self-contained as possible.
That is, it should have minimal dependencies on other packages or
modules. Even dependencies on other SciPy modules should be kept to a
minimum. A dependency on NumPy is of course assumed.
* Directory ``xxx/`` must contain
+ a file ``setup.py`` that defines
``configuration(parent_package='',top_path=None)`` function.
See below for more details.
+ a file ``info.py``. See below more details.
* Directory ``xxx/`` may contain
+ a directory ``tests/`` that contains files ``test_<name>.py``
corresponding to modules ``xxx/<name>{.py,.so,/}``. See below for
more details.
+ a file ``MANIFEST.in`` that may contain only ``include setup.py`` line.
DO NOT specify sources in MANIFEST.in, you must specify all sources
in setup.py file. Otherwise released SciPy tarballs will miss these sources.
+ a directory ``docs/`` for documentation.
For details, read:
http://projects.scipy.org/numpy/wiki/DistutilsDoc
Documentation
-------------
The documentation site is here
http://docs.scipy.org
Web sites
---------
The user's site is here
http://www.scipy.org/
The developer's site is here
http://projects.scipy.org/scipy/wiki
Mailing Lists
-------------
Please see the developer's list here
http://projects.scipy.org/mailman/listinfo/scipy-dev
Bug reports
-----------
To search for bugs, please use the NIPY Bug Tracker at
http://projects.scipy.org/scipy/query
To report a bug, please use the NIPY Bug Tracker at
http://projects.scipy.org/scipy/newticket
License information
-------------------
See the file "LICENSE" for information on the history of this
software, terms & conditions for usage, and a DISCLAIMER OF ALL
WARRANTIES.

View file

@ -1,174 +0,0 @@
SciPy is an open source library of routines for science and engineering
using Python. It is a community project sponsored by Enthought, Inc.
SciPy originated with code contributions by Travis Oliphant, Pearu
Peterson, and Eric Jones. Travis Oliphant and Eric Jones each contributed
about half the initial code. Pearu Peterson developed f2py, which is
integral to wrapping the many Fortran libraries used in SciPy.
Since then many people have contributed to SciPy, both in code development,
suggestions, and financial support. Below is a partial list. If you've
been left off, please email the "SciPy Developers List" <scipy-dev@scipy.org>.
Please add names as needed so that we can keep up with all the contributors.
Kumar Appaiah for Dolph Chebyshev window.
Nathan Bell for sparsetools, help with scipy.sparse and scipy.splinalg.
Robert Cimrman for UMFpack wrapper for sparse matrix module.
David M. Cooke for improvements to system_info, and LBFGSB wrapper.
Aric Hagberg for ARPACK wrappers, help with splinalg.eigen.
Chuck Harris for Zeros package in optimize (1d root-finding algorithms).
Prabhu Ramachandran for improvements to gui_thread.
Robert Kern for improvements to stats and bug-fixes.
Jean-Sebastien Roy for fmin_tnc code which he adapted from Stephen Nash's
original Fortran.
Ed Schofield for Maximum entropy and Monte Carlo modules, help with
sparse matrix module.
Travis Vaught for numerous contributions to annual conference and community
web-site and the initial work on stats module clean up.
Jeff Whitaker for Mac OS X support.
David Cournapeau for bug-fixes, refactoring of fftpack and cluster,
implementing the numscons and Bento build support, building Windows
binaries and adding single precision FFT.
Damian Eads for hierarchical clustering, dendrogram plotting,
distance functions in spatial package, vq documentation.
Anne Archibald for kd-trees and nearest neighbor in scipy.spatial.
Pauli Virtanen for Sphinx documentation generation, online documentation
framework and interpolation bugfixes.
Josef Perktold for major improvements to scipy.stats and its test suite and
fixes and tests to optimize.curve_fit and leastsq.
David Morrill for getting the scoreboard test system up and running.
Louis Luangkesorn for providing multiple tests for the stats module.
Jochen Kupper for the zoom feature in the now-deprecated plt plotting module.
Tiffany Kamm for working on the community web-site.
Mark Koudritsky for maintaining the web-site.
Andrew Straw for help with the web-page, documentation, packaging,
testing and work on the linalg module.
Stefan van der Walt for numerous bug-fixes, testing and documentation.
Jarrod Millman for release management, community coordination, and code
clean up.
Pierre Gerard-Marchant for statistical masked array functionality.
Alan McIntyre for updating SciPy tests to use the new NumPy test framework.
Matthew Brett for work on the Matlab file IO, bug-fixes, and improvements
to the testing framework.
Gary Strangman for the scipy.stats package.
Tiziano Zito for generalized symmetric and hermitian eigenvalue problem
solver.
Chris Burns for bug-fixes.
Per Brodtkorb for improvements to stats distributions.
Neilen Marais for testing and bug-fixing in the ARPACK wrappers.
Johannes Loehnert and Bart Vandereycken for fixes in the linalg
module.
David Huard for improvements to the interpolation interface.
David Warde-Farley for converting the ndimage docs to ReST.
Uwe Schmitt for wrapping non-negative least-squares.
Ondrej Certik for Debian packaging.
Paul Ivanov for porting Numeric-style C code to the new NumPy API.
Ariel Rokem for contributions on percentileofscore fixes and tests.
Yosef Meller for tests in the optimization module.
Ralf Gommers for release management, code clean up and improvements
to doc-string generation.
Bruce Southey for bug-fixes and improvements to scipy.stats.
Ernest Adrogué for the Skellam distribution.
Enzo Michelangeli for a fast kendall tau test.
David Simcha for a fisher exact test.
Warren Weckesser for bug-fixes, cleanups, and several new features.
Fabian Pedregosa for linear algebra bug-fixes, new features and refactoring.
Jake Vanderplas for wrapping ARPACK's generalized and shift-invert modes
and improving its tests.
Collin RM Stocks for wrapping pivoted QR decomposition.
Martin Teichmann for improving scipy.special.ellipk & agm accuracy,
and for linalg.qr_multiply.
Jeff Armstrong for discrete state-space and linear time-invariant functionality
in scipy.signal, and sylvester/riccati/lyapunov solvers in scipy.linalg.
Mark Wiebe for fixing type casting after changes in Numpy.
Andrey Smirnov for improvements to FIR filter design.
Anthony Scopatz for help with code review and merging.
Lars Buitinck for improvements to scipy.sparse and various other modules.
Scott Sinclair for documentation improvements and some bug fixes.
Gael Varoquaux for cleanups in scipy.sparse.
Skipper Seabold for a fix to special.gammainc.
Wes McKinney for a fix to special.gamma.
Thouis (Ray) Jones for bug fixes in ndimage.
Yaroslav Halchenko for a bug fix in ndimage.
Thomas Robitaille for the IDL 'save' reader.
Fazlul Shahriar for fixes to the NetCDF3 I/O.
Chris Jordan-Squire for bug fixes, documentation improvements and
scipy.special.logit & expit.
Christoph Gohlke for many bug fixes and help with Windows specific issues.
Jacob Silterra for cwt-based peak finding in scipy.signal.
Denis Laxalde for the unified interface to minimizers in scipy.optimize.
David Fong for the sparse LSMR solver.
Andreas Hilboll for adding several new interpolation methods.
Andrew Schein for improving the numerical precision of norm.logcdf().
Robert Gantner for improving expm() implementation.
Sebastian Werk for Halley's method in newton().
Bjorn Forsman for contributing signal.bode().
Tony S. Yu for ndimage improvements.
Jonathan J. Helmus for work on ndimage.
Alex Reinhart for documentation improvements.
Patrick Varilly for cKDTree improvements.
Sturla Molden for cKDTree improvements.
Nathan Crock for bug fixes.
Steven G. Johnson for Faddeeva W and erf* implementations.
Lorenzo Luengo for whosmat() in scipy.io.
Eric Moore for orthogonal polynomial recurrences in scipy.special.
Jacob Stevenson for the basinhopping optimization algorithm
Daniel Smith for sparse matrix functionality improvements
Gustav Larsson for a bug fix in convolve2d.
Alex Griffing for expm 2009, expm_multiply, expm_frechet,
trust region optimization methods, and sparse matrix onenormest
implementations, plus bugfixes.
Nils Werner for signal windowing and wavfile-writing improvements.
Kenneth L. Ho for the wrapper around the Interpolative Decomposition code.
Juan Luis Cano for refactorings in lti, sparse docs improvements and some
trivial fixes.
Pawel Chojnacki for simple documentation fixes.
Gert-Ludwig Ingold for contributions to special functions.
Joris Vankerschaver for multivariate Gaussian functionality.
Rob Falck for the SLSQP interface and linprog.
Jörg Dietrich for the k-sample Anderson Darling test.
Blake Griffith for improvements to scipy.sparse.
Andrew Nelson for scipy.optimize.differential_evolution.
Brian Newsom for work on ctypes multivariate integration.
Nathan Woods for advising on multivariate integration and building unit tests.
Brianna Laugher for bug fixes.
Johannes Kulick for the Dirichlet distribution.
Bastian Venthur for bug fixes.
Alex Rothberg for stats.combine_pvalues.
Brandon Liu for stats.combine_pvalues.
Clark Fitzgerald for namedtuple outputs in scipy.stats.
Florian Wilhelm for usage of RandomState in scipy.stats distributions.
Robert T. McGibbon for Levinson-Durbin Toeplitz solver, Hessian information
from L-BFGS-B.
Alex Conley for the Exponentially Modified Normal distribution.
Abraham Escalante for contributions to scipy.stats
Johannes Ballé for the generalized normal distribution.
Irvin Probst (ENSTA Bretagne) for pole placement.
Ian Henriksen for Cython wrappers for BLAS and LAPACK
Fukumu Tsutsumi for bug fixes.
J.J. Green for interpolation bug fixes.
François Magimel for documentation improvements.
Josh Levy-Kramer for the log survival function of the hypergeometric distribution
Institutions
------------
Enthought for providing resources and finances for development of SciPy.
Brigham Young University for providing resources for students to work on SciPy.
Agilent which gave a generous donation for support of SciPy.
UC Berkeley for providing travel money and hosting numerous sprints.
The University of Stellenbosch for funding the development of
the SciKits portal.

View file

@ -1,57 +0,0 @@
=================================================
Development Plans for SciPy 1.0
=================================================
See http://www.scipy.org/scipy/scipy/wiki/DevelopmentPlan
for updates of this document.
.. Contents::
General
--------
* distributions heavy use of extract and insert (could use fancy indexing?) -- but we should wait until we learn how slow fancy indexing is....)
* Use of old Numeric C-API. Using it means an extra C-level function call, but ...
* Make use of type addition to extend certain ufuncs with cephes quad types
* Use finfo(foo).bar instead of limits.foo_bar (see r3358 and r3362)
* Comply with Python Style Guide
* use CamelCase for class names
* Improve testing (e.g., increased coverage)
Documentation
-------------
See http://projects.scipy.org/numpy/wiki/CodingStyleGuidelines
* use new docstring format
Packages
--------
* consider reorganizing the namespace
* scipy.tests, scipy.misc, scipy.stsci
IO (scipy.io)
+++++++++++++
* io rewritten to use internal writing capabilities of arrays
Image Processing (scipy.ndimage)
++++++++++++++++++++++++++++++++
Statistical Analysis (scipy.stats)
++++++++++++++++++++++++++++++++++
* add statistical models

View file

@ -1,75 +0,0 @@
.. -*- rst -*-
================
SciPy benchmarks
================
Benchmarking Scipy with Airspeed Velocity.
Usage
-----
Airspeed Velocity manages building and Python virtualenvs by itself,
unless told otherwise. Some of the benchmarking features in
``runtests.py`` also tell ASV to use the Scipy compiled by
``runtests.py``. To run the benchmarks, you do not need to install a
development version of Scipy to your current Python environment.
Run a benchmark against currently checked out Scipy version (don't record the
result)::
python runtests.py --bench sparse.Arithmetic
Compare change in benchmark results to another branch::
python runtests.py --bench-compare master sparse.Arithmetic
Run ASV commands::
cd benchmarks
./run.py run --skip-existing-commits --steps 10 ALL
./run.py publish
./run.py preview
The ``run.py`` script sets up some environment variables and does other minor
maintenance jobs for you. The benchmark suite is runnable directly using the
``asv`` command.
More on how to use ``asv`` can be found in `ASV documentation`_
Command-line help is available as usual via ``asv --help`` and
``asv run --help``.
.. _ASV documentation: https://spacetelescope.github.io/asv/
Writing benchmarks
------------------
See `ASV documentation`_ for basics on how to write benchmarks.
Some things to consider:
- When importing things from Scipy on the top of the test files, do it as::
try:
from scipy.sparse.linalg import onenormest
except ImportError:
pass
The benchmark files need to be importable also when benchmarking old versions
of Scipy. The benchmarks themselves don't need any guarding against missing
features --- only the top-level imports.
- Try to keep the runtime of the benchmark reasonable.
- Use ASV's ``time_`` methods for benchmarking times rather than cooking up
time measurements via ``time.clock``, even if it requires some juggling when
writing the benchmark.
- Preparing arrays etc. should generally be put in the ``setup`` method rather
than the ``time_`` methods, to avoid counting preparation time together with
the time of the benchmarked operation.
- Use ``run_monitored`` from ``common.py`` if you need to measure memory usage.

View file

@ -1,69 +0,0 @@
{
// The version of the config file format. Do not change, unless
// you know what you are doing.
"version": 1,
// The name of the project being benchmarked
"project": "scipy",
// The project's homepage
"project_url": "http://scipy.org/scipylib/",
// The URL of the source code repository for the project being
// benchmarked
"repo": "https://github.com/scipy/scipy.git",
"dvcs": "git",
"branches": ["master", "maintenance/0.15.x", "maintenance/0.14.x"],
// The base URL to show a commit for the project.
"show_commit_url": "http://github.com/scipy/scipy/commit/",
// The Pythons you'd like to test against. If not provided, defaults
// to the current version of Python used to run `asv`.
"pythons": ["2.7"],
// The matrix of dependencies to test. Each key is the name of a
// package (in PyPI) and the values are version numbers. An empty
// list indicates to just test against the default (latest)
// version.
"matrix": {
"numpy": ["1.8.2"],
"Tempita": ["0.5.2"],
"Cython": ["0.22"],
"six": [],
},
// The directory (relative to the current directory) that benchmarks are
// stored in. If not provided, defaults to "benchmarks"
"benchmark_dir": "benchmarks",
// The directory (relative to the current directory) to cache the Python
// environments in. If not provided, defaults to "env"
"env_dir": "env",
"environment_type": "virtualenv",
"wheel_cache_size": 10,
// The directory (relative to the current directory) that raw benchmark
// results are stored in. If not provided, defaults to "results".
"results_dir": "results",
// The directory (relative to the current directory) that the html tree
// should be written to. If not provided, defaults to "html".
"html_dir": "html",
// The number of characters to retain in the commit hashes.
"hash_length": 8,
// The commits after which the regression search in `asv publish`
// should start looking for regressions. Dictionary whose keys are
// regexps matching to benchmark names, and values corresponding to
// the commit (exclusive) after which to start looking for
// regressions. The default is to start from the first commit
// with results. If the commit is `null`, regression detection is
// skipped for the matching benchmark.
"regressions_first_commits": {
"io_matlab\\.StructArr\\..*": "67c089a6", // structarrs weren't properly implemented before this
}
}

View file

@ -1,5 +0,0 @@
import numpy as np
import random

# Seed both the NumPy and stdlib RNGs so benchmark data is reproducible
# across runs and processes.
np.random.seed(1234)
random.seed(1234)

View file

@ -1,91 +0,0 @@
"""
Airspeed Velocity benchmark utilities
"""
from __future__ import division, absolute_import, print_function
import sys
import re
import time
import textwrap
import subprocess
class Benchmark(object):
    """
    Base class with sensible options
    """
    # Target runtime per benchmark in seconds (asv's ``goal_time`` option).
    goal_time = 0.25
def run_monitored(code):
    """
    Run code in a new Python process, and monitor peak memory usage.

    Parameters
    ----------
    code : str
        Python source to execute; it is dedented before being passed to
        ``python -c``.

    Returns
    -------
    duration : float
        Duration in seconds (including Python startup time)
    peak_memusage : float
        Peak memory usage (rough estimate only) in bytes

    Raises
    ------
    RuntimeError
        If not running on Linux (monitoring reads ``/proc``).
    AssertionError
        If the child process exits with a non-zero status.
    """
    if not sys.platform.startswith('linux'):
        raise RuntimeError("Peak memory monitoring only works on Linux")

    code = textwrap.dedent(code)
    process = subprocess.Popen([sys.executable, '-c', code])

    peak_memusage = -1

    start = time.time()
    while True:
        ret = process.poll()
        if ret is not None:
            break

        try:
            # Sample the resident set size from /proc.  The status file can
            # vanish if the child exits between poll() and open(); treat
            # that as the process having finished.
            with open('/proc/%d/status' % process.pid, 'r') as f:
                procdata = f.read()
        except (IOError, OSError):
            break

        # Raw string: \s and \d are regex escapes, not string escapes.
        m = re.search(r'VmRSS:\s*(\d+)\s*kB', procdata, re.S | re.I)
        if m is not None:
            memusage = float(m.group(1)) * 1e3
            peak_memusage = max(memusage, peak_memusage)

        time.sleep(0.01)
    process.wait()

    duration = time.time() - start

    if process.returncode != 0:
        raise AssertionError("Running failed:\n%s" % code)

    return duration, peak_memusage
def get_mem_info():
    """Return /proc/meminfo as a dict: lower-cased field name -> bytes."""
    if not sys.platform.startswith('linux'):
        raise RuntimeError("Memory information implemented only for Linux")

    info = {}
    with open('/proc/meminfo', 'r') as f:
        for line in f:
            fields = line.split()
            key = fields[0].strip(':').lower()
            # /proc/meminfo reports values in kB; convert to bytes.
            info[key] = float(fields[1]) * 1e3
    return info
def set_mem_rlimit(max_mem=None):
    """
    Set address space rlimit
    """
    # Imported lazily: the ``resource`` module is POSIX-only.
    import resource
    if max_mem is None:
        # Default cap: 70% of total physical memory.
        mem_info = get_mem_info()
        max_mem = int(mem_info['memtotal'] * 0.7)
    cur_limit = resource.getrlimit(resource.RLIMIT_AS)
    if cur_limit[0] > 0:
        # Never raise an already-set (finite) soft limit.
        max_mem = min(max_mem, cur_limit[0])
    resource.setrlimit(resource.RLIMIT_AS, (max_mem, cur_limit[1]))

View file

@ -1,112 +0,0 @@
""" Test functions for fftpack.basic module
"""
from __future__ import division, absolute_import, print_function
from numpy import arange, asarray, zeros, dot, exp, pi, double, cdouble
import numpy.fft
from numpy.random import rand
try:
from scipy.fftpack import ifft, fft, fftn, irfft, rfft
except ImportError:
pass
from .common import Benchmark
def random(size):
    """Uniform random array; ``size`` is a sequence giving the shape."""
    shape = tuple(size)
    return rand(*shape)
def direct_dft(x):
    """Compute the DFT of *x* directly from the definition (O(n^2))."""
    signal = asarray(x)
    n = len(signal)
    # Row k of the DFT matrix is exp(-2j*pi*k*m/n) for m = 0..n-1.
    base = -arange(n) * (2j * pi / n)
    out = zeros(n, dtype=cdouble)
    for k in range(n):
        out[k] = dot(exp(k * base), signal)
    return out
def direct_idft(x):
    """Compute the inverse DFT of *x* directly from the definition (O(n^2))."""
    spectrum = asarray(x)
    n = len(spectrum)
    # Row k of the inverse DFT matrix is exp(+2j*pi*k*m/n) / n.
    base = arange(n) * (2j * pi / n)
    out = zeros(n, dtype=cdouble)
    for k in range(n):
        out[k] = dot(exp(k * base), spectrum) / n
    return out
class Fft(Benchmark):
    """Benchmark 1-D FFT/IFFT in scipy.fftpack against numpy.fft."""

    params = [
        [100, 256, 512, 1000, 1024, 2048, 2048*2, 2048*4],
        ['real', 'cmplx'],
        ['scipy', 'numpy']
    ]
    param_names = ['size', 'type', 'module']

    def setup(self, size, cmplx, module):
        # Build a real or complex random input of the requested length.
        if cmplx == 'cmplx':
            self.x = random([size]).astype(cdouble)+random([size]).astype(cdouble)*1j
        else:
            self.x = random([size]).astype(double)

    def time_fft(self, size, cmplx, module):
        if module == 'numpy':
            numpy.fft.fft(self.x)
        else:
            fft(self.x)

    def time_ifft(self, size, cmplx, module):
        if module == 'numpy':
            numpy.fft.ifft(self.x)
        else:
            ifft(self.x)
class RFft(Benchmark):
    """Benchmark real-input FFT/IFFT in scipy.fftpack against numpy.fft."""

    params = [
        [100, 256, 512, 1000, 1024, 2048, 2048*2, 2048*4],
        ['scipy', 'numpy']
    ]
    param_names = ['size', 'module']

    def setup(self, size, module):
        self.x = random([size]).astype(double)

    def time_rfft(self, size, module):
        if module == 'numpy':
            numpy.fft.rfft(self.x)
        else:
            rfft(self.x)

    def time_irfft(self, size, module):
        if module == 'numpy':
            numpy.fft.irfft(self.x)
        else:
            irfft(self.x)
class Fftn(Benchmark):
    """Benchmark n-dimensional FFT in scipy.fftpack against numpy.fft."""

    params = [
        ["100x100", "1000x100", "256x256", "512x512"],
        ['real', 'cmplx'],
        ['scipy', 'numpy']
    ]
    param_names = ['size', 'type', 'module']

    def setup(self, size, cmplx, module):
        # list() is required: on Python 3 ``map`` returns a one-shot
        # iterator, which would be exhausted by the first ``random(size)``
        # call below, leaving the second call with an empty shape.
        size = list(map(int, size.split("x")))

        if cmplx != 'cmplx':
            self.x = random(size).astype(double)
        else:
            self.x = random(size).astype(cdouble)+random(size).astype(cdouble)*1j

    def time_fftn(self, size, cmplx, module):
        if module == 'numpy':
            numpy.fft.fftn(self.x)
        else:
            fftn(self.x)

View file

@ -1,102 +0,0 @@
""" Benchmark functions for fftpack.pseudo_diffs module
"""
from __future__ import division, absolute_import, print_function
from numpy import arange, sin, cos, pi, exp, tanh, sign
try:
from scipy.fftpack import diff, fft, ifft, tilbert, hilbert, shift, fftfreq
except ImportError:
pass
from .common import Benchmark
def direct_diff(x, k=1, period=None):
    """Differentiate (or integrate, for k < 0) a periodic sequence via FFT.

    Reference implementation used to benchmark scipy.fftpack.diff.
    """
    spectrum = fft(x)
    n = len(spectrum)
    if period is None:
        period = 2 * pi
    # Angular frequencies (times 1j), scaled to the requested period.
    freqs = fftfreq(n) * 2j * pi / period * n
    if k < 0:
        # Negative order: spectral integration; zero the DC term.
        multiplier = 1 / freqs ** k
        multiplier[0] = 0.0
    else:
        multiplier = freqs ** k
    if n > 2000:
        # For long signals, zero out the highest frequencies
        # (matches the behaviour being benchmarked).
        multiplier[250:n - 250] = 0.0
    return ifft(multiplier * spectrum).real
def direct_tilbert(x,h=1,period=None):
    """Tilbert transform (1j/tanh multiplier) of a periodic sequence,
    computed with an explicit FFT multiplier; reference implementation
    used to benchmark scipy.fftpack.tilbert."""
    fx = fft(x)
    n = len(fx)
    if period is None:
        period = 2*pi
    w = fftfreq(n)*h*2*pi/period*n
    # Avoid division by zero at DC, then zero the DC term explicitly.
    w[0] = 1
    w = 1j/tanh(w)
    w[0] = 0j
    return ifft(w*fx)
def direct_hilbert(x):
    """Hilbert transform of a periodic sequence via an explicit FFT multiplier."""
    spectrum = fft(x)
    n = len(spectrum)
    # Multiplier is 1j*sign(frequency); zero at DC.
    multiplier = 1j * sign(fftfreq(n) * n)
    return ifft(multiplier * spectrum)
def direct_shift(x, a, period=None):
    """Shift a periodic sequence by *a* using the FFT shift theorem."""
    n = len(x)
    if period is None:
        # Default period 2*pi gives integer angular frequencies.
        phase = fftfreq(n) * 1j * n
    else:
        phase = fftfreq(n) * 2j * pi / period * n
    return ifft(fft(x) * exp(phase * a)).real
class Bench(Benchmark):
    """Benchmark scipy.fftpack pseudo-differential operators against the
    direct FFT-multiplier implementations defined above."""

    params = [
        [100, 256, 512, 1000, 1024, 2048, 2048*2, 2048*4],
        ['fft', 'direct'],
    ]
    # NOTE(review): the second parameter is named ``type`` in setup() but
    # ``soltype`` in the time_* methods; asv passes it positionally, so
    # the mismatch is harmless.
    param_names = ['size', 'type']

    def setup(self, size, type):
        size = int(size)

        x = arange(size)*2*pi/size
        a = 1
        self.a = a
        # Shorter signals get an extra exp(sin) term so the test function
        # is less trivial; longer ones stay cheap to build.
        if size < 2000:
            self.f = sin(x)*cos(4*x)+exp(sin(3*x))
            self.sf = sin(x+a)*cos(4*(x+a))+exp(sin(3*(x+a)))
        else:
            self.f = sin(x)*cos(4*x)
            self.sf = sin(x+a)*cos(4*(x+a))

    def time_diff(self, size, soltype):
        if soltype == 'fft':
            diff(self.f, 3)
        else:
            direct_diff(self.f,3)

    def time_tilbert(self, size, soltype):
        if soltype == 'fft':
            tilbert(self.f, 1)
        else:
            direct_tilbert(self.f, 1)

    def time_hilbert(self, size, soltype):
        if soltype == 'fft':
            hilbert(self.f)
        else:
            direct_hilbert(self.f)

    def time_shift(self, size, soltype):
        if soltype == 'fft':
            shift(self.f, self.a)
        else:
            direct_shift(self.f, self.a)

View file

@ -1,220 +0,0 @@
from __future__ import division, absolute_import, print_function
import numpy as np
from .common import run_monitored, set_mem_rlimit, Benchmark
try:
from scipy.stats import spearmanr
except ImportError:
pass
try:
import scipy.interpolate as interpolate
except ImportError:
pass
class Leaks(Benchmark):
    """Track relative growth of peak memory as griddata is repeated;
    a significant monotone correlation suggests a memory leak."""

    unit = "relative increase with repeats"

    def track_leaks(self):
        set_mem_rlimit()
        # Setup temp file, make it fit in memory
        repeats = [2, 5, 10, 50, 200]
        peak_mems = []
        for repeat in repeats:
            code = """
            import numpy as np
            from scipy.interpolate import griddata
            def func(x, y):
                return x*(1-x)*np.cos(4*np.pi*x) * np.sin(4*np.pi*y**2)**2
            grid_x, grid_y = np.mgrid[0:1:100j, 0:1:200j]
            points = np.random.rand(1000, 2)
            values = func(points[:,0], points[:,1])
            for t in range(%(repeat)d):
                for method in ['nearest', 'linear', 'cubic']:
                    griddata(points, values, (grid_x, grid_y), method=method)
            """ % dict(repeat=repeat)
            _, peak_mem = run_monitored(code)
            peak_mems.append(peak_mem)
        # Spearman rank correlation between repeat count and peak memory:
        # a significant positive correlation means memory grows with repeats.
        corr, p = spearmanr(repeats, peak_mems)
        if p < 0.05:
            print("*"*79)
            print("PROBABLE MEMORY LEAK")
            print("*"*79)
        else:
            print("PROBABLY NO MEMORY LEAK")
        return max(peak_mems) / min(peak_mems)
class BenchPPoly(Benchmark):
    """Time evaluation of a piecewise polynomial (PPoly) at many points."""

    def setup(self):
        np.random.seed(1234)
        # ``k`` is unused; kept from the original example.
        m, k = 55, 3
        x = np.sort(np.random.random(m+1))
        c = np.random.random((3, m))
        self.pp = interpolate.PPoly(c, x)
        npts = 100
        self.xp = np.linspace(0, 1, npts)

    def time_evaluation(self):
        self.pp(self.xp)
class GridData(Benchmark):
    """Time scipy.interpolate.griddata for several grid sizes and methods."""

    param_names = ['n_grids', 'method']
    params = [
        [10j, 100j, 1000j],
        ['nearest', 'linear', 'cubic']
    ]

    def setup(self, n_grids, method):
        self.func = lambda x, y: x*(1-x)*np.cos(4*np.pi*x) * np.sin(4*np.pi*y**2)**2
        # An imaginary step in np.mgrid means "number of points", not step size.
        self.grid_x, self.grid_y = np.mgrid[0:1:n_grids, 0:1:n_grids]
        self.points = np.random.rand(1000, 2)
        self.values = self.func(self.points[:,0], self.points[:,1])

    def time_evaluation(self, n_grids, method):
        interpolate.griddata(self.points, self.values, (self.grid_x, self.grid_y), method=method)
class Interpolate1d(Benchmark):
    """Time construction of interp1d interpolators for various kinds."""

    param_names = ['n_samples', 'method']
    params = [
        [10, 50, 100],
        ['linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'],
    ]

    def setup(self, n_samples, method):
        self.x = np.arange(n_samples)
        self.y = np.exp(-self.x/3.0)

    def time_interpolate(self, n_samples, method):
        interpolate.interp1d(self.x, self.y, kind=method)
class Interpolate2d(Benchmark):
    """Time construction of interp2d interpolators on a gridded surface."""

    param_names = ['n_samples', 'method']
    params = [
        [10, 50, 100],
        ['linear', 'cubic', 'quintic'],
    ]

    def setup(self, n_samples, method):
        # Symmetric sample range of width n_samples, step 0.25.
        r_samples = n_samples / 2.
        self.x = np.arange(-r_samples, r_samples, 0.25)
        self.y = np.arange(-r_samples, r_samples, 0.25)
        self.xx, self.yy = np.meshgrid(self.x, self.y)
        self.z = np.sin(self.xx**2+self.yy**2)

    def time_interpolate(self, n_samples, method):
        interpolate.interp2d(self.x, self.y, self.z, kind=method)
class Rbf(Benchmark):
    """Time construction of radial basis function interpolants (1-D and 2-D)."""

    param_names = ['n_samples', 'function']
    params = [
        [10, 50, 100],
        ['multiquadric', 'inverse', 'gaussian', 'linear', 'cubic', 'quintic', 'thin_plate']
    ]

    def setup(self, n_samples, function):
        self.x = np.arange(n_samples)
        self.y = np.sin(self.x)
        r_samples = n_samples / 2.
        self.X = np.arange(-r_samples, r_samples, 0.25)
        self.Y = np.arange(-r_samples, r_samples, 0.25)
        self.z = np.exp(-self.X**2-self.Y**2)

    def time_rbf_1d(self, n_samples, function):
        interpolate.Rbf(self.x, self.y, function=function)

    def time_rbf_2d(self, n_samples, function):
        interpolate.Rbf(self.X, self.Y, self.z, function=function)
class UnivariateSpline(Benchmark):
    """Time smoothing-spline fits of noisy 1-D data for several degrees."""

    param_names = ['n_samples', 'degree']
    params = [
        [10, 50, 100],
        [3, 4, 5]
    ]

    def setup(self, n_samples, degree):
        r_samples = n_samples / 2.
        self.x = np.arange(-r_samples, r_samples, 0.25)
        # Gaussian bump plus noise.
        self.y = np.exp(-self.x**2) + 0.1 * np.random.randn(*self.x.shape)

    def time_univariate_spline(self, n_samples, degree):
        interpolate.UnivariateSpline(self.x, self.y, k=degree)
class BivariateSpline(Benchmark):
    """
    Time smooth and least-squares bivariate spline fits of noisy data.

    Author: josef-pktd and scipy mailinglist example
    'http://scipy-user.10969.n7.nabble.com/BivariateSpline-examples\
    -and-my-crashing-python-td14801.html'
    """
    param_names = ['n_samples']
    params = [
        [10, 20, 30]
    ]

    def setup(self, n_samples):
        x = np.arange(0, n_samples, 0.5)
        y = np.arange(0, n_samples, 0.5)
        x, y = np.meshgrid(x, y)
        x = x.ravel()
        y = y.ravel()
        xmin = x.min()-1
        xmax = x.max()+1
        ymin = y.min()-1
        ymax = y.max()+1
        s = 1.1
        # Interior knots, kept a margin ``s`` inside the data range.
        self.yknots = np.linspace(ymin+s,ymax-s,10)
        self.xknots = np.linspace(xmin+s,xmax-s,10)
        self.z = np.sin(x) + 0.1*np.random.normal(size=x.shape)
        self.x = x
        self.y = y

    def time_smooth_bivariate_spline(self, n_samples):
        interpolate.SmoothBivariateSpline(self.x, self.y, self.z)

    def time_lsq_bivariate_spline(self, n_samples):
        interpolate.LSQBivariateSpline(self.x, self.y, self.z, self.xknots.flat, self.yknots.flat)
class Interpolate(Benchmark):
    """
    Linear Interpolate in scipy and numpy
    """
    param_names = ['n_samples', 'module']
    params = [
        [10, 50, 100],
        ['numpy', 'scipy']
    ]

    def setup(self, n_samples, module):
        self.x = np.arange(n_samples)
        self.y = np.exp(-self.x/3.0)
        self.z = np.random.normal(size=self.x.shape)

    def time_interpolate(self, n_samples, module):
        # NOTE(review): the scipy branch only constructs the interpolator,
        # while the numpy branch also evaluates it — the comparison is not
        # strictly apples-to-apples.  Kept as-is.
        if module == 'scipy':
            interpolate.interp1d(self.x, self.y, kind="linear")
        else:
            np.interp(self.z, self.x, self.y)

View file

@ -1,120 +0,0 @@
from __future__ import division, absolute_import, print_function
from .common import set_mem_rlimit, run_monitored, get_mem_info
import os
import tempfile
import collections
from io import BytesIO
import numpy as np
try:
from scipy.io import savemat, loadmat
except ImportError:
pass
from .common import Benchmark
class MemUsage(Benchmark):
    """Track peak memory of loadmat/savemat relative to the data size."""

    param_names = ['size', 'compressed']
    timeout = 4*60
    unit = "actual/optimal memory usage ratio"

    @property
    def params(self):
        # First axis: human-readable size labels; second: compression flag.
        return [self._get_sizes().keys(), [True, False]]

    def _get_sizes(self):
        # Label -> payload size in bytes.  The largest entries are
        # commented out to keep runtimes and memory needs reasonable.
        sizes = collections.OrderedDict([
            ('1M', 1e6),
            ('10M', 10e6),
            ('100M', 100e6),
            ('300M', 300e6),
            #('500M', 500e6),
            #('1000M', 1000e6),
        ])
        return sizes

    def setup(self, size, compressed):
        set_mem_rlimit()
        self.sizes = self._get_sizes()
        size = int(self.sizes[size])
        mem_info = get_mem_info()
        try:
            mem_available = mem_info['memavailable']
        except KeyError:
            # Older kernels have no MemAvailable field; fall back to total.
            mem_available = mem_info['memtotal']
        # Skip cases that would not comfortably fit in memory.
        max_size = int(mem_available * 0.7)//4
        if size > max_size:
            raise NotImplementedError()
        # Setup temp file
        f = tempfile.NamedTemporaryFile(delete=False, suffix='.mat')
        f.close()
        self.filename = f.name

    def teardown(self, size, compressed):
        os.unlink(self.filename)

    def track_loadmat(self, size, compressed):
        size = int(self.sizes[size])
        x = np.random.rand(size//8).view(dtype=np.uint8)
        savemat(self.filename, dict(x=x), do_compression=compressed, oned_as='row')
        del x
        code = """
        from scipy.io import loadmat
        loadmat('%s')
        """ % (self.filename,)
        time, peak_mem = run_monitored(code)
        return peak_mem / size

    def track_savemat(self, size, compressed):
        size = int(self.sizes[size])
        code = """
        import numpy as np
        from scipy.io import savemat
        x = np.random.rand(%d//8).view(dtype=np.uint8)
        savemat('%s', dict(x=x), do_compression=%r, oned_as='row')
        """ % (size, self.filename, compressed)
        time, peak_mem = run_monitored(code)
        return peak_mem / size
class StructArr(Benchmark):
    """Benchmark MATLAB-file round-trips of struct arrays of varying size."""

    params = [
        [(10, 10, 20), (20, 20, 40), (30, 30, 50)],
        [False, True]
    ]
    param_names = ['(vars, fields, structs)', 'compression']

    @staticmethod
    def make_structarr(n_vars, n_fields, n_structs):
        # Build n_vars variables, each a struct array of n_structs records
        # with n_fields int32[10] fields.
        var_dict = {}
        for vno in range(n_vars):
            # NOTE(review): '%00d' has zero field width (no padding);
            # possibly '%03d' was intended.  Kept as-is.
            vname = 'var%00d' % vno
            end_dtype = [('f%d' % d, 'i4', 10) for d in range(n_fields)]
            s_arrs = np.zeros((n_structs,), dtype=end_dtype)
            var_dict[vname] = s_arrs
        return var_dict

    def setup(self, nvfs, compression):
        n_vars, n_fields, n_structs = nvfs
        self.var_dict = StructArr.make_structarr(n_vars, n_fields, n_structs)
        self.str_io = BytesIO()
        savemat(self.str_io, self.var_dict, do_compression=compression)

    def time_savemat(self, nvfs, compression):
        savemat(self.str_io, self.var_dict, do_compression=compression)

    def time_loadmat(self, nvfs, compression):
        loadmat(self.str_io)

View file

@ -1,105 +0,0 @@
from __future__ import division, absolute_import, print_function
import numpy.linalg as nl
import numpy as np
from numpy.testing import assert_
from numpy.random import rand
try:
import scipy.linalg as sl
except ImportError:
pass
from .common import Benchmark
def random(size):
    """Return uniform random samples with the given shape (a sequence)."""
    return rand(*tuple(size))
class Bench(Benchmark):
    """Compare dense linear algebra (solve/inv/det/eigvals/svd) between
    numpy.linalg and scipy.linalg, for contiguous and non-contiguous input."""

    params = [
        [20, 100, 500, 1000],
        ['contig', 'nocont'],
        ['numpy', 'scipy']
    ]
    param_names = ['size', 'contiguous', 'module']

    def setup(self, size, contig, module):
        a = random([size,size])
        # larger diagonal ensures non-singularity:
        for i in range(size):
            a[i,i] = 10*(.1+a[i,i])
        b = random([size])
        if contig != 'contig':
            a = a[-1::-1,-1::-1]  # turn into a non-contiguous array
            assert_(not a.flags['CONTIGUOUS'])
        self.a = a
        self.b = b

    def time_solve(self, size, contig, module):
        if module == 'numpy':
            nl.solve(self.a, self.b)
        else:
            sl.solve(self.a, self.b)

    def time_inv(self, size, contig, module):
        if module == 'numpy':
            nl.inv(self.a)
        else:
            sl.inv(self.a)

    def time_det(self, size, contig, module):
        if module == 'numpy':
            nl.det(self.a)
        else:
            sl.det(self.a)

    def time_eigvals(self, size, contig, module):
        if module == 'numpy':
            nl.eigvals(self.a)
        else:
            sl.eigvals(self.a)

    def time_svd(self, size, contig, module):
        if module == 'numpy':
            nl.svd(self.a)
        else:
            sl.svd(self.a)
class Norm(Benchmark):
    """Compare matrix norms (1, inf, Frobenius) in numpy.linalg vs scipy.linalg."""

    params = [
        [(20, 20), (100, 100), (1000, 1000), (20, 1000), (1000, 20)],
        ['contig', 'nocont'],
        ['numpy', 'scipy']
    ]
    param_names = ['shape', 'contiguous', 'module']

    def setup(self, shape, contig, module):
        a = np.random.randn(*shape)
        if contig != 'contig':
            a = a[-1::-1,-1::-1]  # turn into a non-contiguous array
            assert_(not a.flags['CONTIGUOUS'])
        self.a = a

    # NOTE(review): in the time_* methods the first parameter is the shape
    # tuple despite being named ``size``; asv passes it positionally.
    def time_1_norm(self, size, contig, module):
        if module == 'numpy':
            nl.norm(self.a, ord=1)
        else:
            sl.norm(self.a, ord=1)

    def time_inf_norm(self, size, contig, module):
        if module == 'numpy':
            nl.norm(self.a, ord=np.inf)
        else:
            sl.norm(self.a, ord=np.inf)

    def time_frobenius_norm(self, size, contig, module):
        if module == 'numpy':
            nl.norm(self.a)
        else:
            sl.norm(self.a)

View file

@ -1,46 +0,0 @@
"""Benchmark the solve_toeplitz solver (Levinson recursion)
"""
from __future__ import division, absolute_import, print_function
import numpy as np
try:
import scipy.linalg
except ImportError:
pass
from .common import Benchmark
class SolveToeplitz(Benchmark):
    """Benchmark the Levinson-recursion Toeplitz solver against the
    generic dense solver on the same system."""

    params = (
        ('float64', 'complex128'),
        (100, 300, 1000),
        ('toeplitz', 'generic')
    )
    param_names = ('dtype', 'n', 'solver')

    def setup(self, dtype, n, soltype):
        random = np.random.RandomState(1234)
        dtype = np.dtype(dtype)
        # Sample a random Toeplitz matrix representation and rhs.
        c = random.randn(n)
        r = random.randn(n)
        y = random.randn(n)
        if dtype == np.complex128:
            c = c + 1j*random.rand(n)
            r = r + 1j*random.rand(n)
            y = y + 1j*random.rand(n)
        self.c = c
        self.r = r
        self.y = y
        # Dense matrix for the generic solver path.
        self.T = scipy.linalg.toeplitz(c, r=r)

    def time_solve_toeplitz(self, dtype, n, soltype):
        if soltype == 'toeplitz':
            scipy.linalg.solve_toeplitz((self.c, self.r), self.y)
        else:
            scipy.linalg.solve(self.T, self.y)

View file

@ -1,37 +0,0 @@
""" Benchmark linalg.sqrtm for various blocksizes.
"""
from __future__ import division, absolute_import, print_function
import numpy as np
try:
import scipy.linalg
except ImportError:
pass
from .common import Benchmark
class Sqrtm(Benchmark):
    """Benchmark scipy.linalg.sqrtm for various matrix sizes and blocksizes."""

    params = [
        ['float64', 'complex128'],
        [64, 256],
        [32, 64, 256]
    ]
    param_names = ['dtype', 'n', 'blocksize']

    def setup(self, dtype, n, blocksize):
        n = int(n)
        dtype = np.dtype(dtype)
        blocksize = int(blocksize)
        A = np.random.rand(n, n)
        if dtype == np.complex128:
            A = A + 1j*np.random.rand(n, n)
        self.A = A
        if blocksize > n:
            # Skip meaningless combinations; asv treats NotImplementedError
            # in setup() as "skip this benchmark".
            raise NotImplementedError()

    def time_sqrtm(self, dtype, n, blocksize):
        scipy.linalg.sqrtm(self.A, disp=False, blocksize=blocksize)

View file

@ -1,490 +0,0 @@
"""Benchmark problems for nonlinear least squares."""
from __future__ import division
from collections import OrderedDict
import inspect
import sys
import numpy as np
from numpy.polynomial.chebyshev import Chebyshev
from scipy.integrate import odeint
class LSQBenchmarkProblem(object):
    """Base class for nonlinear least squares benchmark problems.

    The optimized variable is an n-dimensional vector x and the objective
    is F(x) = ||f(x)||^2 = sum(f_i(x)^2, i = 1, ..., m), where the vector
    function f = (f_1, ..., f_m) gives the residuals.  The Jacobian of f
    is the m-by-n matrix of partial derivatives df_i/dx_j.

    Parameters
    ----------
    n : int
        Number of optimized variables.
    m : int
        Number of residuals.
    fopt : float
        Sum of squared residuals at the optimum, accurate to orders of
        magnitude better than the `ftol` used by the benchmarked solver.
    x0 : ndarray, shape (n,)
        Initial guess for the optimized variable.
    lb, ub : None or ndarray, shape (n,), optional
        Lower/upper bounds per variable (-inf/inf for unbounded);
        None means no bounds at all.

    Attributes
    ----------
    INITIAL_GUESSES : list of ndarray
        Initial guesses to try; derived classes fill in at least one.
    """

    INITIAL_GUESSES = None

    def __init__(self, n, m, fopt, x0, lb=None, ub=None):
        self.n = n
        self.m = m
        self.fopt = fopt
        self.x0 = x0
        self.lb = lb
        self.ub = ub

    def fun(self, x):
        """Return the residual vector f(x), shape (m,).

        Derived classes must override this.
        """
        raise NotImplementedError

    def jac(self, x):
        """Return the Jacobian of `fun` at `x`, shape (m, n).

        Derived classes must override this.
        """
        raise NotImplementedError

    def check_answer(self, x, ftol):
        """Return True if `x` is feasible and (1 + ftol)-close to optimal.

        Parameters
        ----------
        x : ndarray, shape (n,)
            The point to test.
        ftol : float
            Maximum allowed relative error in the objective value.
        """
        below_lb = self.lb is not None and np.any(x < self.lb)
        above_ub = self.ub is not None and np.any(x > self.ub)
        if below_lb or above_ub:
            # Bound violations disqualify the answer outright.
            return False
        objective = np.sum(self.fun(x) ** 2)
        return objective < (1 + ftol) * self.fopt
class AlphaPineneDirect(LSQBenchmarkProblem):
    """Isomerization of alpha-pinene problem, direct formulation [1]_.

    Number of variables --- 5, number of residuals --- 40, no bounds.

    .. [1] Brett M. Averick et al. "The MINPACK-2 Test Problem Collection",
           p. 20
    """
    # Reference starting point for the five reaction-rate constants.
    INITIAL_GUESSES = [
        np.array([5.84, 2.65, 1.63, 27.77, 4.61]) * 1e-5
    ]

    def __init__(self, x0):
        super(AlphaPineneDirect, self).__init__(5, 40, 2.064572e1, x0)
        # Observation times; the first entry (t = 0) is the initial state.
        self.t = np.array([0, 1230, 3060, 4920, 7800, 10680, 15030, 22620,
                           36420], dtype=float)
        # Initial concentrations of the five species.
        self.y0 = np.array([100, 0, 0, 0, 0], dtype=float)
        # Measured concentrations of the five species at each time in `t`.
        self.y = np.array([
            [100, 0, 0, 0, 0],
            [88.35, 7.3, 2.3, 0.4, 1.75],
            [76.4, 15.6, 4.5, 0.7, 2.8],
            [65.1, 23.1, 5.3, 1.1, 5.8],
            [50.4, 32.9, 6, 1.5, 9.3],
            [37.5, 42.7, 6.0, 1.9, 12],
            [25.9, 49.1, 5.9, 2.2, 17],
            [14, 57.4, 5.1, 2.6, 21],
            [4.5, 63.1, 3.8, 2.9, 25.7]
        ])

    def fun_ode_rhs(self, y, t, x):
        """Kinetic ODE right-hand side dy/dt for rate constants `x`."""
        return np.array(
            [-(x[0] + x[1]) * y[0],
             x[0] * y[0],
             x[1] * y[0] - (x[2] + x[3]) * y[2] + x[4] * y[4],
             x[2] * y[2],
             x[3] * y[2] - x[4] * y[4]]
        )

    def jac_ode_rhs(self, y, t, x):
        """RHS of the augmented ODE: the 5 state equations followed by the
        25 partial derivatives of the RHS w.r.t. `x` (flattened row-major,
        5 equations x 5 parameters)."""
        jac_part = np.array(
            [-y[0], -y[0], 0, 0, 0,
             y[0], 0, 0, 0, 0,
             0, y[0], -y[2], -y[2], y[4],
             0, 0, y[2], 0, 0,
             0, 0, 0, y[2], -y[4]]
        )
        return np.hstack((self.fun_ode_rhs(y, t, x), jac_part))

    def fun(self, x):
        """Residuals: integrated concentrations minus measurements,
        excluding the known initial state (row 0)."""
        y_hat = odeint(self.fun_ode_rhs, self.y0, self.t, args=(x,))
        return y_hat[1:].ravel() - self.y[1:].ravel()

    def jac(self, x):
        """Jacobian of `fun`: integrate the augmented ODE and reshape the
        sensitivity columns into the (40, 5) matrix."""
        result = odeint(self.jac_ode_rhs, np.hstack((self.y0, np.zeros(25))),
                        self.t, args=(x,))
        return result[1:, 5:].reshape((40, 5))
class CoatingThickness(LSQBenchmarkProblem):
    """Coating thickness standardization problem, [1]_.

    Number of variables --- 134, number of residuals --- 252, no bounds.

    .. [1] Brett M. Averick et al. "The MINPACK-2 Test Problem Collection",
           p. 25
    """
    # 8 model coefficients followed by 126 per-measurement perturbations.
    INITIAL_GUESSES = [
        np.hstack(([-8.0, 13.0, 1.2, 0.2, 0.1, 6.0, 5.5, -5.2],
                   np.zeros(126)))
    ]

    def __init__(self, x0):
        super(CoatingThickness, self).__init__(134, 252, 0.5054986, x0)
        # Number of measurement sites; the 252 residuals come in 4 groups
        # of n0 (two model mismatch groups + two perturbation penalties).
        self.n0 = self.m // 4
        # Nominal values of the two independent variables at each site.
        self.xi = np.array([
            [0.7140, 0.7169, 0.7232, 0.7151, 0.6848, 0.7070, 0.7177, 0.7073,
             0.6734, 0.7174, 0.7125, 0.6947, 0.7121, 0.7166, 0.6894, 0.6897,
             0.7024, 0.7026, 0.6800, 0.6957, 0.6987, 0.7111, 0.7097, 0.6809,
             0.7139, 0.7046, 0.6950, 0.7032, 0.7019, 0.6975, 0.6955, 0.7056,
             0.6965, 0.6848, 0.6995, 0.6105, 0.6027, 0.6084, 0.6081, 0.6057,
             0.6116, 0.6052, 0.6136, 0.6032, 0.6081, 0.6092, 0.6122, 0.6157,
             0.6191, 0.6169, 0.5483, 0.5371, 0.5576, 0.5521, 0.5495, 0.5499,
             0.4937, 0.5092, 0.5433, 0.5018, 0.5363, 0.4977, 0.5296],
            [5.145, 5.241, 5.389, 5.211, 5.154, 5.105, 5.191, 5.013, 5.582,
             5.208, 5.142, 5.284, 5.262, 6.838, 6.215, 6.817, 6.889, 6.732,
             6.717, 6.468, 6.776, 6.574, 6.465, 6.090, 6.350, 4.255, 4.154,
             4.211, 4.287, 4.104, 4.007, 4.261, 4.150, 4.040, 4.155, 5.086,
             5.021, 5.040, 5.247, 5.125, 5.136, 4.949, 5.253, 5.154, 5.227,
             5.120, 5.291, 5.294, 5.304, 5.209, 5.384, 5.490, 5.563, 5.532,
             5.372, 5.423, 7.237, 6.944, 6.957, 7.138, 7.009, 7.074, 7.046]
        ])
        # Measured coating thicknesses: first n0 entries for surface 1,
        # next n0 for surface 2.
        self.y = np.array(
            [9.3636, 9.3512, 9.4891, 9.1888, 9.3161, 9.2585, 9.2913, 9.3914,
             9.4524, 9.4995, 9.4179, 9.468, 9.4799, 11.2917, 11.5062, 11.4579,
             11.3977, 11.3688, 11.3897, 11.3104, 11.3882, 11.3629, 11.3149,
             11.2474, 11.2507, 8.1678, 8.1017, 8.3506, 8.3651, 8.2994, 8.1514,
             8.2229, 8.1027, 8.3785, 8.4118, 8.0955, 8.0613, 8.0979, 8.1364,
             8.1700, 8.1684, 8.0885, 8.1839, 8.1478, 8.1827, 8.029, 8.1000,
             8.2579, 8.2248, 8.2540, 6.8518, 6.8547, 6.8831, 6.9137, 6.8984,
             6.8888, 8.5189, 8.5308, 8.5184, 8.5222, 8.5705, 8.5353, 8.5213,
             8.3158, 8.1995, 8.2283, 8.1857, 8.2738, 8.2131, 8.2613, 8.2315,
             8.2078, 8.2996, 8.3026, 8.0995, 8.2990, 9.6753, 9.6687, 9.5704,
             9.5435, 9.6780, 9.7668, 9.7827, 9.7844, 9.7011, 9.8006, 9.7610,
             9.7813, 7.3073, 7.2572, 7.4686, 7.3659, 7.3587, 7.3132, 7.3542,
             7.2339, 7.4375, 7.4022, 10.7914, 10.6554, 10.7359, 10.7583,
             10.7735, 10.7907, 10.6465, 10.6994, 10.7756, 10.7402, 10.6800,
             10.7000, 10.8160, 10.6921, 10.8677, 12.3495, 12.4424, 12.4303,
             12.5086, 12.4513, 12.4625, 16.2290, 16.2781, 16.2082, 16.2715,
             16.2464, 16.1626, 16.1568]
        )
        # Weights penalizing the perturbations of xi[0] and xi[1].
        self.scale1 = 4.08
        self.scale2 = 0.417

    def fun(self, x):
        """Residuals: bilinear-model errors on both surfaces plus the
        scaled perturbations of the independent variables."""
        # Perturbed independent variables: nominal xi plus the free
        # adjustments stored in x[8:].
        xi = np.vstack(
            (self.xi[0] + x[8:8 + self.n0],
             self.xi[1] + x[8 + self.n0:])
        )
        # Bilinear models for the two surfaces.
        z1 = x[0] + x[1] * xi[0] + x[2] * xi[1] + x[3] * xi[0] * xi[1]
        z2 = x[4] + x[5] * xi[0] + x[6] * xi[1] + x[7] * xi[0] * xi[1]
        return np.hstack(
            (z1 - self.y[:self.n0],
             z2 - self.y[self.n0:],
             self.scale1 * x[8:8 + self.n0],
             self.scale2 * x[8 + self.n0:])
        )

    def jac(self, x):
        """Jacobian of `fun`; mostly sparse, filled group by group."""
        J = np.zeros((self.m, self.n))
        ind = np.arange(self.n0)
        xi = np.vstack(
            (self.xi[0] + x[8:8 + self.n0],
             self.xi[1] + x[8 + self.n0:])
        )
        # Rows 0..n0: surface-1 model w.r.t. coefficients x[0..3] and the
        # per-site perturbations.
        J[:self.n0, 0] = 1
        J[:self.n0, 1] = xi[0]
        J[:self.n0, 2] = xi[1]
        J[:self.n0, 3] = xi[0] * xi[1]
        J[ind, ind + 8] = x[1] + x[3] * xi[1]
        J[ind, ind + 8 + self.n0] = x[2] + x[3] * xi[0]
        # Rows n0..2*n0: surface-2 model w.r.t. x[4..7] and perturbations.
        J[self.n0:2 * self.n0, 4] = 1
        J[self.n0:2 * self.n0, 5] = xi[0]
        J[self.n0:2 * self.n0, 6] = xi[1]
        J[self.n0:2 * self.n0, 7] = xi[0] * xi[1]
        J[ind + self.n0, ind + 8] = x[5] + x[7] * xi[1]
        J[ind + self.n0, ind + 8 + self.n0] = x[6] + x[7] * xi[0]
        # Last two groups: constant-diagonal penalty terms.
        J[ind + 2 * self.n0, ind + 8] = self.scale1
        J[ind + 3 * self.n0, ind + 8 + self.n0] = self.scale2
        return J
class ExponentialFitting(LSQBenchmarkProblem):
    """The problem of fitting the sum of exponentials with linear degrees
    to data, [1]_.

    Model: y(t) = x0 + x1 * exp(-x3 * t) + x2 * exp(-x4 * t).
    Number of variables --- 5, number of residuals --- 33, no bounds.

    .. [1] Brett M. Averick et al. "The MINPACK-2 Test Problem Collection",
           p. 26
    """
    INITIAL_GUESSES = [
        np.array([0.5, 1.5, -1, 1e-2, 2e-2])
    ]

    def __init__(self, x0):
        super(ExponentialFitting, self).__init__(5, 33, 5.464895e-5, x0)
        # Sample times 0, 10, ..., 320 and the measured response.
        self.t = np.arange(self.m, dtype=float) * 10
        self.y = 1e-1 * np.array(
            [8.44, 9.08, 9.32, 9.36, 9.25, 9.08, 8.81, 8.5, 8.18,
             7.84, 7.51, 7.18, 6.85, 6.58, 6.28, 6.03, 5.8, 5.58,
             5.38, 5.22, 5.06, 4.9, 4.78, 4.67, 4.57, 4.48, 4.38,
             4.31, 4.24, 4.2, 4.14, 4.11, 4.06]
        )

    def fun(self, x):
        """Residuals: model evaluated at `self.t` minus measurements."""
        return (x[0] + x[1] * np.exp(-x[3] * self.t) +
                x[2] * np.exp(-x[4] * self.t) - self.y)

    def jac(self, x):
        """Analytic Jacobian of `fun`, one column per parameter."""
        J = np.empty((self.m, self.n))
        J[:, 0] = 1
        J[:, 1] = np.exp(-x[3] * self.t)
        J[:, 2] = np.exp(-x[4] * self.t)
        J[:, 3] = -x[1] * self.t * np.exp(-x[3] * self.t)
        J[:, 4] = -x[2] * self.t * np.exp(-x[4] * self.t)
        return J
class GaussianFitting(LSQBenchmarkProblem):
    """The problem of fitting the sum of exponentials with linear and
    quadratic degrees to data, [1]_.

    Model: one decaying exponential plus three Gaussian bumps with free
    amplitudes, widths and centers.
    Number of variables --- 11, number of residuals --- 65, no bounds.

    .. [1] Brett M. Averick et al. "The MINPACK-2 Test Problem Collection",
           p. 27
    """
    # [amplitudes x0..x3, decay/width parameters x4..x7, centers x8..x10]
    INITIAL_GUESSES = [
        np.array([1.3, 6.5e-1, 6.5e-1, 7.0e-1, 6.0e-1,
                  3.0, 5.0, 7.0, 2.0, 4.5, 5.5])
    ]

    def __init__(self, x0):
        super(GaussianFitting, self).__init__(11, 65, 4.013772e-02, x0)
        # Sample points 0, 0.1, ..., 6.4 and the measured signal.
        self.t = np.arange(self.m, dtype=float) * 1e-1
        self.y = np.array(
            [1.366, 1.191, 1.112, 1.013, 9.91e-1, 8.85e-1, 8.31e-1, 8.47e-1,
             7.86e-1, 7.25e-1, 7.46e-1, 6.79e-1, 6.08e-1, 6.55e-1, 6.16e-1,
             6.06e-1, 6.02e-1, 6.26e-1, 6.51e-1, 7.24e-1, 6.49e-1, 6.49e-1,
             6.94e-1, 6.44e-1, 6.24e-1, 6.61e-1, 6.12e-1, 5.58e-1, 5.33e-1,
             4.95e-1, 5.0e-1, 4.23e-1, 3.95e-1, 3.75e-1, 3.72e-1, 3.91e-1,
             3.96e-1, 4.05e-1, 4.28e-1, 4.29e-1, 5.23e-1, 5.62e-1, 6.07e-1,
             6.53e-1, 6.72e-1, 7.08e-1, 6.33e-1, 6.68e-1, 6.45e-1, 6.32e-1,
             5.91e-1, 5.59e-1, 5.97e-1, 6.25e-1, 7.39e-1, 7.1e-1, 7.29e-1,
             7.2e-1, 6.36e-1, 5.81e-1, 4.28e-1, 2.92e-1, 1.62e-1, 9.8e-2,
             5.4e-2]
        )

    def fun(self, x):
        """Residuals: exponential + three Gaussians minus measurements."""
        return (x[0] * np.exp(-x[4] * self.t) +
                x[1] * np.exp(-x[5] * (self.t - x[8]) ** 2) +
                x[2] * np.exp(-x[6] * (self.t - x[9]) ** 2) +
                x[3] * np.exp(-x[7] * (self.t - x[10]) ** 2) - self.y)

    def jac(self, x):
        """Analytic Jacobian of `fun`; reuses the four exponential terms."""
        J = np.empty((self.m, self.n))
        e0 = np.exp(-x[4] * self.t)
        e1 = np.exp(-x[5] * (self.t - x[8]) ** 2)
        e2 = np.exp(-x[6] * (self.t - x[9]) ** 2)
        e3 = np.exp(-x[7] * (self.t - x[10]) ** 2)
        # Columns 0-3: w.r.t. the amplitudes.
        J[:, 0] = e0
        J[:, 1] = e1
        J[:, 2] = e2
        J[:, 3] = e3
        # Columns 4-7: w.r.t. the decay/width parameters.
        J[:, 4] = -x[0] * self.t * e0
        J[:, 5] = -x[1] * (self.t - x[8]) ** 2 * e1
        J[:, 6] = -x[2] * (self.t - x[9]) ** 2 * e2
        J[:, 7] = -x[3] * (self.t - x[10]) ** 2 * e3
        # Columns 8-10: w.r.t. the Gaussian centers.
        J[:, 8] = 2 * x[1] * x[5] * (self.t - x[8]) * e1
        J[:, 9] = 2 * x[2] * x[6] * (self.t - x[9]) * e2
        J[:, 10] = 2 * x[3] * x[7] * (self.t - x[10]) * e3
        return J
class ThermistorResistance(LSQBenchmarkProblem):
    """The problem of fitting thermistor parameters to data, [1]_.

    Model: R(t) = x0 * exp(x1 / (t + x2)).
    Number of variables --- 3, number of residuals --- 16, no bounds.

    .. [1] Brett M. Averick et al. "The MINPACK-2 Test Problem Collection",
           p. 28
    """
    INITIAL_GUESSES = [
        np.array([2e-2, 4e3, 2.5e2])
    ]

    def __init__(self, x0_ind):
        # NOTE(review): the parameter is named `x0_ind` but receives the
        # initial-guess vector itself (cf. `x0` in the sibling classes).
        super(ThermistorResistance, self).__init__(3, 16, 87.94585, x0_ind)
        # Temperatures 50, 95, ..., 725 and the measured resistances.
        self.t = 5 + 45 * (1 + np.arange(self.m, dtype=float))
        self.y = np.array(
            [3.478e4, 2.861e4, 2.365e4, 1.963e4, 1.637e4, 1.372e4, 1.154e4,
             9.744e3, 8.261e3, 7.03e3, 6.005e3, 5.147e3, 4.427e3, 3.82e3,
             3.307e3, 2.872e3]
        )

    def fun(self, x):
        """Residuals: model resistance minus measured resistance."""
        return x[0] * np.exp(x[1] / (self.t + x[2])) - self.y

    def jac(self, x):
        """Analytic Jacobian of `fun`; reuses the exponential factor."""
        J = np.empty((self.m, self.n))
        e = np.exp(x[1] / (self.t + x[2]))
        J[:, 0] = e
        J[:, 1] = x[0] / (self.t + x[2]) * e
        J[:, 2] = -x[0] * x[1] * (self.t + x[2]) ** -2 * e
        return J
class EnzymeReaction(LSQBenchmarkProblem):
    """The problem of fitting kinetic parameters for an enzyme reaction, [1]_.

    Model: rate(u) = x0 * (u^2 + x1*u) / (u^2 + x2*u + x3).
    Number of variables --- 4, number of residuals --- 11, no bounds.

    .. [1] Brett M. Averick et al. "The MINPACK-2 Test Problem Collection",
           p. 29
    """
    INITIAL_GUESSES = [
        np.array([2.5, 3.9, 4.15, 3.9]) * 1e-1
    ]

    def __init__(self, x0_ind):
        # NOTE(review): the parameter is named `x0_ind` but receives the
        # initial-guess vector itself (cf. `x0` in the sibling classes).
        super(EnzymeReaction, self).__init__(4, 11, 3.075057e-04, x0_ind)
        # Substrate concentrations and measured reaction velocities.
        self.u = np.array([4.0, 2.0, 1.0, 5.0e-1, 2.5e-1, 1.67e-1,
                           1.25e-1, 1.0e-1, 8.33e-2, 7.14e-2, 6.25e-2])
        self.y = np.array([1.957e-1, 1.947e-1, 1.735e-1, 1.6e-1, 8.44e-2,
                           6.27e-2, 4.56e-2, 3.42e-2, 3.23e-2, 2.35e-2,
                           2.46e-2])

    def fun(self, x):
        """Residuals: rational kinetic model minus measured velocities."""
        return (x[0] * (self.u ** 2 + x[1] * self.u) /
                (self.u ** 2 + x[2] * self.u + x[3]) - self.y)

    def jac(self, x):
        """Analytic Jacobian of `fun` via the quotient rule."""
        J = np.empty((self.m, self.n))
        den = self.u ** 2 + x[2] * self.u + x[3]
        num = self.u ** 2 + x[1] * self.u
        J[:, 0] = num / den
        J[:, 1] = x[0] * self.u / den
        J[:, 2] = -x[0] * num * self.u / den ** 2
        J[:, 3] = -x[0] * num / den ** 2
        return J
class ChebyshevQuadrature(LSQBenchmarkProblem):
    """The problem of determining the optimal nodes of a quadrature formula
    with equal weights, [1]_.

    Number of variables --- 11, number of residuals --- 11, no bounds.

    .. [1] Brett M. Averick et al. "The MINPACK-2 Test Problem Collection",
           p. 30
    """
    # Equispaced nodes in (0, 1) as the starting point.
    INITIAL_GUESSES = [
        (1 + np.arange(11, dtype=float)) / 12
    ]

    def __init__(self, x0):
        super(ChebyshevQuadrature, self).__init__(11, 11, 2.799761e-03, x0)
        # Shifted Chebyshev polynomials T_0..T_10 on the domain [0, 1].
        cp = Chebyshev(1)
        self.T_all = [cp.basis(i, domain=[0.0, 1.0]) for i in range(11)]

    def fun(self, x):
        """Residuals: mean of T_i over the nodes minus the exact integral
        of T_i over [0, 1], for i = 0, ..., m-1."""
        # Fixed: the residual vector has length m (number of residuals),
        # not n (number of variables); the previous np.empty(self.n) only
        # worked because m == n for this problem.
        f = np.empty(self.m)
        for i in range(self.m):
            T = self.T_all[i]
            f[i] = np.mean(T(x)) - T.integ(lbnd=0.0)(1.0)
        return f

    def jac(self, x):
        """Jacobian: d mean(T_i(x)) / dx_j = T_i'(x_j) / n."""
        J = np.empty((self.m, self.n))
        for i in range(self.m):
            T = self.T_all[i]
            J[i] = T.deriv()(x)
        J /= self.n
        return J
def extract_lsq_problems():
    """Collect every least squares problem defined in this module.

    Returns
    -------
    OrderedDict, str -> LSQBenchmarkProblem
        Keys are problem names (suffixed with an index when a class
        provides several initial guesses); values are problem instances.
    """
    problems = OrderedDict()
    members = inspect.getmembers(sys.modules[__name__], inspect.isclass)
    for name, cls in members:
        # Keep only concrete subclasses of the benchmark base class.
        is_problem = (name != "LSQBenchmarkProblem" and
                      issubclass(cls, LSQBenchmarkProblem) and
                      hasattr(cls, 'INITIAL_GUESSES'))
        if not is_problem:
            continue
        guesses = cls.INITIAL_GUESSES
        for i, x0 in enumerate(guesses):
            # Disambiguate only when there are multiple starting points.
            key = "{}_{}".format(name, i) if len(guesses) > 1 else name
            problems[key] = cls(x0)
    return problems

View file

@ -1,277 +0,0 @@
from __future__ import division, print_function, absolute_import
import time
from collections import defaultdict
import numpy as np
try:
import scipy.optimize
from scipy.optimize.optimize import rosen, rosen_der, rosen_hess
from scipy.optimize import leastsq
except ImportError:
pass
from . import test_functions as funcs
from .common import Benchmark
from .lsq_problems import extract_lsq_problems
class _BenchOptimizers(Benchmark):
    """a framework for benchmarking the optimizer

    Parameters
    ----------
    function_name : string
        Human-readable name used in the printed report.
    fun : callable
        Objective function to minimize.
    der : callable
        function that returns the derivative (jacobian, gradient) of fun
    hess : callable
        function that returns the hessian of fun
    minimizer_kwargs : kwargs
        additional keywords passed to the minimizer.  e.g. tol, maxiter
    """
    def __init__(self, function_name, fun, der=None, hess=None,
                 **minimizer_kwargs):
        self.function_name = function_name
        self.fun = fun
        self.der = der
        self.hess = hess
        self.minimizer_kwargs = minimizer_kwargs
        # Common default tolerance so all solvers target the same accuracy.
        if "tol" not in minimizer_kwargs:
            minimizer_kwargs["tol"] = 1e-4
        self.results = []

    def reset(self):
        # Discard all accumulated optimization results.
        self.results = []

    def add_result(self, result, t, name):
        """add a result to the list"""
        result.time = t
        result.name = name
        # Some solvers don't report jacobian/hessian evaluation counts;
        # normalize so averaging below never fails.
        if not hasattr(result, "njev"):
            result.njev = 0
        if not hasattr(result, "nhev"):
            result.nhev = 0
        self.results.append(result)

    def print_results(self):
        """print the current list of results"""
        results = self.average_results()
        # Rank solvers: fewest failures first, then fastest mean time.
        results = sorted(results, key=lambda x: (x.nfail, x.mean_time))
        if not results:
            return
        print("")
        print("=========================================================")
        print("Optimizer benchmark: %s" % (self.function_name))
        print("dimensions: %d, extra kwargs: %s" % (results[0].ndim, str(self.minimizer_kwargs)))
        print("averaged over %d starting configurations" % (results[0].ntrials))
        print(" Optimizer nfail nfev njev nhev time")
        print("---------------------------------------------------------")
        for res in results:
            print("%11s | %4d | %4d | %4d | %4d | %.6g" %
                  (res.name, res.nfail, res.mean_nfev, res.mean_njev, res.mean_nhev, res.mean_time))

    def average_results(self):
        """group the results by minimizer and average over the runs"""
        grouped_results = defaultdict(list)
        for res in self.results:
            grouped_results[res.name].append(res)
        averaged_results = dict()
        for name, result_list in grouped_results.items():
            # Aggregate the per-run counters into a synthetic result.
            newres = scipy.optimize.OptimizeResult()
            newres.name = name
            newres.mean_nfev = np.mean([r.nfev for r in result_list])
            newres.mean_njev = np.mean([r.njev for r in result_list])
            newres.mean_nhev = np.mean([r.nhev for r in result_list])
            newres.mean_time = np.mean([r.time for r in result_list])
            newres.ntrials = len(result_list)
            newres.nfail = len([r for r in result_list if not r.success])
            # Scalar problems have a scalar x with no len().
            try:
                newres.ndim = len(result_list[0].x)
            except TypeError:
                newres.ndim = 1
            averaged_results[name] = newres
        return averaged_results.values()

    def bench_run(self, x0, methods=None, **minimizer_kwargs):
        """do an optimization test starting at x0 for all the optimizers"""
        kwargs = self.minimizer_kwargs
        if methods is None:
            methods = ["COBYLA", 'Powell',
                       'L-BFGS-B', 'BFGS', 'CG', 'TNC', 'SLSQP',
                       "Newton-CG", 'dogleg', 'trust-ncg']

        # Derivative-free solvers: only the objective is passed.
        fonly_methods = ["COBYLA", 'Powell']
        for method in fonly_methods:
            if method not in methods:
                continue
            t0 = time.time()
            res = scipy.optimize.minimize(self.fun, x0, method=method,
                                          **kwargs)
            t1 = time.time()
            self.add_result(res, t1-t0, method)

        # Gradient-based solvers: run only when a derivative is available.
        gradient_methods = ['L-BFGS-B', 'BFGS', 'CG', 'TNC', 'SLSQP']
        if self.der is not None:
            for method in gradient_methods:
                if method not in methods:
                    continue
                t0 = time.time()
                res = scipy.optimize.minimize(self.fun, x0, method=method,
                                              jac=self.der, **kwargs)
                t1 = time.time()
                self.add_result(res, t1-t0, method)

        # Hessian-based solvers: require both gradient and hessian.
        hessian_methods = ["Newton-CG", 'dogleg', 'trust-ncg']
        if self.hess is not None:
            for method in hessian_methods:
                if method not in methods:
                    continue
                t0 = time.time()
                res = scipy.optimize.minimize(self.fun, x0, method=method,
                                              jac=self.der, hess=self.hess,
                                              **kwargs)
                t1 = time.time()
                self.add_result(res, t1-t0, method)
class BenchSmoothUnbounded(Benchmark):
    """Benchmark the optimizers with smooth, unbounded, functions"""
    params = [
        ['rosenbrock', 'rosenbrock_tight',
         'simple_quadratic', 'asymmetric_quadratic',
         'sin_1d', 'booth', 'beale', 'LJ'],
        ["COBYLA", 'Powell',
         'L-BFGS-B', 'BFGS', 'CG', 'TNC', 'SLSQP',
         "Newton-CG", 'dogleg', 'trust-ncg'],
        ["mean_nfev", "mean_time"]
    ]
    param_names = ["test function", "solver", "result type"]

    def setup(self, func_name, method_name, ret_val):
        """Run the chosen solver on the chosen function and stash the
        requested averaged statistic for track_all to report."""
        b = getattr(self, 'run_' + func_name)(methods=[method_name])
        results = b.average_results()
        result = None
        for r in results:
            if r.name == method_name:
                result = getattr(r, ret_val)
                break
        # Solver not applicable (e.g. needs derivatives): skip the case.
        if result is None:
            raise NotImplementedError()
        self.result = result

    def track_all(self, func_name, method_name, ret_val):
        # The work happens in setup(); this just reports the number.
        return self.result

    # Each run_* method benchmarks one test function from 10 random
    # starting points and returns the populated _BenchOptimizers.
    def run_rosenbrock(self, methods=None):
        b = _BenchOptimizers("Rosenbrock function",
                             fun=rosen, der=rosen_der, hess=rosen_hess)
        for i in range(10):
            b.bench_run(np.random.uniform(-3,3,3), methods=methods)
        return b

    def run_rosenbrock_tight(self, methods=None):
        # Same problem with a much tighter tolerance.
        b = _BenchOptimizers("Rosenbrock function",
                             fun=rosen, der=rosen_der, hess=rosen_hess,
                             tol=1e-8)
        for i in range(10):
            b.bench_run(np.random.uniform(-3,3,3), methods=methods)
        return b

    def run_simple_quadratic(self, methods=None):
        s = funcs.SimpleQuadratic()
        #    print "checking gradient", scipy.optimize.check_grad(s.fun, s.der, np.array([1.1, -2.3]))
        b = _BenchOptimizers("simple quadratic function",
                             fun=s.fun, der=s.der, hess=s.hess)
        for i in range(10):
            b.bench_run(np.random.uniform(-2,2,3), methods=methods)
        return b

    def run_asymmetric_quadratic(self, methods=None):
        s = funcs.AsymmetricQuadratic()
        #    print "checking gradient", scipy.optimize.check_grad(s.fun, s.der, np.array([1.1, -2.3]))
        b = _BenchOptimizers("function sum(x**2) + x[0]",
                             fun=s.fun, der=s.der, hess=s.hess)
        for i in range(10):
            b.bench_run(np.random.uniform(-2,2,3), methods=methods)
        return b

    def run_sin_1d(self, methods=None):
        fun = lambda x: np.sin(x[0])
        der = lambda x: np.array([np.cos(x[0])])
        b = _BenchOptimizers("1d sin function",
                             fun=fun, der=der, hess=None)
        for i in range(10):
            b.bench_run(np.random.uniform(-2,2,1), methods=methods)
        return b

    def run_booth(self, methods=None):
        s = funcs.Booth()
        #    print "checking gradient", scipy.optimize.check_grad(s.fun, s.der, np.array([1.1, -2.3]))
        b = _BenchOptimizers("Booth's function",
                             fun=s.fun, der=s.der, hess=None)
        for i in range(10):
            b.bench_run(np.random.uniform(0,10,2), methods=methods)
        return b

    def run_beale(self, methods=None):
        s = funcs.Beale()
        #    print "checking gradient", scipy.optimize.check_grad(s.fun, s.der, np.array([1.1, -2.3]))
        b = _BenchOptimizers("Beale's function",
                             fun=s.fun, der=s.der, hess=None)
        for i in range(10):
            b.bench_run(np.random.uniform(0,10,2), methods=methods)
        return b

    def run_LJ(self, methods=None):
        s = funcs.LJ()
        #    print "checking gradient", scipy.optimize.check_grad(s.get_energy, s.get_gradient, np.random.uniform(-2,2,3*4))
        natoms = 4
        b = _BenchOptimizers("%d atom Lennard Jones potential" % (natoms),
                             fun=s.fun, der=s.der, hess=None)
        for i in range(10):
            b.bench_run(np.random.uniform(-2,2,natoms*3), methods=methods)
        return b
class BenchLeastSquares(Benchmark):
    """Class for benchmarking nonlinear least squares solvers."""

    # One benchmark case per problem collected from lsq_problems.
    problems = extract_lsq_problems()
    params = [
        list(problems.keys()),
        ["average time", "nfev", "success"]
    ]
    param_names = [
        "problem", "result type"
    ]

    def track_all(self, problem_name, result_type):
        """Run `leastsq` on the named problem and report the requested
        statistic (mean wall time, function evaluations, or success)."""
        problem = self.problems[problem_name]
        # leastsq is unconstrained; bounded problems are not applicable.
        if problem.lb is not None or problem.ub is not None:
            raise NotImplementedError

        ftol = 1e-5

        if result_type == 'average time':
            n_runs = 10
            t0 = time.time()
            for _ in range(n_runs):
                leastsq(problem.fun, problem.x0, Dfun=problem.jac, ftol=ftol,
                        full_output=True)
            return (time.time() - t0) / n_runs

        x, cov_x, info, message, ier = leastsq(
            problem.fun, problem.x0, Dfun=problem.jac,
            ftol=ftol, full_output=True
        )
        if result_type == 'nfev':
            return info['nfev']
        elif result_type == 'success':
            # 1 if the found point is close enough to the known optimum.
            return int(problem.check_answer(x, ftol))
        else:
            raise NotImplementedError

View file

@ -1,29 +0,0 @@
from __future__ import division, print_function, absolute_import
from math import sqrt
# Import testing parameters
try:
from scipy.optimize._tstutils import methods, mstrings, functions, fstrings
except ImportError:
pass
from .common import Benchmark
class Zeros(Benchmark):
    """Benchmark the scalar root finders on the standard test problems."""

    params = [
        fstrings,
        mstrings
    ]
    param_names = ['test function', 'solver']

    def setup(self, func, meth):
        # Bracketing interval shared by all of the test functions.
        self.xa = 0.5
        self.xb = sqrt(3)
        self.f = dict(zip(fstrings, functions))[func]
        self.solver = dict(zip(mstrings, methods))[meth]

    def time_zeros(self, func, meth):
        self.solver(self.f, self.xa, self.xb)

View file

@ -1,68 +0,0 @@
from __future__ import division, absolute_import, print_function
from itertools import product
import numpy as np
try:
from scipy.signal import (convolve2d, correlate2d, lti, lsim, lsim2, welch,
csd)
except ImportError:
pass
from .common import Benchmark
class CalculateWindowedFFT(Benchmark):
    """Benchmark spectral estimation (welch, csd) on long signals."""

    def setup(self):
        np.random.seed(5678)
        # Two long random signals so the windowed FFTs dominate runtime.
        n = 2 ** 20
        self.x = np.random.randn(n)
        self.y = np.random.randn(n)

    def time_welch(self):
        welch(self.x)

    def time_csd(self):
        csd(self.x, self.y)
class Convolve2D(Benchmark):
    """Benchmark 2-D convolution/correlation across many shape pairs."""

    def setup(self):
        np.random.seed(1234)
        # Sample pairs of random 2-D arrays over a grid of dimensions.
        dims = (1, 2, 8, 13, 30)
        self.pairs = [
            (np.random.randn(ma, na), np.random.randn(mb, nb))
            for ma, na, mb, nb in product(dims, repeat=4)
        ]

    def time_convolutions(self):
        funcs = (convolve2d, correlate2d)
        modes = ('full', 'valid', 'same')
        boundaries = ('fill', 'wrap', 'symm')
        for a, b in self.pairs:
            for fn, mode, boundary in product(funcs, modes, boundaries):
                # 'valid' requires the second operand to fit in the first.
                if mode == 'valid' and (b.shape[0] > a.shape[0] or
                                        b.shape[1] > a.shape[1]):
                    continue
                fn(a, b, mode=mode, boundary=boundary)
class LTI(Benchmark):
    """Benchmark LTI simulation via lsim and lsim2."""

    def setup(self):
        # Simple second-order system driven by a sine input.
        self.system = lti(1.0, [1, 0, 1])
        self.t = np.arange(0, 100, 0.5)
        self.u = np.sin(2 * self.t)

    def time_lsim(self):
        lsim(self.system, self.u, self.t)

    def time_lsim2(self):
        lsim2(self.system, self.u, self.t)

View file

@ -1,336 +0,0 @@
"""
Simple benchmarks for the sparse module
"""
from __future__ import division, print_function, absolute_import
import warnings
import time
import collections
import timeit
import numpy
import numpy as np
from numpy import ones, array, asarray, empty, random, zeros
try:
from scipy import sparse
from scipy.sparse import (csr_matrix, coo_matrix, dia_matrix, lil_matrix,
dok_matrix, rand, SparseEfficiencyWarning)
except ImportError:
pass
from .common import Benchmark
def random_sparse(m, n, nnz_per_row):
    """Return a random m-by-n CSR matrix with nnz_per_row entries per row.

    Duplicate (row, col) pairs may be drawn; the coo->csr conversion sums
    them, so a row can end up with fewer stored entries.
    """
    rows = numpy.arange(m).repeat(nnz_per_row)
    # randint(0, n) draws from [0, n) -- equivalent to the removed
    # numpy.random.random_integers(low=0, high=n-1) inclusive form.
    cols = numpy.random.randint(low=0, high=n, size=nnz_per_row*m)
    vals = numpy.random.random_sample(m*nnz_per_row)
    return coo_matrix((vals, (rows, cols)), (m, n)).tocsr()
# TODO move this to a matrix gallery and add unittests
def poisson2d(N, dtype='d', format=None):
    """
    Return a sparse matrix for the 2D Poisson problem
    with standard 5-point finite difference stencil on a
    square N-by-N grid.
    """
    if N == 1:
        # Degenerate 1x1 grid: a single node with diagonal value 4.
        return dia_matrix((asarray([[4]], dtype=dtype), [0]),
                          shape=(1, 1)).asformat(format)

    size = N ** 2
    data = empty((5, size), dtype=dtype)
    data[0, :] = 4    # main diagonal
    data[1:, :] = -1  # the four off-diagonals
    # Zero the +/-1 couplings that would wrap across grid-row boundaries.
    data[3, N-1::N] = 0  # first lower diagonal
    data[4, N::N] = 0    # first upper diagonal
    offsets = array([0, -N, N, -1, 1])
    return dia_matrix((data, offsets), shape=(size, size)).asformat(format)
class Arithmetic(Benchmark):
    """Benchmark binary arithmetic between sparse operands."""

    param_names = ['format', 'XY', 'op']
    params = [
        ['csr'],
        ['AA', 'AB', 'BA', 'BB'],
        ['__add__', '__sub__', 'multiply', '__mul__']
    ]

    def setup(self, format, XY, op):
        # A: 5-point Poisson operator; B: its square (denser pattern).
        self.matrices = {'A': poisson2d(250, format='csr'),
                         'B': poisson2d(250, format='csr')**2}
        lhs, rhs = XY
        operands = dict((name, mat.asformat(format))
                        for name, mat in self.matrices.items())
        self.x = operands[lhs]
        self.y = operands[rhs]
        self.fn = getattr(self.x, op)
        # Warm-up call so one-time costs don't pollute the timing.
        self.fn(self.y)

    def time_arithmetic(self, format, XY, op):
        self.fn(self.y)
class Sort(Benchmark):
    """Benchmark in-place sorting of CSR column indices."""

    params = ['Rand10', 'Rand25', 'Rand50', 'Rand100', 'Rand200']
    param_names = ['matrix']

    def setup(self, matrix):
        # 'RandK' -> 1e4 x 1e4 random matrix with K entries per row.
        self.matrices = {'Rand10': (1e4, 10),
                         'Rand25': (1e4, 25),
                         'Rand50': (1e4, 50),
                         'Rand100': (1e4, 100),
                         'Rand200': (1e4, 200)}
        size, per_row = self.matrices[matrix]
        size = int(float(size))
        per_row = int(float(per_row))
        self.A = random_sparse(size, size, per_row)

    def time_sort(self, matrix):
        """sort CSR column indices"""
        # Perturb the first two indices so there is real sorting to do.
        self.A.has_sorted_indices = False
        self.A.indices[:2] = 2, 1
        self.A.sort_indices()
class Matvec(Benchmark):
    """Benchmark sparse matrix-vector products across storage formats."""

    param_names = ['matrix']

    @property
    def params(self):
        return list(sorted(self._get_matrices().keys()))

    def _get_matrices(self):
        """Build the named test matrices: identities, Poisson operators in
        every format, and block matrices built with a Kronecker product."""
        matrices = collections.OrderedDict()
        matrices['Identity_dia'] = sparse.eye(10**4,10**4,format='dia')
        matrices['Identity_csr'] = sparse.eye(10**4,10**4,format='csr')
        matrices['Poisson5pt_lil'] = poisson2d(300,format='lil')
        matrices['Poisson5pt_dok'] = poisson2d(300,format='dok')
        matrices['Poisson5pt_dia'] = poisson2d(300,format='dia')
        matrices['Poisson5pt_coo'] = poisson2d(300,format='coo')
        matrices['Poisson5pt_csr'] = poisson2d(300,format='csr')
        matrices['Poisson5pt_csc'] = poisson2d(300,format='csc')
        matrices['Poisson5pt_bsr'] = poisson2d(300,format='bsr')
        A = sparse.kron(poisson2d(150),ones((2,2))).tobsr(blocksize=(2,2))
        matrices['Block2x2_csr'] = A.tocsr()
        matrices['Block2x2_bsr'] = A
        A = sparse.kron(poisson2d(100),ones((3,3))).tobsr(blocksize=(3,3))
        matrices['Block3x3_csr'] = A.tocsr()
        matrices['Block3x3_bsr'] = A
        return matrices

    def setup(self, matrix):
        self.matrices = self._get_matrices()
        self.A = self.matrices[matrix]
        # Vector sized for the selected matrix.  (A previous assignment
        # sized to the global maximum was dead code -- it was immediately
        # overwritten -- and has been removed.)
        self.x = ones(self.A.shape[1], dtype=float)

    def time_matvec(self, matrix):
        self.A * self.x
class Matvecs(Benchmark):
    """Benchmark sparse matrix times dense multi-vector products."""

    params = ['dia', 'coo', 'csr', 'csc', 'bsr']
    param_names = ["format"]

    def setup(self, *args):
        # The same Poisson operator stored in every benchmarked format.
        self.matrices = dict((f, poisson2d(300, format=f))
                             for f in ('dia', 'coo', 'csr', 'csc', 'bsr'))
        A = self.matrices['dia']
        # Ten right-hand-side vectors multiplied at once.
        self.x = ones((A.shape[1], 10), dtype=A.dtype)

    def time_matvecs(self, fmt):
        A = self.matrices[fmt]
        A * self.x
class Matmul(Benchmark):
    """Benchmark multiplication of two large, very sparse CSR matrices."""

    def setup(self):
        height1, width1 = 1, 100000
        height2, width2 = width1, 1000
        count1 = 10
        count2 = 1000000
        random.seed(0)
        left = lil_matrix(zeros((height1, width1)))
        right = lil_matrix(zeros((height2, width2)))
        # Scatter random entries; LIL supports cheap element assignment.
        for _ in range(count1):
            left[random.randint(height1), random.randint(width1)] = random.rand()
        for _ in range(count2):
            right[random.randint(height2), random.randint(width2)] = random.rand()
        self.matrix1 = left.tocsr()
        self.matrix2 = right.tocsr()

    def time_large(self):
        for _ in range(100):
            self.matrix1 * self.matrix2
class Construction(Benchmark):
    """Benchmark element-wise construction of lil/dok matrices from COO."""

    params = [
        ['Empty', 'Identity', 'Poisson5pt'],
        ['lil', 'dok']
    ]
    param_names = ['matrix', 'format']

    def setup(self, name, format):
        self.matrices = {'Empty': csr_matrix((10000, 10000)),
                         'Identity': sparse.eye(10000),
                         'Poisson5pt': poisson2d(100)}
        self.formats = {'lil': lil_matrix, 'dok': dok_matrix}
        self.cls = self.formats[format]
        # COO gives direct access to the (i, j, v) triplets.
        self.A = self.matrices[name].tocoo()

    def time_construction(self, name, format):
        T = self.cls(self.A.shape)
        for i, j, v in zip(self.A.row, self.A.col, self.A.data):
            T[i, j] = v
class Conversion(Benchmark):
    """Benchmark conversion between every pair of sparse formats."""

    params = [
        ['csr','csc','coo','dia','lil','dok'],
        ['csr','csc','coo','dia','lil','dok'],
    ]
    param_names = ['from_format', 'to_format']

    def setup(self, fromfmt, tofmt):
        self.A = poisson2d(100)
        base = getattr(self.A, 'to' + fromfmt)()
        # Narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt/SystemExit): only a missing converter method
        # should fall back to the failing stub.
        try:
            self.fn = getattr(base, 'to' + tofmt)
        except AttributeError:
            def fn():
                raise RuntimeError()
            self.fn = fn

    def time_conversion(self, fromfmt, tofmt):
        self.fn()
class Getset(Benchmark):
    """Benchmark fancy getitem/setitem on sparse matrices.

    Reported via track_* with a hand-rolled timer, because setitem may
    change the sparsity pattern and so can require a fresh copy per trial.
    """
    params = [
        [1, 10, 100, 1000, 10000],
        ['different', 'same'],
        ['csr', 'csc', 'lil', 'dok']
    ]
    param_names = ['N', 'sparsity pattern', 'format']
    unit = "seconds"

    def setup(self, N, sparsity_pattern, format):
        # dok element access is very slow; skip the large cases.
        if format == 'dok' and N > 500:
            raise NotImplementedError()

        self.A = rand(1000, 1000, density=1e-5)

        A = self.A
        N = int(N)

        # indices to assign to
        i, j = [], []
        while len(i) < N:
            n = N - len(i)
            ip = numpy.random.randint(0, A.shape[0], size=n)
            jp = numpy.random.randint(0, A.shape[1], size=n)
            i = numpy.r_[i, ip]
            j = numpy.r_[j, jp]
        v = numpy.random.rand(n)

        # Scalar case: index with plain ints and a float value.
        if N == 1:
            i = int(i)
            j = int(j)
            v = float(v)

        base = A.asformat(format)

        self.m = base.copy()
        self.i = i
        self.j = j
        self.v = v

    def _timeit(self, kernel, recopy):
        """Return the best per-call time of kernel(m, i, j, v).

        When `recopy` is true a fresh copy of the matrix is used for each
        trial (the kernel mutates the pattern); otherwise one warm-up call
        is made and the same matrix is reused.
        """
        min_time = 1e99
        if not recopy:
            kernel(self.m, self.i, self.j, self.v)

        number = 1
        start = time.time()
        while time.time() - start < 0.1:
            if recopy:
                m = self.m.copy()
            else:
                m = self.m
            # Grow `number` until a single measurement is long enough to
            # be trustworthy, then record the per-call time.
            while True:
                duration = timeit.timeit(lambda: kernel(m, self.i, self.j, self.v),
                                         number=number)
                if duration > 1e-5:
                    break
                else:
                    number *= 10
            min_time = min(min_time, duration/number)
        return min_time

    def track_fancy_setitem(self, N, sparsity_pattern, format):
        """Best time for fancy assignment A[i, j] = v."""
        def kernel(A, i, j, v):
            A[i, j] = v

        with warnings.catch_warnings():
            warnings.simplefilter('ignore', SparseEfficiencyWarning)
            return self._timeit(kernel, sparsity_pattern == 'different')

    def track_fancy_getitem(self, N, sparsity_pattern, format):
        """Best time for fancy indexing A[i, j]."""
        def kernel(A, i, j, v):
            A[i, j]

        with warnings.catch_warnings():
            warnings.simplefilter('ignore', SparseEfficiencyWarning)
            return self._timeit(kernel, sparsity_pattern == 'different')
class NullSlice(Benchmark):
    """Benchmark row and column slicing of a large random CSR matrix."""

    params = [[0.05, 0.01]]
    param_names = ['density']

    def setup(self, density):
        # 100000 x 1000 random matrix in CSR format at the given density.
        n_rows, n_cols = 100000, 1000
        self.X = sparse.rand(n_rows, n_cols, format='csr', density=density)

    def time_3_rows(self, density):
        self.X[[0, 100, 105], :]

    def time_10000_rows(self, density):
        self.X[np.arange(10000), :]

    def time_3_cols(self, density):
        self.X[:, [0, 100, 105]]

    def time_100_cols(self, density):
        self.X[:, np.arange(100)]

View file

@ -1,86 +0,0 @@
"""benchmarks for the scipy.sparse.linalg._expm_multiply module"""
from __future__ import division, print_function, absolute_import
import math
import numpy as np
try:
import scipy.linalg
from scipy.sparse.linalg import expm_multiply
except ImportError:
pass
from .common import Benchmark
def random_sparse_csr(m, n, nnz_per_row):
    """Return a random m-by-n CSR matrix with ~nnz_per_row entries per row.

    Duplicate (row, col) pairs may be drawn; the COO->CSR conversion sums
    them, so the actual nnz can be slightly below ``m * nnz_per_row``.
    """
    # Copied from the scipy.sparse benchmark.
    rows = np.arange(m).repeat(nnz_per_row)
    # np.random.random_integers is deprecated and removed in modern NumPy;
    # randint(0, n) draws from [0, n-1], identical to random_integers(0, n-1).
    cols = np.random.randint(0, n, size=nnz_per_row*m)
    vals = np.random.random_sample(m*nnz_per_row)
    M = scipy.sparse.coo_matrix((vals, (rows, cols)), (m, n), dtype=float)
    return M.tocsr()
def random_sparse_csc(m, n, nnz_per_row):
    """Return a random m-by-n CSC matrix with ~nnz_per_row entries per row.

    Same construction as :func:`random_sparse_csr`, but converted to CSC.
    """
    # Copied from the scipy.sparse benchmark.
    rows = np.arange(m).repeat(nnz_per_row)
    # np.random.random_integers is deprecated and removed in modern NumPy;
    # randint(0, n) draws from [0, n-1], identical to random_integers(0, n-1).
    cols = np.random.randint(0, n, size=nnz_per_row*m)
    vals = np.random.random_sample(m*nnz_per_row)
    M = scipy.sparse.coo_matrix((vals, (rows, cols)), (m, n), dtype=float)
    # Use csc instead of csr, because sparse LU decomposition
    # raises a warning when I use csr.
    return M.tocsc()
class ExpmMultiply(Benchmark):
    """Benchmark extracting one entry of expm(A): sparse expm_multiply
    versus computing the full dense matrix exponential."""
    params = [['sparse', 'full']]
    param_names = ['run format']

    def setup(self, *args):
        self.n = 2000
        # (i, j): the single entry of expm(A) both variants extract.
        self.i = 100
        self.j = 200
        nnz_per_row = 25
        self.A = random_sparse_csr(self.n, self.n, nnz_per_row)
        self.A_dense = self.A.toarray()

    def time_expm_multiply(self, format):
        if format == 'full':
            # Compute the full expm of the dense array, then index it.
            A_expm = scipy.linalg.expm(self.A_dense)
            A_expm[self.i, self.j]
        else:
            # Compute only column j of expm of the sparse matrix, by
            # applying expm(A) to the j-th unit vector.
            v = np.zeros(self.n, dtype=float)
            v[self.j] = 1
            A_expm_col_j = expm_multiply(self.A, v)
            A_expm_col_j[self.i]
class Expm(Benchmark):
    """Benchmark scipy.linalg.expm on sparse versus dense input."""
    params = [
        [30, 100, 300],
        ['sparse', 'dense']
    ]
    param_names = ['n', 'format']

    def setup(self, n, format):
        np.random.seed(1234)
        # Let the number of nonzero entries per row
        # scale like the log of the order of the matrix.
        nnz_per_row = int(math.ceil(math.log(n)))
        # Sample a random sparse (CSC) matrix of order n.
        self.A_sparse = random_sparse_csc(n, n, nnz_per_row)
        # Dense copy of the same matrix for the 'dense' variant.
        self.A_dense = self.A_sparse.toarray()

    def time_expm(self, n, format):
        if format == 'sparse':
            scipy.linalg.expm(self.A_sparse)
        elif format == 'dense':
            scipy.linalg.expm(self.A_dense)

View file

@ -1,108 +0,0 @@
from __future__ import division, absolute_import, print_function
from functools import partial
import numpy as np
try:
from scipy import array, r_, ones, arange, sort, diag, cos, rand, pi
from scipy.linalg import eigh, orth, cho_factor, cho_solve
import scipy.sparse
from scipy.sparse.linalg import lobpcg
from scipy.sparse.linalg.interface import LinearOperator
except ImportError:
pass
from .common import Benchmark
def _sakurai(n):
""" Example taken from
T. Sakurai, H. Tadano, Y. Inadomi and U. Nagashima
A moment-based method for large-scale generalized eigenvalue problems
Appl. Num. Anal. Comp. Math. Vol. 1 No. 2 (2004) """
A = scipy.sparse.eye(n, n)
d0 = array(r_[5,6*ones(n-2),5])
d1 = -4*ones(n)
d2 = ones(n)
B = scipy.sparse.spdiags([d2,d1,d0,d1,d2],[-2,-1,0,1,2],n,n)
k = arange(1,n+1)
w_ex = sort(1./(16.*pow(cos(0.5*k*pi/(n+1)),4))) # exact eigenvalues
return A, B, w_ex
def _mikota_pair(n):
# Mikota pair acts as a nice test since the eigenvalues
# are the squares of the integers n, n=1,2,...
x = arange(1,n+1)
B = diag(1./x)
y = arange(n-1,0,-1)
z = arange(2*n-1,0,-2)
A = diag(z)-diag(y,-1)-diag(y,1)
return A.astype(float), B.astype(float)
def _as2d(ar):
    """View `ar` as a 2-D column array.

    A 2-D input is returned unchanged; a 1-D input of length m is
    reshaped to (m, 1).  NOTE(review): ``np.array(ar, copy=False)`` may
    return `ar` itself, in which case the in-place ``aux.shape``
    assignment also reshapes the caller's array — confirm this is
    intended (callers here pass freshly created arrays, so it is benign).
    """
    if ar.ndim == 2:
        return ar
    else:  # Assume 1!
        aux = np.array(ar, copy=False)
        aux.shape = (ar.shape[0], 1)
        return aux
def _precond(LorU, lower, x):
    """Apply the Cholesky-based preconditioner: solve A y = x, as 2-D."""
    solution = cho_solve((LorU, lower), x)
    return _as2d(solution)
class Bench(Benchmark):
    """Benchmark lobpcg against dense eigh on two eigenproblem families."""
    params = [
        [],  # placeholder; each time_* method gets its own sizes in __init__
        ['lobpcg', 'eigh']
    ]
    param_names = ['n', 'solver']

    def __init__(self):
        # asv reads params/setup from the functions themselves, so patch
        # each bound method's underlying function with its own problem
        # sizes and setup routine.
        self.time_mikota.__func__.params = list(self.params)
        self.time_mikota.__func__.params[0] = [128, 256, 512, 1024, 2048]
        self.time_mikota.__func__.setup = self.setup_mikota

        self.time_sakurai.__func__.params = list(self.params)
        self.time_sakurai.__func__.params[0] = [50, 400, 2400]
        self.time_sakurai.__func__.setup = self.setup_sakurai

    def setup_mikota(self, n, solver):
        self.shape = (n, n)
        self.A, self.B = _mikota_pair(n)

    def setup_sakurai(self, n, solver):
        self.shape = (n, n)
        self.A, self.B, all_eigenvalues = _sakurai(n)
        # Dense copies for the eigh reference solver.
        self.A_dense = self.A.A
        self.B_dense = self.B.A

    def time_mikota(self, n, solver):
        m = 10  # number of eigenpairs requested
        if solver == 'lobpcg':
            X = rand(n, m)
            X = orth(X)
            # Cholesky factorization of A used as a preconditioner.
            LorU, lower = cho_factor(self.A, lower=0, overwrite_a=0)
            M = LinearOperator(self.shape,
                               matvec=partial(_precond, LorU, lower),
                               matmat=partial(_precond, LorU, lower))
            eigs, vecs = lobpcg(self.A, X, self.B, M, tol=1e-4, maxiter=40)
        else:
            eigh(self.A, self.B, eigvals_only=True, eigvals=(0, m-1))

    def time_sakurai(self, n, solver):
        m = 3  # number of eigenpairs requested
        if solver == 'lobpcg':
            X = rand(n, m)
            eigs, vecs, resnh = lobpcg(self.A, X, self.B, tol=1e-6, maxiter=500,
                                       retResidualNormsHistory=1)
        else:
            eigh(self.A_dense, self.B_dense, eigvals_only=True, eigvals=(0, m-1))

View file

@ -1,55 +0,0 @@
"""Compare the speed of exact one-norm calculation vs. its estimation.
"""
from __future__ import division, print_function, absolute_import
import numpy as np
try:
import scipy.sparse.linalg
import scipy.sparse
except ImportError:
pass
from .common import Benchmark
class BenchmarkOneNormEst(Benchmark):
    """Compare exact one-norm computation against onenormest."""
    params = [
        [2, 3, 5, 10, 30, 100, 300, 500, 1000, 1e4, 1e5, 1e6],
        ['exact', 'onenormest']
    ]
    param_names = ['n', 'solver']

    def setup(self, n, solver):
        np.random.seed(1234)
        nrepeats = 100
        shape = (n, n)
        if n <= 1000:
            # Sample the matrices.
            self.matrices = []
            for i in range(nrepeats):
                M = np.random.randn(*shape)
                self.matrices.append(M)
        else:
            # Large orders: a dense exact computation is infeasible.
            if solver == 'exact':
                raise NotImplementedError()
            max_nnz = 100000
            nrepeats = 1
            self.matrices = []
            for i in range(nrepeats):
                # NOTE(review): for n >= 1e4 the sizes are floats; the call
                # relies on scipy.sparse.rand accepting them — confirm.
                M = scipy.sparse.rand(shape[0], shape[1], min(max_nnz/(shape[0]*shape[1]), 1e-5))
                self.matrices.append(M)

    def time_onenormest(self, n, solver):
        if solver == 'exact':
            # Get the exact values of one-norms of squares.
            for M in self.matrices:
                M.dot(M)
                # NOTE(review): the product above is discarded and the norm
                # is taken of M itself; per the comment, `_onenorm(M.dot(M))`
                # may have been intended — confirm.
                scipy.sparse.linalg.matfuncs._onenorm(M)
        elif solver == 'onenormest':
            # Get the estimates of one-norms of squares.
            for M in self.matrices:
                scipy.sparse.linalg.matfuncs._onenormest_matrix_power(M, 2)

View file

@ -1,64 +0,0 @@
"""
Check the speed of the conjugate gradient solver.
"""
from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import assert_equal
try:
from scipy import linalg, sparse
from scipy.sparse.linalg import cg
except ImportError:
pass
from .common import Benchmark
def _create_sparse_poisson1d(n):
# Make Gilbert Strang's favorite matrix
# http://www-math.mit.edu/~gs/PIX/cupcakematrix.jpg
P1d = sparse.diags([[-1]*(n-1), [2]*n, [-1]*(n-1)], [-1, 0, 1])
assert_equal(P1d.shape, (n, n))
return P1d
def _create_sparse_poisson2d(n):
    """Return the (n*n)-by-(n*n) 2-D Poisson matrix as a Kronecker sum."""
    one_dim = _create_sparse_poisson1d(n)
    two_dim = sparse.kronsum(one_dim, one_dim)
    assert_equal(two_dim.shape, (n*n, n*n))
    return two_dim
class Bench(Benchmark):
    """Benchmark solving the 2-D Poisson system: sparse cg vs dense solve."""
    params = [
        [4, 6, 10, 16, 25, 40, 64, 100, 160, 250, 400, 640, 1000, 1600],
        ['sparse', 'dense']
    ]
    param_names = ['(n,n)', 'solver']

    def setup(self, n, solver):
        # Size cutoffs keep the slow variants from dominating the run.
        dense_is_active = (n**2 < 600)
        sparse_is_active = (n**2 < 20000)
        if solver == 'dense' and not dense_is_active:
            raise NotImplementedError()
        if solver == 'sparse' and not sparse_is_active:
            raise NotImplementedError()
        self.b = np.ones(n*n)
        self.P_sparse = _create_sparse_poisson2d(n)
        self.P_dense = self.P_sparse.A

    def time_cg(self, n, solver):
        if solver == 'dense':
            linalg.solve(self.P_dense, self.b)
        else:
            cg(self.P_sparse, self.b)

    def time_spsolve(self, n, solver):
        # NOTE(review): despite the name, the sparse branch calls cg, not
        # sparse.linalg.spsolve — confirm whether spsolve was intended.
        if solver == 'dense':
            linalg.solve(self.P_dense, self.b)
        else:
            cg(self.P_sparse, self.b)

View file

@ -1,134 +0,0 @@
from __future__ import division, absolute_import, print_function
import numpy as np
try:
from scipy.spatial import cKDTree, KDTree
except ImportError:
pass
from .common import Benchmark
class Build(Benchmark):
    """Benchmark construction of KDTree / cKDTree."""
    params = [
        [(3,10000,1000), (8,10000,1000), (16,10000,1000)],
        ['KDTree', 'cKDTree'],
    ]
    param_names = ['(m, n, r)', 'class']

    def setup(self, mnr, cls_name):
        self.cls = KDTree if cls_name == 'KDTree' else cKDTree
        m, n, r = mnr
        np.random.seed(1234)
        # Two Gaussian clouds; the second is shifted by the all-ones vector.
        self.data = np.concatenate((np.random.randn(n//2,m),
                                    np.random.randn(n-n//2,m)+np.ones(m)))
        self.queries = np.concatenate((np.random.randn(r//2,m),
                                       np.random.randn(r-r//2,m)+np.ones(m)))

    def time_build(self, mnr, cls_name):
        """
        Constructing kd-tree
        =======================
        dim | # points |  time
        """
        m, n, r = mnr
        # NOTE(review): 'cKDTree_flat' is not in this class's params, so this
        # branch is unreachable here (that name is used by Query/Radius).
        if cls_name == 'cKDTree_flat':
            self.T = self.cls(self.data, leafsize=n)
        else:
            self.cls(self.data)
class Query(Benchmark):
    """Benchmark nearest-neighbour queries on (c)KDTree."""
    params = [
        [(3,10000,1000), (8,10000,1000), (16,10000,1000)],
        ['KDTree', 'cKDTree', 'cKDTree_flat'],
    ]
    param_names = ['(m, n, r)', 'class']

    @staticmethod
    def do_setup(self, mnr, cls_name):
        # Shared with Radius.setup: builds the data/query clouds and the
        # tree.  'cKDTree_flat' builds with leafsize=n (a single leaf).
        self.cls = KDTree if cls_name == 'KDTree' else cKDTree
        m, n, r = mnr
        np.random.seed(1234)
        self.data = np.concatenate((np.random.randn(n//2,m),
                                    np.random.randn(n-n//2,m)+np.ones(m)))
        self.queries = np.concatenate((np.random.randn(r//2,m),
                                       np.random.randn(r-r//2,m)+np.ones(m)))
        if cls_name == 'cKDTree_flat':
            self.T = self.cls(self.data, leafsize=n)
        else:
            self.T = self.cls(self.data)

    def setup(self, mnr, cls_name):
        Query.do_setup(self, mnr, cls_name)

    def time_query(self, mnr, cls_name):
        """
        Querying kd-tree
        dim | # points | # queries |  KDTree  | cKDTree | flat cKDTree
        """
        self.T.query(self.queries)
class Radius(Benchmark):
    """Benchmark radius-based queries: query_ball_point and query_pairs."""
    params = [
        [(3,10000,1000)],
        [0.2, 0.5],
        ['KDTree', 'cKDTree', 'cKDTree_flat'],
    ]
    param_names = ['(m, n, r)', 'probe radius', 'class']

    def __init__(self):
        # time_query_pairs gets its own, smaller problem sizes.
        self.time_query_pairs.__func__.params = list(self.params)
        self.time_query_pairs.__func__.params[0] = [(3,1000,30),
                                                    (8,1000,30),
                                                    (16,1000,30)]

    def setup(self, mnr, probe_radius, cls_name):
        # Reuse Query's setup to build the tree and query points.
        Query.do_setup(self, mnr, cls_name)

    def time_query_ball_point(self, mnr, probe_radius, cls_name):
        self.T.query_ball_point(self.queries, probe_radius)

    def time_query_pairs(self, mnr, probe_radius, cls_name):
        self.T.query_pairs(probe_radius)
class Neighbors(Benchmark):
    """Benchmark two-tree operations: sparse_distance_matrix and
    count_neighbors."""
    params = [
        [(3,1000,1000),
         (8,1000,1000),
         (16,1000,1000)],
        [0.2, 0.5],
        ['KDTree', 'cKDTree'],
    ]
    param_names = ['(m, n1, n2)', 'probe radius', 'class']

    def setup(self, mn1n2, probe_radius, cls_str):
        m, n1, n2 = mn1n2
        cls = KDTree if cls_str == 'KDTree' else cKDTree
        # NOTE(review): unlike the other spatial benchmarks there is no
        # np.random.seed here, so the data differs between runs — confirm
        # whether that is intended.
        data1 = np.concatenate((np.random.randn(n1//2,m),
                                np.random.randn(n1-n1//2,m)+np.ones(m)))
        data2 = np.concatenate((np.random.randn(n2//2,m),
                                np.random.randn(n2-n2//2,m)+np.ones(m)))
        self.T1 = cls(data1)
        self.T2 = cls(data2)

    def time_sparse_distance_matrix(self, mn1n2, probe_radius, cls_str):
        self.T1.sparse_distance_matrix(self.T2, probe_radius)

    def time_count_neighbors(self, mn1n2, probe_radius, cls_str):
        """
        Count neighbors kd-tree
        dim | # points T1 | # points T2 | probe radius |  KDTree  | cKDTree
        """
        self.T1.count_neighbors(self.T2, probe_radius)

View file

@ -1,29 +0,0 @@
from __future__ import division, absolute_import, print_function
import numpy as np
try:
from scipy.special import ai_zeros, bi_zeros, erf
except ImportError:
pass
from .common import Benchmark
class Airy(Benchmark):
    """Benchmark computing zeros of the Airy functions."""

    def time_ai_zeros(self):
        # First 100000 zeros of Ai and related quantities.
        ai_zeros(100000)

    def time_bi_zeros(self):
        # First 100000 zeros of Bi and related quantities.
        bi_zeros(100000)
class Erf(Benchmark):
    """Benchmark the error function on a large random sample."""

    def setup(self, *args):
        # np.random.rand requires integer dimensions; the original 1e5 is a
        # float and raises TypeError on modern NumPy.
        self.rand = np.random.rand(100000)

    def time_real(self, offset):
        erf(self.rand + offset)

    # asv parameterization attached directly to the method.
    time_real.params = [0.0, 2.0]
    time_real.param_names = ['offset']

View file

@ -1,96 +0,0 @@
from __future__ import division, absolute_import, print_function
import warnings
import numpy as np
try:
import scipy.stats as stats
except ImportError:
pass
from .common import Benchmark
class Anderson_KSamp(Benchmark):
    """Benchmark the k-sample Anderson-Darling test on three samples."""

    def setup(self, *args):
        # Three normal samples of size 1000 with means 0, 1 and 2.
        self.rand = [np.random.normal(loc=i, size=1000) for i in range(3)]

    def time_anderson_ksamp(self):
        with warnings.catch_warnings():
            # Suppress the UserWarning anderson_ksamp may emit.
            warnings.simplefilter('ignore', UserWarning)
            stats.anderson_ksamp(self.rand)
class CorrelationFunctions(Benchmark):
    """Benchmark Fisher's exact test on a random 2x2 table."""
    param_names = ['alternative']
    params = [
        ['two-sided', 'less', 'greater']
    ]

    def setup(self, mode):
        # Random 2x2 contingency table with entries in [0, 10).
        a = np.random.rand(2,2) * 10
        self.a = a

    def time_fisher_exact(self, alternative):
        oddsratio, pvalue = stats.fisher_exact(self.a, alternative=alternative)
class InferentialStats(Benchmark):
    """Benchmark independent-sample t-tests with equal/unequal variances."""

    def setup(self):
        np.random.seed(12345678)
        self.a = stats.norm.rvs(loc=5, scale=10, size=500)
        # b and c share a mean; c has twice the scale of b.
        self.b = stats.norm.rvs(loc=8, scale=10, size=20)
        self.c = stats.norm.rvs(loc=8, scale=20, size=20)

    def time_ttest_ind_same_var(self):
        # different sized samples with the same variance
        stats.ttest_ind(self.a, self.b)
        stats.ttest_ind(self.a, self.b, equal_var=False)

    def time_ttest_ind_diff_var(self):
        # different sized samples with different variances
        stats.ttest_ind(self.a, self.c)
        stats.ttest_ind(self.a, self.c, equal_var=False)
class Distribution(Benchmark):
    """Benchmark pdf/cdf/rvs/fit for a few continuous distributions."""

    param_names = ['distribution', 'properties']
    params = [
        ['cauchy', 'gamma', 'beta'],
        ['pdf', 'cdf', 'rvs', 'fit']
    ]

    def setup(self, distribution, properties):
        np.random.seed(12345678)
        self.x = np.random.rand(100)

    def time_distribution(self, distribution, properties):
        # Shape parameters for each distribution (cauchy has none); all use
        # loc=4, scale=10, matching the original per-branch calls.
        shape_params = {'gamma': {'a': 5},
                        'beta': {'a': 5, 'b': 3},
                        'cauchy': {}}
        kwargs = dict(shape_params[distribution], loc=4, scale=10)
        dist = getattr(stats, distribution)
        if properties == 'rvs':
            dist.rvs(size=1000, **kwargs)
        else:
            # pdf/cdf evaluate at self.x; fit uses self.x as data with the
            # same values passed as the starting guess.
            getattr(dist, properties)(self.x, **kwargs)

View file

@ -1,307 +0,0 @@
from __future__ import division
import numpy as np
from numpy import sin, cos, pi, exp, sqrt, abs
class SimpleQuadratic(object):
    """Isotropic quadratic bowl f(x) = x.x with its minimum at the origin."""

    def fun(self, x):
        """Objective value."""
        return (x * x).sum()

    def der(self, x):
        """Gradient: 2x."""
        return x * 2.

    def hess(self, x):
        """Hessian: 2I."""
        return np.eye(x.size) * 2.
class AsymmetricQuadratic(object):
    """Quadratic bowl f(x) = x.x + x[0], shifted off-center in x[0]."""

    def fun(self, x):
        """Objective value."""
        return (x * x).sum() + x[0]

    def der(self, x):
        """Gradient: 2x plus 1 in the first component."""
        grad = x * 2.
        grad[0] += 1
        return grad

    def hess(self, x):
        """Hessian: 2I (the linear term has no curvature)."""
        return np.eye(x.size) * 2.
class LJ(object):
    """
    The Lennard Jones potential

    a mathematically simple model that approximates the interaction between a
    pair of neutral atoms or molecules.
    http://en.wikipedia.org/wiki/Lennard-Jones_potential

    E = sum_ij V(r_ij)

    where r_ij is the cartesian distance between atom i and atom j, and the
    pair potential has the form

    V(r) = 4 * eps * ( (sigma / r)**12 - (sigma / r)**6

    Notes
    -----
    the double loop over many atoms makes this *very* slow in Python. If it
    were in a compiled language it would be much faster.
    """

    def __init__(self, eps=1.0, sig=1.0):
        self.sig = sig
        self.eps = eps

    def vij(self, r):
        """Pair energy at separation r."""
        return 4. * self.eps * ((self.sig / r)**12 - (self.sig / r)**6)

    def dvij(self, r):
        """Derivative of the pair energy with respect to r."""
        p7 = 6. / self.sig * (self.sig / r)**7
        p13 = -12. / self.sig * (self.sig / r)**13
        return 4. * self.eps * (p7 + p13)

    def fun(self, coords):
        """Total energy of a configuration given as a flat length-3N array."""
        natoms = coords.size // 3
        xyz = np.reshape(coords, [natoms, 3])
        total = 0.
        for a in range(natoms):
            for b in range(a + 1, natoms):
                rvec = xyz[b, :] - xyz[a, :]
                total += self.vij(np.linalg.norm(rvec))
        return total

    def der(self, coords):
        """Gradient of the total energy w.r.t. the flat coordinates."""
        natoms = coords.size // 3
        xyz = np.reshape(coords, [natoms, 3])
        grad = np.zeros([natoms, 3])
        for a in range(natoms):
            for b in range(a + 1, natoms):
                rvec = xyz[b, :] - xyz[a, :]
                r = np.linalg.norm(rvec)
                g = self.dvij(r)
                # Equal and opposite forces on the two atoms of each pair.
                grad[a, :] -= g * rvec / r
                grad[b, :] += g * rvec / r
        return grad.reshape([natoms * 3])

    def get_random_configuration(self):
        # NOTE: relies on self.natoms, which only the subclasses define.
        rnd = np.random.uniform(-1, 1, [3 * self.natoms])
        return rnd * float(self.natoms)**(1. / 3)
# Lennard-Jones benchmark instances of decreasing size.  ``natoms`` is the
# number of atoms; ``target_E`` is the known global-minimum energy.
class LJ38(LJ):
    natoms = 38
    target_E = -173.928427


class LJ30(LJ):
    natoms = 30
    target_E = -128.286571


class LJ20(LJ):
    natoms = 20
    target_E = -77.177043


class LJ13(LJ):
    natoms = 13
    target_E = -44.326801
class Booth(object):
    """Booth function: global minimum 0 at (1, 3)."""

    target_E = 0.
    solution = np.array([1., 3.])
    xmin = np.array([-10., -10.])
    xmax = np.array([10., 10.])

    def fun(self, coords):
        x, y = coords
        u = x + 2. * y - 7.
        v = 2. * x + y - 5.
        return u**2 + v**2

    def der(self, coords):
        x, y = coords
        u = x + 2. * y - 7.
        v = 2. * x + y - 5.
        return np.array([2. * u + 4. * v, 4. * u + 2. * v])
class Beale(object):
    """Beale function: global minimum 0 at (3, 0.5)."""

    target_E = 0.
    solution = np.array([3., 0.5])
    xmin = np.array([-4.5, -4.5])
    xmax = np.array([4.5, 4.5])

    def fun(self, coords):
        x, y = coords
        t1 = 1.5 - x + x * y
        t2 = 2.25 - x + x * y**2
        t3 = 2.625 - x + x * y**3
        return t1**2 + t2**2 + t3**2

    def der(self, coords):
        x, y = coords
        t1 = 1.5 - x + x * y
        t2 = 2.25 - x + x * y**2
        t3 = 2.625 - x + x * y**3
        dfdx = (2. * t1 * (-1. + y) +
                2. * t2 * (-1. + y**2) +
                2. * t3 * (-1. + y**3))
        dfdy = (2. * t1 * (x) +
                2. * t2 * (2. * y * x) +
                2. * t3 * (3. * x * y**2))
        return np.array([dfdx, dfdy])
"""
Global Test functions for minimizers.
HolderTable, Ackley and Levi have many competing local minima and are suited
for global minimizers such as basinhopping or differential_evolution.
(http://en.wikipedia.org/wiki/Test_functions_for_optimization)
See also http://mpra.ub.uni-muenchen.de/2718/1/MPRA_paper_2718.pdf
"""
class HolderTable(object):
    """Holder table function: four symmetric global minima of ~-19.2085.

    ``stepsize`` and ``temperature`` appear to be basinhopping tuning
    parameters for this problem (see the note on ``der`` below).
    """
    target_E = -19.2085
    solution = [8.05502, 9.66459]
    xmin = np.array([-10, -10])
    xmax = np.array([10, 10])
    stepsize = 2.
    temperature = 2.

    def fun(self, x):
        return - abs(sin(x[0]) * cos(x[1]) * exp(abs(1. - sqrt(x[0]**2 +
                     x[1]**2) / pi)))

    def dabs(self, x):
        """derivative of absolute value"""
        if x < 0:
            return -1.
        elif x > 0:
            return 1.
        else:
            return 0.

    #commented out at the because it causes FloatingPointError in
    #basinhopping
    # def der(self, x):
    #     R = sqrt(x[0]**2 + x[1]**2)
    #     g = 1. - R / pi
    #     f = sin(x[0]) * cos(x[1]) * exp(abs(g))
    #     E = -abs(f)
    #
    #     dRdx = x[0] / R
    #     dgdx = - dRdx / pi
    #     dfdx = cos(x[0]) * cos(x[1]) * exp(abs(g)) + f * self.dabs(g) * dgdx
    #     dEdx = - self.dabs(f) * dfdx
    #
    #     dRdy = x[1] / R
    #     dgdy = - dRdy / pi
    #     dfdy = -sin(x[0]) * sin(x[1]) * exp(abs(g)) + f * self.dabs(g) * dgdy
    #     dEdy = - self.dabs(f) * dfdy
    #     return np.array([dEdx, dEdy])
class Ackley(object):
    """Ackley function: many local minima; global minimum 0 at the origin.

    Note: not smooth at the origin, so gradient-based minimizers will
    never converge to the exact minimum.
    """

    target_E = 0.
    solution = [0., 0.]
    xmin = np.array([-5, -5])
    xmax = np.array([5, 5])

    def fun(self, x):
        return (-20. * exp(-0.2 * sqrt(0.5 * (x[0]**2 + x[1]**2))) + 20. +
                np.e - exp(0.5 * (cos(2. * pi * x[0]) + cos(2. * pi * x[1]))))

    def der(self, x):
        R = sqrt(x[0]**2 + x[1]**2)
        radial_term = -20. * exp(-0.2 * R)
        cosine_term = -exp(0.5 * (cos(2. * pi * x[0]) + cos(2. * pi * x[1])))
        radial_deriv = radial_term * (-0.2 * 0.5 / R)
        dfdx = 2. * radial_deriv * x[0] - cosine_term * pi * sin(2. * pi * x[0])
        dfdy = 2. * radial_deriv * x[1] - cosine_term * pi * sin(2. * pi * x[1])
        return np.array([dfdx, dfdy])
class Levi(object):
    """Levi function N.13: global minimum 0 at (1, 1)."""

    target_E = 0.
    solution = [1., 1.]
    xmin = np.array([-10, -10])
    xmax = np.array([10, 10])

    def fun(self, x):
        term_x = sin(3. * pi * x[0])**2
        term_mid = (x[0] - 1.)**2 * (1. + sin(3 * pi * x[1])**2)
        term_y = (x[1] - 1.)**2 * (1. + sin(2 * pi * x[1])**2)
        return term_x + term_mid + term_y

    def der(self, x):
        dfdx = (2. * 3. * pi *
                cos(3. * pi * x[0]) * sin(3. * pi * x[0]) +
                2. * (x[0] - 1.) * (1. + sin(3 * pi * x[1])**2))
        dfdy = ((x[0] - 1.)**2 * 2. * 3. * pi * cos(3. * pi * x[1]) * sin(3. *
                pi * x[1]) + 2. * (x[1] - 1.) *
                (1. + sin(2 * pi * x[1])**2) + (x[1] - 1.)**2 *
                2. * 2. * pi * cos(2. * pi * x[1]) * sin(2. * pi * x[1]))
        return np.array([dfdx, dfdy])
class EggHolder(object):
    """Eggholder function: global minimum ~-959.6407 at (512, 404.2319)."""

    target_E = -959.6407
    solution = [512, 404.2319]
    xmin = np.array([-512., -512])
    xmax = np.array([512., 512])

    def fun(self, x):
        term1 = -(x[1] + 47) * np.sin(np.sqrt(abs(x[1] + x[0]/2. + 47)))
        term2 = -x[0] * np.sin(np.sqrt(abs(x[0] - (x[1] + 47))))
        return term1 + term2
class CrossInTray(object):
    """Cross-in-tray function: four global minima of ~-2.06261."""

    target_E = -2.06261
    solution = [1.34941, -1.34941]
    xmin = np.array([-10., -10])
    xmax = np.array([10., 10])

    def fun(self, x):
        arg = abs(100 - sqrt(x[0]**2 + x[1]**2)/pi)
        inner = abs(sin(x[0]) * sin(x[1]) * exp(arg)) + 1.
        return -0.0001 * np.power(inner, 0.1)
class Schaffer2(object):
    """Schaffer function N.2: global minimum 0 at the origin."""

    target_E = 0
    solution = [0., 0.]
    xmin = np.array([-100., -100])
    xmax = np.array([100., 100])

    def fun(self, x):
        squared_diff = x[0]**2 - x[1]**2
        squared_sum = x[0]**2 + x[1]**2
        num = np.power(np.sin(squared_diff), 2) - 0.5
        den = np.power(1 + 0.001 * squared_sum, 2)
        return 0.5 + num / den
class Schaffer4(object):
    """Schaffer function N.4: global minimum ~0.292579."""

    target_E = 0.292579
    solution = [0, 1.253131828927371]
    xmin = np.array([-100., -100])
    xmax = np.array([100., 100])

    def fun(self, x):
        squared_diff = abs(x[0]**2 - x[1]**2)
        num = cos(sin(squared_diff))**2 - 0.5
        den = (1+0.001*(x[0]**2 + x[1]**2))**2
        return 0.5 + num / den

View file

@ -1,132 +0,0 @@
#!/usr/bin/env python
"""
run.py [options] ASV_COMMAND..
Convenience wrapper around the ``asv`` command; just sets environment
variables and chdirs to the correct place etc.
"""
from __future__ import division, absolute_import, print_function
import os
import sys
import subprocess
import json
import shutil
import argparse
import sysconfig
EXTRA_PATH = ['/usr/lib/ccache', '/usr/lib/f90cache',
'/usr/local/lib/ccache', '/usr/local/lib/f90cache']
from benchmarks.common import set_mem_rlimit
def main():
    """Parse wrapper options and forward the remaining arguments to asv."""
    class ASVHelpAction(argparse.Action):
        # Zero-argument flag that prints asv's own --help and exits.
        nargs = 0
        def __call__(self, parser, namespace, values, option_string=None):
            sys.exit(run_asv(['--help']))

    p = argparse.ArgumentParser(usage=__doc__.strip())
    p.add_argument('--help-asv', nargs=0, action=ASVHelpAction,
                   help="""show ASV help""")
    p.add_argument("--current-repo", action="store_true",
                   help="""use current repository as the upstream repository,
                   rather than cloning it from the internet; enables running
                   benchmarks on e.g. your own branches""")
    # Everything after the wrapper options is passed through to asv verbatim.
    p.add_argument('asv_command', nargs=argparse.REMAINDER)
    args = p.parse_args()
    sys.exit(run_asv(args.asv_command, current_repo=args.current_repo))
def run_asv(args, current_repo=False):
    """Run ``asv`` with the environment set up for scipy benchmarking.

    Returns the asv process's exit code, or 1 for known failure modes
    (asv missing, gh-pages requested).
    """
    cwd = os.path.abspath(os.path.dirname(__file__))
    if current_repo:
        try:
            # Rewrite the asv config to point at the local checkout
            # instead of the upstream GitHub repository.
            from asv.util import load_json, write_json
            conf = load_json(os.path.join(cwd, 'asv.conf.json'))
            conf['repo'] = os.path.normpath(os.path.join(cwd, '..'))
            cfg_fn = os.path.join(cwd, '.asvconf.tmp')
            write_json(cfg_fn, conf)
            args = ['--config', cfg_fn] + args
        except ImportError:
            pass

    # Keep the cached clone's origin URL in sync with the chosen repo.
    repo_dir = os.path.join(cwd, 'scipy')
    if is_git_repo_root(repo_dir):
        if current_repo:
            url = os.path.normpath(os.path.join(cwd, '..'))
        else:
            url = "https://github.com/scipy/scipy.git"
        subprocess.call(['git', 'remote', 'set-url', "origin", url],
                        cwd=repo_dir)

    cmd = ['asv'] + list(args)
    env = dict(os.environ)

    # Inject ccache/f90cache paths
    if sys.platform.startswith('linux'):
        env['PATH'] = os.pathsep.join(EXTRA_PATH + env.get('PATH', '').split(os.pathsep))

    # Control BLAS and CFLAGS
    env['OPENBLAS_NUM_THREADS'] = '1'
    env['CFLAGS'] = drop_bad_flags(sysconfig.get_config_var('CFLAGS'))

    # Limit memory usage
    try:
        set_mem_rlimit()
    except (ImportError, RuntimeError):
        pass

    # Check scipy version if in dev mode; otherwise clone and setup results
    # repository
    if args and (args[0] == 'dev' or '--python=same' in args):
        import scipy
        print("Running benchmarks for Scipy version %s at %s" % (scipy.__version__, scipy.__file__))

    # Override gh-pages
    if 'gh-pages' in args:
        print("gh-pages command is disabled")
        return 1

    # Run
    try:
        return subprocess.call(cmd, env=env, cwd=cwd)
    except OSError as err:
        # errno 2 == ENOENT: the asv executable itself was not found.
        if err.errno == 2:
            print("Error when running '%s': %s\n" % (" ".join(cmd), str(err),))
            print("You need to install Airspeed Velocity https://spacetelescope.github.io/asv/")
            print("to run Scipy benchmarks")
            return 1
        raise
def is_git_repo_root(path):
    """Return True if `path` is the top-level directory of a git repository.

    Returns False when git is unavailable, the command fails, or `path`
    lies below (but not at) the repository root.
    """
    try:
        p = subprocess.Popen(['git', '-C', path, 'rev-parse', '--git-dir'],
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out, err = p.communicate()
        if p.returncode != 0:
            return False
        # Popen returns bytes on Python 3; decode before comparing,
        # otherwise the comparison with the str '.git' is always False.
        return (out.decode('utf-8', 'replace').strip() == '.git')
    except OSError:
        return False
def drop_bad_flags(flags):
    """
    Drop flags that are problematic for compiling old scipy versions
    """
    if not flags:
        # None or empty string: pass through unchanged.
        return flags
    rejected = ("-pedantic-errors",)
    kept = [flag for flag in flags.split()
            if not flag.startswith("-Werror") and flag not in rejected]
    return " ".join(kept)
# Allow the wrapper to be executed directly as a script.
if __name__ == "__main__":
    sys.exit(main())

View file

@ -1,64 +0,0 @@
Name: scipy
Version: 0.17.0.dev0
Summary: SciPy: Scientific Library for Python
Url: http://www.scipy.org
DownloadUrl: http://sourceforge.net/projects/scipy/files/scipy/
Description:
SciPy (pronounced "Sigh Pie") is open-source software for mathematics,
science, and engineering. The SciPy library depends on NumPy, which
provides convenient and fast N-dimensional array manipulation. The SciPy
library is built to work with NumPy arrays, and provides many user-friendly
and efficient numerical routines such as routines for numerical integration
and optimization. Together, they run on all popular operating systems, are
quick to install, and are free of charge. NumPy and SciPy are easy to use,
but powerful enough to be depended upon by some of the world's leading
scientists and engineers. If you need to manipulate numbers on a computer
and display or publish the results, give SciPy a try!
Maintainer: SciPy Developers
MaintainerEmail: scipy-dev@scipy.org
License: BSD
Platforms: Windows,Linux,Solaris,Mac OS-X,Unix
Classifiers:
Development Status :: 4 - Beta,
Intended Audience :: Science/Research,
Intended Audience :: Developers,
License :: OSI Approved,
Programming Language :: C,
Programming Language :: Python,
Programming Language :: Python :: 3,
Topic :: Software Development,
Topic :: Scientific/Engineering,
Operating System :: Microsoft :: Windows,
Operating System :: POSIX,
Operating System :: Unix,
Operating System :: MacOS
DataFiles: tests
TargetDir: $sitedir/scipy
SourceDir: scipy
Files:
**/tests/*.py,
cluster/tests/*.txt,
fftpack/tests/*.npz,
io/arff/tests/data/*.arff,
io/matlab/tests/data/*.txt,
io/matlab/tests/data/*.mat,
io/tests/data/*.nc,
io/tests/data/*.wav,
io/tests/data/*.sav,
misc/tests/data/*.png,
spatial/tests/data/*.txt,
special/tests/data/*.npz,
weave/scxx/*.h,
weave/scxx/*.cpp,
linalg/cython_blas.pxd,
linalg/cython_lapack.pxd
Recurse: scipy
HookFile: bscript
UseBackends: Waf
Library:
Packages:
scipy

View file

@ -1,229 +0,0 @@
import sys
import os
import subprocess
from numpy.distutils.misc_util \
import \
get_numpy_include_dirs, get_pkg_info
from numpy.distutils.conv_template \
import \
process_str as process_c_str
from numpy.distutils.from_template \
import \
process_str as process_f_str, resolve_includes
from bento.backends.waf_backend \
import \
WAF_TOOLDIR
from bento.backends.waf_tools \
import \
blas_lapack
from bento.commands import hooks
import waflib
from waflib import Options
# Importing setup.py needed to get version info
import setup
# FIXME: add this to numpy so that we can reuse it
class CTemplateTask(waflib.Task.Task):
    """Waf task: expand a numpy conv_template ``.src`` file into C source."""
    color = 'BLUE'
    #ext_out = ['.c', '.pyf']

    def run(self):
        s = self.inputs[0]
        cnt = s.read()
        # numpy.distutils.conv_template expansion of the template text.
        writestr = process_c_str(cnt)
        o = self.outputs[0]
        o.write(writestr)
class FTemplateTask(waflib.Task.Task):
    """Waf task: expand a numpy from_template ``.src`` file into Fortran/pyf."""
    color = 'BLUE'
    ext_out = ['.pyf']

    def run(self):
        s = self.inputs[0]
        # Resolve template 'include' directives before expansion.
        lines = resolve_includes(s.abspath())
        writestr = process_f_str("".join(lines))
        o = self.outputs[0]
        o.write(writestr)
        return 0
@waflib.TaskGen.extension(".src")
def c_template(self, node):
    """Map a ``.src`` node to the appropriate template task.

    ``foo.{f,pyf,ipyf}.src`` gets an FTemplateTask (requires the 'fc'
    feature); ``foo.c.src`` gets a CTemplateTask (requires 'c').  The
    generated file is appended to the task generator's sources.
    """
    # Strip the trailing '.src' to get the generated file's name.
    output_name = node.name.rsplit(".", 1)[0]
    output = node.parent.find_or_declare(output_name)
    assert output.is_bld()
    # NOTE(review): rsplit(".")[1] assumes the output name contains exactly
    # one dot (e.g. 'foo.c'); names with extra dots would misclassify.
    ext = output.name.rsplit(".")[1]
    if ext in ["f", "pyf", "ipyf"]:
        tsk = self.create_task('FTemplateTask', node, output)
        if "fc" in self.features:
            self.source.append(output)
        else:
            raise ValueError("FTemplateTask without feature 'fc' (node: %r)?" %
                             (node,))
    elif ext in ["c"]:
        tsk = self.create_task('CTemplateTask', node, output)
        if "c" in self.features:
            self.source.append(output)
        else:
            raise ValueError("CTemplateTask without feature 'c': %r" % node)
    else:
        raise ValueError("Unknown extension in templating: %r" % ext)
# FIXME: abstract those module gen tasks...
class write_module(waflib.Task.Task):
    """Waf task: write the literal env.CONTENT string to the output node."""
    color = "CYAN"
    vars = ["CONTENT"]

    def run(self):
        # FIXME: put actual data here
        self.outputs[0].write(self.env.CONTENT)
@waflib.TaskGen.feature("gen_pymodule")
def process_write_config(self):
    """Task-gen method: create a write_module task for a generated module.

    Expects ``content`` (module source text) and ``target`` attributes on
    the task generator, and registers the output with bento so it gets
    installed under $sitedir.
    """
    if not hasattr(self, "content"):
        raise ValueError("task gen %r expects a 'content' argument" % self.name)
    else:
        self.env.CONTENT = self.content
    output = self.path.find_or_declare(self.target)
    name = getattr(self, "name", None) or self.target
    # Register the generated file with bento's install machinery.
    bento_context = self.bld.bento_context
    b_output = bento_context.build_node.make_node(output.bldpath())
    bento_context.outputs_registry.register_outputs(
        "gen_pymodule", name, [b_output], bento_context.build_node, "$sitedir")
    tsk = self.create_task("write_module")
    tsk.set_outputs(output)
    return tsk
def _set_mangling_var(conf, u, du, case, f2pycompat=True):
    """Record Fortran name-mangling results as env flags and C defines.

    `u`, `du` and `case` are presumably the results of
    ``conf.check_fortran_mangling()`` (underscoring of plain names,
    underscoring of names containing underscores, and symbol case) —
    confirm against the caller.  With `f2pycompat`, the macro names that
    f2py expects are emitted alongside the F77_* ones.
    """
    env = conf.env
    macros = []
    if du == '_':
        # g77-style: names containing underscores get an extra underscore.
        env['F77_UNDERSCORE_G77'] = 1
        macros.append('F77_UNDERSCORE_G77')
        if f2pycompat:
            macros.append('UNDERSCORE_G77')
    else:
        env['F77_UNDERSCORE_G77'] = 0
    if u == '_':
        env['F77_NO_APPEND_FORTRAN'] = 0
    else:
        env['F77_NO_APPEND_FORTRAN'] = 1
        macros.append('F77_NO_APPEND_FORTRAN')
        if f2pycompat:
            macros.append('NO_APPEND_FORTRAN')
    if case == 'upper':
        env['F77_UPPERCASE_FORTRAN'] = 1
        macros.append('F77_UPPERCASE_FORTRAN')
        if f2pycompat:
            macros.append('UPPERCASE_FORTRAN')
    else:
        env['F77_UPPERCASE_FORTRAN'] = 0
    env.DEFINES.extend(macros)
def _generate_cython():
    """Run tools/cythonize.py to regenerate scipy's Cython-derived sources."""
    print("Cythonizing sources")
    root = os.path.abspath(os.path.dirname(__file__))
    script = os.path.join(root, 'tools', 'cythonize.py')
    status = subprocess.call([sys.executable, script, 'scipy'], cwd=root)
    if status != 0:
        raise RuntimeError("Running cythonize failed!")
@hooks.post_configure
def post_configure(context):
    """Bento post-configure hook.

    Sets up C++/Fortran toolchains, Fortran name mangling, BLAS/LAPACK
    detection, npymath flags, and runs the Cython code generation.
    """
    opts = context.waf_options_context
    conf = context.waf_context

    opts.load("compiler_cxx")
    opts.load("compiler_fc")
    opts.load("f2py", tooldir=[WAF_TOOLDIR])
    # Default toolchain; switch to ifort/MSVC when building with MSVC on Windows.
    Options.options.check_fc = "gfortran"
    Options.options.check_cxx_compiler = "g++"
    if sys.platform == "win32" and conf.env.CC_NAME == "msvc":
        Options.options.check_fc = "ifort"
        Options.options.check_cxx_compiler = "msvc"
    conf.load("compiler_cxx")
    conf.load("compiler_fc")
    conf.load("f2py", tooldir=[WAF_TOOLDIR])
    conf.load("ordered_c", tooldir=[WAF_TOOLDIR])
    conf.load("arch", tooldir=[WAF_TOOLDIR])
    if conf.env.CC_NAME == 'gcc':
        conf.env.append_value('CFLAGS_PYEXT', "-Wfatal-errors")
        conf.env.append_value('CXXFLAGS_PYEXT', "-Wfatal-errors")
    if sys.platform == "darwin":
        conf.env["MACOSX_DEPLOYMENT_TARGET"] = "10.6"
    conf.check_cc_default_arch()
    archs = [conf.env.DEFAULT_CC_ARCH]
    conf.env.ARCH = archs
    conf.check_fortran_verbose_flag()
    conf.check_fortran_clib()
    conf.check_fortran_dummy_main()
    # Detect the Fortran symbol mangling and export it as env vars/macros.
    u, du, c = conf.check_fortran_mangling()
    _set_mangling_var(conf, u, du, c)
    conf.env.INCLUDES = get_numpy_include_dirs()
    blas_lapack.check_blas_lapack(context)
    if not (conf.env.HAS_BLAS and conf.env.HAS_LAPACK):
        raise waflib.Errors.ConfigurationError("You need blas and lapack")
    # NOTE(review): check_blas_lapack is invoked a second time here --
    # confirm whether this repetition is intentional.
    blas_lapack.check_blas_lapack(context)
    npymath_info = get_pkg_info("npymath")
    conf.parse_flags(npymath_info.cflags() + " " + npymath_info.libs(), "NPYMATH")
    _generate_cython()
FULLVERSION, GIT_REVISION = setup.get_version_info()
version_file_content = """\
# THIS FILE IS GENERATED FROM SCIPY BSCRIPT
short_version = '%(version)s'
version = '%(version)s'
full_version = '%(full_version)s'
git_revision = '%(git_revision)s'
release = %(isrelease)s
if not release:
version = full_version
""" % {'version': setup.VERSION,
'full_version' : FULLVERSION,
'git_revision' : GIT_REVISION,
'isrelease': str(setup.ISRELEASED)}
@hooks.pre_build
def pre_build(context):
    """Bento pre-build hook: register the generated scipy/__config__.py
    and scipy/version.py modules (rebuilt on every run)."""
    bld = context.waf_context
    context.register_category("gen_pymodule")
    # Stub __config__.py with a no-op show().
    bld(features="gen_pymodule",
        target="scipy/__config__.py",
        content="""\
def show():
    pass
""",
        always=True)
    # version.py content is built at module level above from setup.py info.
    bld(features="gen_pymodule",
        target="scipy/version.py",
        content=version_file_content,
        always=True)
@hooks.options
def options(global_context):
    """Waf options hook: register the BLAS/LAPACK-related command-line
    options for the configure step."""
    blas_lapack.add_options(global_context)

View file

@ -1,138 +0,0 @@
API - importing from Scipy
==========================
In Python the distinction between what is the public API of a library and what
are private implementation details is not always clear. Unlike in other
languages like Java, it is possible in Python to access "private" function or
objects. Occasionally this may be convenient, but be aware that if you do so
your code may break without warning in future releases. Some widely understood
rules for what is and isn't public in Python are:
- Methods / functions / classes and module attributes whose names begin
with a leading underscore are private.
- If a class name begins with a leading underscore none of its members are
public, whether or not they begin with a leading underscore.
- If a module name in a package begins with a leading underscore none of
its members are public, whether or not they begin with a leading
underscore.
- If a module or package defines ``__all__`` that authoritatively defines the
public interface.
- If a module or package doesn't define ``__all__`` then all names that don't
start with a leading underscore are public.
.. note:: Reading the above guidelines one could draw the conclusion that every
private module or object starts with an underscore. This is not the
case; the presence of underscores does mark something as private, but
the absence of underscores does not mark something as public.
In Scipy there are modules whose names don't start with an underscore, but that
should be considered private. To clarify which modules these are we define
below what the public API is for Scipy, and give some recommendations for how
to import modules/functions/objects from Scipy.
Guidelines for importing functions from Scipy
---------------------------------------------
The scipy namespace itself only contains functions imported from numpy. These
functions still exist for backwards compatibility, but should be imported from
numpy directly.
Everything in the namespaces of scipy submodules is public. In general, it is
recommended to import functions from submodule namespaces. For example, the
function ``curve_fit`` (defined in scipy/optimize/minpack.py) should be
imported like this::
from scipy import optimize
result = optimize.curve_fit(...)
This form of importing submodules is preferred for all submodules except
``scipy.io`` (because ``io`` is also the name of a module in the Python
stdlib)::
from scipy import interpolate
from scipy import integrate
import scipy.io as spio
In some cases, the public API is one level deeper. For example the
``scipy.sparse.linalg`` module is public, and the functions it contains are not
available in the ``scipy.sparse`` namespace. Sometimes it may result in more
easily understandable code if functions are imported from one level deeper.
For example, in the following it is immediately clear that ``lomax`` is a
distribution if the second form is chosen::
# first form
from scipy import stats
stats.lomax(...)
# second form
from scipy.stats import distributions
distributions.lomax(...)
In that case the second form can be chosen, **if** it is documented in the next
section that the submodule in question is public.
API definition
--------------
Every submodule listed below is public. That means that these submodules are
unlikely to be renamed or changed in an incompatible way, and if that is
necessary a deprecation warning will be raised for one Scipy release before the
change is made.
* scipy.cluster
- vq
- hierarchy
* scipy.constants
* scipy.fftpack
* scipy.integrate
* scipy.interpolate
* scipy.io
- arff
- harwell_boeing
- idl
- matlab
- netcdf
- wavfile
* scipy.linalg
- scipy.linalg.blas
- scipy.linalg.lapack
- scipy.linalg.interpolative
* scipy.misc
* scipy.ndimage
* scipy.odr
* scipy.optimize
* scipy.signal
* scipy.sparse
- linalg
- csgraph
* scipy.spatial
- distance
* scipy.special
* scipy.stats
- distributions
- mstats
* scipy.weave

View file

@ -1,409 +0,0 @@
SciPy modules: maintainers & status
===================================
This document gives an overview of who the maintainers of modules within Scipy
are, what their responsibilities are and what (in their view) are the current
state and possible future directions for the modules.
Maintainers will aim to:
- Respond to tickets, PRs and mailing list questions within a reasonable
time frame.
- Keep an overview of the current status and future directions of the
module up to date in this document.
- Indicate whether PRs are ready to be merged or not, and try to resolve
any differences of opinion on merging code if necessary.
The above are listed as "aims" on purpose. All developers work on Scipy in
their free time and therefore, while we intend to do the best we can in the
limited time available for our open source work, it is possible that we don't
have the bandwidth to respond to every single issue. The goal is to have more
than one maintainer for each module, in which case this should rarely be an
issue.
Some unwritten rules of SciPy development, written down
-------------------------------------------------------
Any significant decisions on adding (or not adding) new features, breaking
backwards compatibility or making other significant changes to the codebase
should be made on the scipy-dev mailing list after a discussion (preferably
with full consensus).
Any non-trivial change (where trivial means a typo, or a one-liner maintenance
commit) has to go in through a pull request (PR). It has to be reviewed by
another developer. In case review doesn't happen quickly enough and it is
important that the PR is merged quickly, the submitter of the PR should send a
message to mailing list saying he/she intends to merge that PR without review
at time X for reason Y unless someone reviews it before then.
Changes and new additions should be tested. Untested code is broken code.
Who gets commit rights is decided by the core development team; changes in
commit rights will then be announced on the scipy-dev mailing list.
Who the core development team is comprised of is a little fuzzy - there are
quite a few people who do have commit rights and would like to keep them but
are no longer active. To get an idea, look at the output of::
$ git shortlog --grep="Merge pull request" -a -c -s <current_release_minus_2>..upstream/master|sort -n
and apply some common sense to it (and don't forget people who are still active
but for some reason never merge PRs).
Guidelines for committers
-------------------------
- When merging contributions, a committer is responsible for ensuring that
those meet the requirements outlined in `HACKING.txt`_. Also check that new
features and backwards compatibility breaks were discussed on the scipy-dev
mailing list.
- New code goes in via a pull request.
- Merge new code with the green button. In case of merge conflicts, ask the PR
submitter to rebase (may require providing some git instructions).
- Backports and trivial additions to finish a PR (really trivial, like a typo
or PEP8 fix) can be pushed directly.
- For PRs that add new features or are in some way complex, wait at least a day
before merging it. That way, others get a chance to comment before the code
goes in.
- Squashing commits or cleaning up commit messages of a PR that you consider
too messy is OK. Make sure though to retain the original author name when
doing this.
- Make sure that the labels and milestone on a merged PR are set correctly.
- When you want to reject a PR: if it's very obvious you can just close it and
explain why, if not obvious then it's a good idea to first explain why you
think the PR is not suitable for inclusion in Scipy and then let a second
committer comment or close.
FAQ
---
Q: How can I keep track of pull requests (PRs) and comments on Github?
A: Only people listed as committers get email notifications for new PRs. And
they don't get it for comments unless they have commented themselves on that PR
before. Github does have a "Watch" feature that can be used to monitor
activity on the repo.
Q: How is this list of maintainers maintained, and how does one become a
maintainer?
A: This list is kept in the Scipy git repository. Changes to it should be made
by sending a pull request. For major changes it is expected that these are
discussed on the scipy-dev mailing list first.
To become a maintainer, someone should have an understanding of the code, have
contributed at least a few features or bug fixes to that code, and express an
interest in being a maintainer.
Q: What is the relationship between the list of maintainers and the list of
people with commit rights?
A: Due to the distributed nature of git, it is easily possible to be a
maintainer without having commit rights. It is also very well possible to be a
committer without being a maintainer for a specific module. In practice
however, there will naturally be a large overlap between the group of
maintainers and committers.
Modules
-------
scipy.cluster
`````````````
Maintainers:
- David Warde-Farley
Status:
`Open tickets <https://github.com/scipy/scipy/issues?labels=scipy.cluster>`__
scipy.constants
```````````````
Maintainers:
- Vincent Davis
Status:
The constants module is up-to-date (NIST 2010 CODATA) and has no known bugs.
`Open tickets <https://github.com/scipy/scipy/issues?labels=scipy.constants>`__
scipy.fftpack
`````````````
Maintainers:
- David Cournapeau
Status:
The fftpack module is stable, with few known bugs and little development
happening currently.
`Open tickets <https://github.com/scipy/scipy/issues?labels=scipy.fftpack>`__
scipy.integrate
```````````````
Maintainers:
- Pauli Virtanen
Status:
`Open tickets <https://github.com/scipy/scipy/issues?labels=scipy.integrate>`__
scipy.interpolate
`````````````````
Maintainers:
- Evgeni Burovski
- Pauli Virtanen
Status:
`Open tickets <https://github.com/scipy/scipy/issues?labels=scipy.interpolate>`__
scipy.io
````````
Maintainers:
- Matthew Brett (io.matlab)
- Thomas Robitaille (io.idl)
- David Cournapeau (io.harwell_boeing)
- (io.arff)
- (io.netcdf)
- (io.wavfile)
Status:
The io.idl module is stable and has no known bugs. It will need to be updated
as needed if the IDL file format evolves, or if there are any bugs discovered.
There is no real need for any new features.
`Open tickets <https://github.com/scipy/scipy/issues?labels=scipy.io>`__
scipy.linalg
````````````
Maintainers:
- Pauli Virtanen
- Alex Griffing
Status:
`Open tickets <https://github.com/scipy/scipy/issues?labels=scipy.linalg>`__
scipy.misc
``````````
Maintainers:
- Ralf Gommers
Status:
`Open tickets <https://github.com/scipy/scipy/issues?labels=scipy.misc>`__
scipy.ndimage
`````````````
Maintainers:
- Stefan van der Walt
- Thouis (Ray) Jones
Status:
`Open tickets <https://github.com/scipy/scipy/issues?labels=scipy.ndimage>`__
scipy.odr
`````````
Maintainers:
Status:
`Open tickets <https://github.com/scipy/scipy/issues?labels=scipy.odr>`__
scipy.optimize
``````````````
Maintainers:
- Denis Laxalde
- Pauli Virtanen
Status:
`Open tickets <https://github.com/scipy/scipy/issues?labels=scipy.optimize>`__
scipy.signal
````````````
Maintainers:
- Ralf Gommers
- Eric Moore
Status:
`Open tickets <https://github.com/scipy/scipy/issues?labels=scipy.signal>`__
scipy.sparse
````````````
Maintainers:
- Pauli Virtanen
- Alex Griffing
Status:
`Open tickets <https://github.com/scipy/scipy/issues?labels=scipy.sparse>`__
scipy.sparse.csgraph
````````````````````
Maintainers:
- Jake Vanderplas
Status:
`Open tickets
<https://github.com/scipy/scipy/issues?labels=scipy.sparse.csgraph>`__
scipy.sparse.linalg
```````````````````
Maintainers:
- Pauli Virtanen
- Alex Griffing
- Jake Vanderplas
Status:
`Open tickets <https://github.com/scipy/scipy/issues?labels=scipy.sparse.linalg>`__
scipy.spatial
`````````````
Maintainers:
- Pauli Virtanen (Qhull wrappers)
Status:
`Open tickets <https://github.com/scipy/scipy/issues?labels=scipy.spatial>`__
scipy.special
`````````````
Maintainers:
- Pauli Virtanen
Status:
`Open tickets <https://github.com/scipy/scipy/issues?labels=scipy.special>`__
scipy.stats
```````````
Maintainers:
- Josef Perktold
- Ralf Gommers
- Evgeni Burovski
Status:
`Open tickets <https://github.com/scipy/scipy/issues?labels=scipy.stats>`__
scipy.weave
```````````
Status:
Weave is deprecated and will be removed before the 1.0 release. It is now
packaged as a separate (unmaintained) package: https://github.com/scipy/weave.
For new code, it is recommended to use Cython instead of Weave.
`Open tickets <https://github.com/scipy/scipy/issues?labels=scipy.weave>`__
Infrastructure and web presence
-------------------------------
Websites
````````
Maintainers:
- Pauli Virtanen
- Ralf Gommers
- Thomas Kluyver
- Scott Sinclair
Status:
The scipy.org site has been redesigned and is currently in good shape.
It is hosted on a server maintained by Enthought. Issues and pull requests
should be opened or sent to https://github.com/scipy/scipy.org
Continuous Integration server
`````````````````````````````
TravisCI is the only CI server used at the moment; it tests with all Python
versions on Ubuntu.
Build tools
```````````
Maintainers:
- Ralf Gommers (Bento support)
- David Cournapeau
- Pauli Virtanen
Other
`````
- Release manager: rotating role, Pauli Virtanen for 0.15.x
- Planet SciPy: Gael Varoquaux
.. _HACKING.txt: https://github.com/scipy/scipy/blob/master/HACKING.rst.txt

View file

@ -1,238 +0,0 @@
# Makefile for Sphinx documentation
#
# NOTE(review): this file was committed with unresolved git merge-conflict
# markers (<<<<<<< / ======= / >>>>>>>), which break every make invocation.
# Resolved here by taking the newer (85b42d3) side throughout -- $(FILES)
# support, the html-scipyorg target, no autosummary "generate" step -- while
# keeping the HEAD-only "upload" target, which the common help text still
# advertises.
PYVER = 2.7
PYTHON = python$(PYVER)

# You can set these variables from the command line.
SPHINXOPTS  =
SPHINXBUILD = LANG=C sphinx-build
PAPER       =
FILES=

# Internal variables.
PAPEROPT_a4     = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS   = -d build/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source

.PHONY: help clean html web pickle htmlhelp latex changes linkcheck \
	dist dist-build

#------------------------------------------------------------------------------
help:
	@echo "Please use \`make <target>' where <target> is one of"
	@echo "  html           to make standalone HTML files"
	@echo "  html-scipyorg  to make standalone HTML files with scipy.org theming"
	@echo "  pickle         to make pickle files (usable by e.g. sphinx-web)"
	@echo "  htmlhelp       to make HTML files and a HTML help project"
	@echo "  latex          to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
	@echo "  changes        to make an overview over all changed/added/deprecated items"
	@echo "  linkcheck      to check all external links for integrity"
	@echo "  dist PYVER=... to make a distribution-ready tree"
	@echo "  upload USER=... to upload results to docs.scipy.org"

clean:
	-rm -rf build/* source/generated

#------------------------------------------------------------------------------
# Automated generation of all documents
#------------------------------------------------------------------------------

# Build the current scipy version, and extract docs from it.
# We have to be careful of some issues:
#
#  - Everything must be done using the same Python version
#  - We must use eggs (otherwise they might override PYTHONPATH on import).
#  - Different versions of easy_install install to different directories (!)
#

INSTALL_DIR = $(CURDIR)/build/inst-dist/
INSTALL_PPH = $(INSTALL_DIR)/lib/python$(PYVER)/site-packages:$(INSTALL_DIR)/local/lib/python$(PYVER)/site-packages:$(INSTALL_DIR)/lib/python$(PYVER)/dist-packages:$(INSTALL_DIR)/local/lib/python$(PYVER)/dist-packages

DIST_VARS=PYTHON="PYTHONPATH=$(INSTALL_PPH):$$PYTHONPATH python$(PYVER)" SPHINXBUILD="LANG=C PYTHONPATH=$(INSTALL_PPH):$$PYTHONPATH python$(PYVER) `which sphinx-build`"

UPLOAD_TARGET = $(USER)@docs.scipy.org:/home/docserver/www-root/doc/scipy/

upload:
	@test -e build/dist || { echo "make dist is required first"; exit 1; }
	@test output-is-fine -nt build/dist || { \
	echo "Review the output in build/dist, and do 'touch output-is-fine' before uploading."; exit 1; }
	rsync -r -z --delete-after -p \
		$(if $(shell test -f build/dist/scipy-ref.pdf && echo "y"),, \
			--exclude '**-ref.pdf' --exclude '**-user.pdf') \
		$(if $(shell test -f build/dist/scipy-chm.zip && echo "y"),, \
			--exclude '**-chm.zip') \
		build/dist/ $(UPLOAD_TARGET)

dist:
	make $(DIST_VARS) real-dist

real-dist: dist-build html html-scipyorg
	test -d build/latex || make latex
	make -C build/latex all-pdf
	-test -d build/htmlhelp || make htmlhelp-build
	-rm -rf build/dist
	mkdir -p build/dist
	cp -r build/html-scipyorg build/dist/reference
	touch build/dist/index.html
	(cd build/html && zip -9qr ../dist/scipy-html.zip .)
	cp build/latex/scipy*.pdf build/dist
	-zip build/dist/scipy-chm.zip build/htmlhelp/scipy.chm
	chmod ug=rwX,o=rX -R build/dist
	find build/dist -type d -print0 | xargs -0r chmod g+s
	cd build/dist && tar czf ../dist.tar.gz *

dist-build:
	rm -f ../dist/*.egg
	cd .. && $(PYTHON) setupegg.py bdist_egg
	install -d $(subst :, ,$(INSTALL_PPH))
	$(PYTHON) `which easy_install` --prefix=$(INSTALL_DIR) ../dist/*.egg

#------------------------------------------------------------------------------
# Basic Sphinx generation rules for different formats
#------------------------------------------------------------------------------

html:
	mkdir -p build/html build/doctrees
	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) build/html $(FILES)
	@echo
	@echo "Build finished. The HTML pages are in build/html."

html-scipyorg:
	mkdir -p build/html build/doctrees
	$(SPHINXBUILD) -t scipyorg -b html $(ALLSPHINXOPTS) build/html-scipyorg $(FILES)
	@echo
	@echo "Build finished. The HTML pages are in build/html-scipyorg."

pickle:
	mkdir -p build/pickle build/doctrees
	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) build/pickle $(FILES)
	@echo
	@echo "Build finished; now you can process the pickle files or run"
	@echo "  sphinx-web build/pickle"
	@echo "to start the sphinx-web server."

web: pickle

htmlhelp:
	mkdir -p build/htmlhelp build/doctrees
	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) build/htmlhelp $(FILES)
	@echo
	@echo "Build finished; now you can run HTML Help Workshop with the" \
	      ".hhp project file in build/htmlhelp."

htmlhelp-build: htmlhelp build/htmlhelp/scipy.chm
%.chm: %.hhp
	-hhc.exe $^

latex:
	mkdir -p build/latex build/doctrees
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) build/latex $(FILES)
	$(PYTHON) postprocess.py tex build/latex/*.tex
	perl -pi -e 's/\t(latex.*|pdflatex) (.*)/\t-$$1 -interaction batchmode $$2/' build/latex/Makefile
	@echo
	@echo "Build finished; the LaTeX files are in build/latex."
	@echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \
	      "run these through (pdf)latex."

coverage: build
	mkdir -p build/coverage build/doctrees
	$(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) build/coverage $(FILES)
	@echo "Coverage finished; see c.txt and python.txt in build/coverage"

changes:
	mkdir -p build/changes build/doctrees
	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) build/changes $(FILES)
	@echo
	@echo "The overview file is in build/changes."

linkcheck:
	mkdir -p build/linkcheck build/doctrees
	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) build/linkcheck $(FILES)
	@echo
	@echo "Link check complete; look for any errors in the above output " \
	      "or in build/linkcheck/output.txt."

View file

@ -1,20 +0,0 @@
SciPy Documentation
===================
How to build it
---------------
The easy way to build the documentation is to run
python setup.py build_sphinx
This will first build Scipy in-place, and then generate documentation for it.
Another way
-----------
1. Optionally download an XML dump of the newest docstrings from the doc wiki
at ``/pydocweb/dump`` and save it as ``dump.xml``.
2. Run ``make html`` or ``make dist``
Note that ``make html`` builds the documentation for the currently installed
version of Scipy, not the one corresponding to the source code here.

View file

@ -1,70 +0,0 @@
#!/usr/bin/env python
"""
%prog MODE FILES...
Post-processes HTML and Latex files output by Sphinx.
MODE is either 'html' or 'tex'.
"""
import re, optparse
def main():
    """Parse the command line and post-process each given file in place.

    The first positional argument is MODE ('html' or 'tex'); remaining
    arguments are file names, each rewritten with the processed content.
    Exits via OptionParser.error() on a missing or unknown mode.
    """
    p = optparse.OptionParser(__doc__)
    options, args = p.parse_args()

    if len(args) < 1:
        p.error('no mode given')

    mode = args.pop(0)
    if mode not in ('html', 'tex'):
        p.error('unknown mode %s' % mode)

    for fn in args:
        # 'with' guarantees both handles are closed even if processing
        # raises; the original leaked the write handle on error.
        with open(fn, 'r') as f:
            if mode == 'html':
                lines = process_html(fn, f.readlines())
            else:  # mode == 'tex' (validated above)
                lines = process_tex(f.readlines())
        with open(fn, 'w') as f:
            f.write("".join(lines))
def process_html(fn, lines):
    """Post-process Sphinx HTML output for file *fn*.

    Currently a no-op hook: returns *lines* unchanged.
    """
    return lines
def process_tex(lines):
"""
<<<<<<< HEAD
Remove unnecessary section titles from the LaTeX file,
and convert UTF-8 non-breaking spaces to Latex nbsps.
"""
new_lines = []
for line in lines:
if re.match(r'^\\(section|subsection|subsubsection|paragraph|subparagraph){(numpy|scipy)\.', line):
=======
Remove unnecessary section titles from the LaTeX file.
"""
new_lines = []
for line in lines:
line = re.sub(r'^\s*\\strong{See Also:}\s*$', r'\paragraph{See Also}', line)
if (line.startswith(r'\section{scipy.')
or line.startswith(r'\subsection{scipy.')
or line.startswith(r'\subsubsection{scipy.')
or line.startswith(r'\paragraph{scipy.')
or line.startswith(r'\subparagraph{scipy.')
):
>>>>>>> 85b42d3bbdcef5cbe0fe2390bba8b3ff1608040b
pass # skip!
else:
new_lines.append(line)
return new_lines
if __name__ == "__main__":
main()

View file

@ -1,221 +0,0 @@
==========================
SciPy 0.10.0 Release Notes
==========================
.. contents::
SciPy 0.10.0 is the culmination of 8 months of hard work. It contains
many new features, numerous bug-fixes, improved test coverage and
better documentation. There have been a limited number of deprecations
and backwards-incompatible changes in this release, which are documented
below. All users are encouraged to upgrade to this release, as there
are a large number of bug-fixes and optimizations. Moreover, our
development attention will now shift to bug-fix releases on the 0.10.x
branch, and on adding new features on the development master branch.
Release highlights:
- Support for Bento as optional build system.
- Support for generalized eigenvalue problems, and all shift-invert modes
available in ARPACK.
This release requires Python 2.4-2.7 or 3.1 and newer, and NumPy 1.5 or greater.
New features
============
Bento: new optional build system
--------------------------------
Scipy can now be built with `Bento <http://cournape.github.com/Bento/>`_.
Bento has some nice features like parallel builds and partial rebuilds, that
are not possible with the default build system (distutils). For usage
instructions see BENTO_BUILD.txt in the scipy top-level directory.
Currently Scipy has three build systems, distutils, numscons and bento.
Numscons is deprecated and is planned to be removed in the next
release.
Generalized and shift-invert eigenvalue problems in ``scipy.sparse.linalg``
---------------------------------------------------------------------------
The sparse eigenvalue problem solver functions
``scipy.sparse.eigs/eigh`` now support generalized eigenvalue
problems, and all shift-invert modes available in ARPACK.
Discrete-Time Linear Systems (``scipy.signal``)
-----------------------------------------------
Support for simulating discrete-time linear systems, including
``scipy.signal.dlsim``, ``scipy.signal.dimpulse``, and ``scipy.signal.dstep``,
has been added to SciPy. Conversion of linear systems from continuous-time to
discrete-time representations is also present via the
``scipy.signal.cont2discrete`` function.
Enhancements to ``scipy.signal``
--------------------------------
A Lomb-Scargle periodogram can now be computed with the new function
``scipy.signal.lombscargle``.
The forward-backward filter function ``scipy.signal.filtfilt`` can now
filter the data in a given axis of an n-dimensional numpy array.
(Previously it only handled a 1-dimensional array.) Options have been
added to allow more control over how the data is extended before filtering.
FIR filter design with ``scipy.signal.firwin2`` now has options to create
filters of type III (zero at zero and Nyquist frequencies) and IV (zero at zero
frequency).
Additional decomposition options (``scipy.linalg``)
---------------------------------------------------
A sort keyword has been added to the Schur decomposition routine
(``scipy.linalg.schur``) to allow the sorting of eigenvalues in
the resultant Schur form.
Additional special matrices (``scipy.linalg``)
----------------------------------------------
The functions ``hilbert`` and ``invhilbert`` were added to ``scipy.linalg``.
Enhancements to ``scipy.stats``
-------------------------------
* The *one-sided form* of Fisher's exact test is now also implemented in
``stats.fisher_exact``.
* The function ``stats.chi2_contingency`` for computing the chi-square test of
independence of factors in a contingency table has been added, along with
the related utility functions ``stats.contingency.margins`` and
``stats.contingency.expected_freq``.
Enhancements to ``scipy.special``
---------------------------------
The functions ``logit(p) = log(p/(1-p))``
and ``expit(x) = 1/(1+exp(-x))`` have been implemented as
``scipy.special.logit`` and ``scipy.special.expit`` respectively.
Basic support for Harwell-Boeing file format for sparse matrices
----------------------------------------------------------------
Both read and write are supported through a simple function-based API, as well as
a more complete API to control number format. The functions may be found in
scipy.sparse.io.
The following features are supported:
* Read and write sparse matrices in the CSC format
* Only real, symmetric, assembled matrices are supported (RUA format)
Deprecated features
===================
``scipy.maxentropy``
--------------------
The maxentropy module is unmaintained, rarely used and has not been functioning
well for several releases. Therefore it has been deprecated for this release,
and will be removed for scipy 0.11. Logistic regression in scikits.learn is a
good alternative for this functionality. The ``scipy.maxentropy.logsumexp``
function has been moved to ``scipy.misc``.
``scipy.lib.blas``
------------------
There are similar BLAS wrappers in ``scipy.linalg`` and ``scipy.lib``. These
have now been consolidated as ``scipy.linalg.blas``, and ``scipy.lib.blas`` is
deprecated.
Numscons build system
---------------------
The numscons build system is being replaced by Bento, and will be removed in
one of the next scipy releases.
Backwards-incompatible changes
==============================
The deprecated name `invnorm` was removed from ``scipy.stats.distributions``,
this distribution is available as `invgauss`.
The following deprecated nonlinear solvers from ``scipy.optimize`` have been
removed::
- ``broyden_modified`` (bad performance)
- ``broyden1_modified`` (bad performance)
- ``broyden_generalized`` (equivalent to ``anderson``)
- ``anderson2`` (equivalent to ``anderson``)
- ``broyden3`` (obsoleted by new limited-memory broyden methods)
- ``vackar`` (renamed to ``diagbroyden``)
Other changes
=============
``scipy.constants`` has been updated with the CODATA 2010 constants.
``__all__`` dicts have been added to all modules, which has cleaned up the
namespaces (particularly useful for interactive work).
An API section has been added to the documentation, giving recommended import
guidelines and specifying which submodules are public and which aren't.
Authors
=======
This release contains work by the following people (contributed at least
one patch to this release, names in alphabetical order):
* Jeff Armstrong +
* Matthew Brett
* Lars Buitinck +
* David Cournapeau
* FI$H 2000 +
* Michael McNeil Forbes +
* Matty G +
* Christoph Gohlke
* Ralf Gommers
* Yaroslav Halchenko
* Charles Harris
* Thouis (Ray) Jones +
* Chris Jordan-Squire +
* Robert Kern
* Chris Lasher +
* Wes McKinney +
* Travis Oliphant
* Fabian Pedregosa
* Josef Perktold
* Thomas Robitaille +
* Pim Schellart +
* Anthony Scopatz +
* Skipper Seabold +
* Fazlul Shahriar +
* David Simcha +
* Scott Sinclair +
* Andrey Smirnov +
* Collin RM Stocks +
* Martin Teichmann +
* Jake Vanderplas +
* Gaël Varoquaux +
* Pauli Virtanen
* Stefan van der Walt
* Warren Weckesser
* Mark Wiebe +
A total of 35 people contributed to this release.
People with a "+" by their names contributed a patch for the first time.

View file

@ -1,33 +0,0 @@
==========================
SciPy 0.10.1 Release Notes
==========================
.. contents::
SciPy 0.10.1 is a bug-fix release with no new features compared to 0.10.0.
Main changes
------------
The most important changes are:
1. The single precision routines of ``eigs`` and ``eigsh`` in
``scipy.sparse.linalg`` have been disabled (they internally use double
precision now).
2. A compatibility issue related to changes in NumPy macros has been fixed, in
order to make scipy 0.10.1 compile with the upcoming numpy 1.7.0 release.
Other issues fixed
------------------
- #835: stats: nan propagation in stats.distributions
- #1202: io: netcdf segfault
- #1531: optimize: make curve_fit work with method as callable.
- #1560: linalg: fixed mistake in eig_banded documentation.
- #1565: ndimage: bug in ndimage.variance
- #1457: ndimage: standard_deviation does not work with sequence of indexes
- #1562: cluster: segfault in linkage function
- #1568: stats: One-sided fisher_exact() returns `p` < 1 for 0 successful attempts
- #1575: stats: zscore and zmap handle the axis keyword incorrectly

View file

@ -1,316 +0,0 @@
==========================
SciPy 0.11.0 Release Notes
==========================
.. contents::
SciPy 0.11.0 is the culmination of 8 months of hard work. It contains
many new features, numerous bug-fixes, improved test coverage and
better documentation. Highlights of this release are:
- A new module has been added which provides a number of common sparse graph
algorithms.
- New unified interfaces to the existing optimization and root finding
functions have been added.
All users are encouraged to upgrade to this release, as there are a large
number of bug-fixes and optimizations. Our development attention will now
shift to bug-fix releases on the 0.11.x branch, and on adding new features on
the master branch.
This release requires Python 2.4-2.7 or 3.1-3.2 and NumPy 1.5.1 or greater.
New features
============
Sparse Graph Submodule
----------------------
The new submodule :mod:`scipy.sparse.csgraph` implements a number of efficient
graph algorithms for graphs stored as sparse adjacency matrices. Available
routines are:
- :func:`connected_components` - determine connected components of a graph
- :func:`laplacian` - compute the laplacian of a graph
- :func:`shortest_path` - compute the shortest path between points on a
positive graph
- :func:`dijkstra` - use Dijkstra's algorithm for shortest path
- :func:`floyd_warshall` - use the Floyd-Warshall algorithm for
shortest path
- :func:`breadth_first_order` - compute a breadth-first order of nodes
- :func:`depth_first_order` - compute a depth-first order of nodes
- :func:`breadth_first_tree` - construct the breadth-first tree from
a given node
- :func:`depth_first_tree` - construct a depth-first tree from a given node
- :func:`minimum_spanning_tree` - construct the minimum spanning
tree of a graph
``scipy.optimize`` improvements
-------------------------------
The optimize module has received a lot of attention this release. In addition
to added tests, documentation improvements, bug fixes and code clean-up, the
following improvements were made:
- A unified interface to minimizers of univariate and multivariate
functions has been added.
- A unified interface to root finding algorithms for multivariate functions
has been added.
- The L-BFGS-B algorithm has been updated to version 3.0.
Unified interfaces to minimizers
````````````````````````````````
Two new functions ``scipy.optimize.minimize`` and
``scipy.optimize.minimize_scalar`` were added to provide a common interface
to minimizers of multivariate and univariate functions respectively.
For multivariate functions, ``scipy.optimize.minimize`` provides an
interface to methods for unconstrained optimization (`fmin`, `fmin_powell`,
`fmin_cg`, `fmin_ncg`, `fmin_bfgs` and `anneal`) or constrained
optimization (`fmin_l_bfgs_b`, `fmin_tnc`, `fmin_cobyla` and `fmin_slsqp`).
For univariate functions, ``scipy.optimize.minimize_scalar`` provides an
interface to methods for unconstrained and bounded optimization (`brent`,
`golden`, `fminbound`).
This allows for easier comparing and switching between solvers.
Unified interface to root finding algorithms
````````````````````````````````````````````
The new function ``scipy.optimize.root`` provides a common interface to
root finding algorithms for multivariate functions, embedding `fsolve`,
`leastsq` and `nonlin` solvers.
``scipy.linalg`` improvements
-----------------------------
New matrix equation solvers
```````````````````````````
Solvers for the Sylvester equation (``scipy.linalg.solve_sylvester``), discrete
and continuous Lyapunov equations (``scipy.linalg.solve_lyapunov``,
``scipy.linalg.solve_discrete_lyapunov``) and discrete and continuous algebraic
Riccati equations (``scipy.linalg.solve_continuous_are``,
``scipy.linalg.solve_discrete_are``) have been added to ``scipy.linalg``.
These solvers are often used in the field of linear control theory.
QZ and QR Decomposition
````````````````````````
It is now possible to calculate the QZ, or Generalized Schur, decomposition
using ``scipy.linalg.qz``. This function wraps the LAPACK routines sgges,
dgges, cgges, and zgges.
The function ``scipy.linalg.qr_multiply``, which allows efficient computation
of the matrix product of Q (from a QR decomposition) and a vector, has been
added.
Pascal matrices
```````````````
A function for creating Pascal matrices, ``scipy.linalg.pascal``, was added.
Sparse matrix construction and operations
-----------------------------------------
Two new functions, ``scipy.sparse.diags`` and ``scipy.sparse.block_diag``, were
added to easily construct diagonal and block-diagonal sparse matrices
respectively.
``scipy.sparse.csc_matrix`` and ``csr_matrix`` now support the operations
``sin``, ``tan``, ``arcsin``, ``arctan``, ``sinh``, ``tanh``, ``arcsinh``,
``arctanh``, ``rint``, ``sign``, ``expm1``, ``log1p``, ``deg2rad``, ``rad2deg``,
``floor``, ``ceil`` and ``trunc``. Previously, these operations had to be
performed by operating on the matrices' ``data`` attribute.
LSMR iterative solver
---------------------
LSMR, an iterative method for solving (sparse) linear and linear
least-squares systems, was added as ``scipy.sparse.linalg.lsmr``.
Discrete Sine Transform
-----------------------
Bindings for the discrete sine transform functions have been added to
``scipy.fftpack``.
``scipy.interpolate`` improvements
----------------------------------
For interpolation in spherical coordinates, the three classes
``scipy.interpolate.SmoothSphereBivariateSpline``,
``scipy.interpolate.LSQSphereBivariateSpline``, and
``scipy.interpolate.RectSphereBivariateSpline`` have been added.
Binned statistics (``scipy.stats``)
-----------------------------------
The stats module has gained functions to do binned statistics, which are a
generalization of histograms, in 1-D, 2-D and multiple dimensions:
``scipy.stats.binned_statistic``, ``scipy.stats.binned_statistic_2d`` and
``scipy.stats.binned_statistic_dd``.
Deprecated features
===================
``scipy.sparse.cs_graph_components`` has been made a part of the sparse graph
submodule, and renamed to ``scipy.sparse.csgraph.connected_components``.
Calling the former routine will result in a deprecation warning.
``scipy.misc.radon`` has been deprecated. A more full-featured radon transform
can be found in scikits-image.
``scipy.io.save_as_module`` has been deprecated. A better way to save multiple
Numpy arrays is the ``numpy.savez`` function.
The `xa` and `xb` parameters for all distributions in
``scipy.stats.distributions`` already weren't used; they have now been
deprecated.
Backwards incompatible changes
==============================
Removal of ``scipy.maxentropy``
-------------------------------
The ``scipy.maxentropy`` module, which was deprecated in the 0.10.0 release,
has been removed. Logistic regression in scikits.learn is a good and modern
alternative for this functionality.
Minor change in behavior of ``splev``
-------------------------------------
The spline evaluation function now behaves similarly to ``interp1d``
for size-1 arrays. Previous behavior::
>>> from scipy.interpolate import splev, splrep, interp1d
>>> x = [1,2,3,4,5]
>>> y = [4,5,6,7,8]
>>> tck = splrep(x, y)
>>> splev([1], tck)
4.
>>> splev(1, tck)
4.
Corrected behavior::
>>> splev([1], tck)
array([ 4.])
>>> splev(1, tck)
array(4.)
This affects also the ``UnivariateSpline`` classes.
Behavior of ``scipy.integrate.complex_ode``
-------------------------------------------
The behavior of the ``y`` attribute of ``complex_ode`` is changed.
Previously, it expressed the complex-valued solution in the form::
z = ode.y[::2] + 1j * ode.y[1::2]
Now, it is directly the complex-valued solution::
z = ode.y
Minor change in behavior of T-tests
-----------------------------------
The T-tests ``scipy.stats.ttest_ind``, ``scipy.stats.ttest_rel`` and
``scipy.stats.ttest_1samp`` have been changed so that 0 / 0 now returns NaN
instead of 1.
Other changes
=============
The SuperLU sources in ``scipy.sparse.linalg`` have been updated to version 4.3
from upstream.
The function ``scipy.signal.bode``, which calculates magnitude and phase data
for a continuous-time system, has been added.
The two-sample T-test ``scipy.stats.ttest_ind`` gained an option to compare
samples with unequal variances, i.e. Welch's T-test.
``scipy.misc.logsumexp`` now takes an optional ``axis`` keyword argument.
Authors
=======
This release contains work by the following people (contributed at least
one patch to this release, names in alphabetical order):
* Jeff Armstrong
* Chad Baker
* Brandon Beacher +
* behrisch +
* borishim +
* Matthew Brett
* Lars Buitinck
* Luis Pedro Coelho +
* Johann Cohen-Tanugi
* David Cournapeau
* dougal +
* Ali Ebrahim +
* endolith +
* Bjørn Forsman +
* Robert Gantner +
* Sebastian Gassner +
* Christoph Gohlke
* Ralf Gommers
* Yaroslav Halchenko
* Charles Harris
* Jonathan Helmus +
* Andreas Hilboll +
* Marc Honnorat +
* Jonathan Hunt +
* Maxim Ivanov +
* Thouis (Ray) Jones
* Christopher Kuster +
* Josh Lawrence +
* Denis Laxalde +
* Travis Oliphant
* Joonas Paalasmaa +
* Fabian Pedregosa
* Josef Perktold
* Gavin Price +
* Jim Radford +
* Andrew Schein +
* Skipper Seabold
* Jacob Silterra +
* Scott Sinclair
* Alexis Tabary +
* Martin Teichmann
* Matt Terry +
* Nicky van Foreest +
* Jacob Vanderplas
* Patrick Varilly +
* Pauli Virtanen
* Nils Wagner +
* Darryl Wally +
* Stefan van der Walt
* Liming Wang +
* David Warde-Farley +
* Warren Weckesser
* Sebastian Werk +
* Mike Wimmer +
* Tony S Yu +
A total of 55 people contributed to this release.
People with a "+" by their names contributed a patch for the first time.

View file

@ -1,240 +0,0 @@
==========================
SciPy 0.12.0 Release Notes
==========================
.. contents::
SciPy 0.12.0 is the culmination of 7 months of hard work. It contains
many new features, numerous bug-fixes, improved test coverage and
better documentation. There have been a number of deprecations and
API changes in this release, which are documented below. All users
are encouraged to upgrade to this release, as there are a large number
of bug-fixes and optimizations. Moreover, our development attention
will now shift to bug-fix releases on the 0.12.x branch, and on adding
new features on the master branch.
Some of the highlights of this release are:
- Completed QHull wrappers in scipy.spatial.
- cKDTree now a drop-in replacement for KDTree.
- A new global optimizer, basinhopping.
- Support for Python 2 and Python 3 from the same code base (no more 2to3).
This release requires Python 2.6, 2.7 or 3.1-3.3 and NumPy 1.5.1 or greater.
Support for Python 2.4 and 2.5 has been dropped as of this release.
New features
============
``scipy.spatial`` improvements
------------------------------
cKDTree feature-complete
^^^^^^^^^^^^^^^^^^^^^^^^
Cython version of KDTree, cKDTree, is now feature-complete. Most operations
(construction, query, query_ball_point, query_pairs, count_neighbors and
sparse_distance_matrix) are between 200 and 1000 times faster in cKDTree than
in KDTree. With very minor caveats, cKDTree has exactly the same interface as
KDTree, and can be used as a drop-in replacement.
Voronoi diagrams and convex hulls
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
`scipy.spatial` now contains functionality for computing Voronoi
diagrams and convex hulls using the Qhull library. (Delaunay
triangulation was available since Scipy 0.9.0.)
Delaunay improvements
^^^^^^^^^^^^^^^^^^^^^
It's now possible to pass in custom Qhull options in Delaunay
triangulation. Coplanar points are now also recorded, if present.
Incremental construction of Delaunay triangulations is now also
possible.
Spectral estimators (``scipy.signal``)
--------------------------------------
The functions ``scipy.signal.periodogram`` and ``scipy.signal.welch`` were
added, providing DFT-based spectral estimators.
``scipy.optimize`` improvements
-------------------------------
Callback functions in L-BFGS-B and TNC
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
A callback mechanism was added to L-BFGS-B and TNC minimization solvers.
Basin hopping global optimization (``scipy.optimize.basinhopping``)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
A new global optimization algorithm. Basinhopping is designed to efficiently
find the global minimum of a smooth function.
``scipy.special`` improvements
------------------------------
Revised complex error functions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The computation of special functions related to the error function now uses a
new `Faddeeva library from MIT <http://ab-initio.mit.edu/Faddeeva>`__ which
increases their numerical precision. The scaled and imaginary error functions
``erfcx`` and ``erfi`` were also added, and the Dawson integral ``dawsn`` can
now be evaluated for a complex argument.
Faster orthogonal polynomials
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Evaluation of orthogonal polynomials (the ``eval_*`` routines) is now
faster in ``scipy.special``, and their ``out=`` argument functions
properly.
``scipy.sparse.linalg`` features
--------------------------------
- In ``scipy.sparse.linalg.spsolve``, the ``b`` argument can now be either
a vector or a matrix.
- ``scipy.sparse.linalg.inv`` was added. This uses ``spsolve`` to compute
a sparse matrix inverse.
- ``scipy.sparse.linalg.expm`` was added. This computes the exponential of
a sparse matrix using a similar algorithm to the existing dense array
implementation in ``scipy.linalg.expm``.
Listing Matlab(R) file contents in ``scipy.io``
-----------------------------------------------
A new function ``whosmat`` is available in ``scipy.io`` for inspecting contents
of MAT files without reading them to memory.
Documented BLAS and LAPACK low-level interfaces (``scipy.linalg``)
------------------------------------------------------------------
The modules `scipy.linalg.blas` and `scipy.linalg.lapack` can be used
to access low-level BLAS and LAPACK functions.
Polynomial interpolation improvements (``scipy.interpolate``)
-------------------------------------------------------------
The barycentric, Krogh, piecewise and pchip polynomial interpolators in
``scipy.interpolate`` accept now an ``axis`` argument.
Deprecated features
===================
`scipy.lib.lapack`
------------------
The module `scipy.lib.lapack` is deprecated. You can use `scipy.linalg.lapack`
instead. The module `scipy.lib.blas` was deprecated earlier in Scipy 0.10.0.
`fblas` and `cblas`
-------------------
Accessing the modules `scipy.linalg.fblas`, `cblas`, `flapack`, `clapack` is
deprecated. Instead, use the modules `scipy.linalg.lapack` and
`scipy.linalg.blas`.
Backwards incompatible changes
==============================
Removal of ``scipy.io.save_as_module``
--------------------------------------
The function ``scipy.io.save_as_module`` was deprecated in Scipy 0.11.0, and is
now removed.
Its private support modules ``scipy.io.dumbdbm_patched`` and
``scipy.io.dumb_shelve`` are also removed.
`axis` argument added to `scipy.stats.scoreatpercentile`
--------------------------------------------------------
The function `scipy.stats.scoreatpercentile` has been given an `axis`
argument. The default argument is `axis=None`, which means the calculation
is done on the flattened array. Before this change, `scoreatpercentile`
would act as if `axis=0` had been given. Code using `scoreatpercentile`
with a multidimensional array will need to add `axis=0` to the function call
to preserve the old behavior. (This API change was not noticed until
long after the release of 0.12.0.)
Authors
=======
* Anton Akhmerov +
* Alexander Eberspächer +
* Anne Archibald
* Jisk Attema +
* K.-Michael Aye +
* bemasc +
* Sebastian Berg +
* François Boulogne +
* Matthew Brett
* Lars Buitinck
* Steven Byrnes +
* Tim Cera +
* Christian +
* Keith Clawson +
* David Cournapeau
* Nathan Crock +
* endolith
* Bradley M. Froehle +
* Matthew R Goodman
* Christoph Gohlke
* Ralf Gommers
* Robert David Grant +
* Yaroslav Halchenko
* Charles Harris
* Jonathan Helmus
* Andreas Hilboll
* Hugo +
* Oleksandr Huziy
* Jeroen Demeyer +
* Johannes Schönberger +
* Steven G. Johnson +
* Chris Jordan-Squire
* Jonathan Taylor +
* Niklas Kroeger +
* Jerome Kieffer +
* kingson +
* Josh Lawrence
* Denis Laxalde
* Alex Leach +
* Tim Leslie
* Richard Lindsley +
* Lorenzo Luengo +
* Stephen McQuay +
* MinRK
* Sturla Molden +
* Eric Moore +
* mszep +
* Matt Newville +
* Vlad Niculae
* Travis Oliphant
* David Parker +
* Fabian Pedregosa
* Josef Perktold
* Zach Ploskey +
* Alex Reinhart +
* Gilles Rochefort +
* Ciro Duran Santillli +
* Jan Schlueter +
* Jonathan Scholz +
* Anthony Scopatz
* Skipper Seabold
* Fabrice Silva +
* Scott Sinclair
* Jacob Stevenson +
* Sturla Molden +
* Julian Taylor +
* thorstenkranz +
* John Travers +
* True Price +
* Nicky van Foreest
* Jacob Vanderplas
* Patrick Varilly
* Daniel Velkov +
* Pauli Virtanen
* Stefan van der Walt
* Warren Weckesser
A total of 75 people contributed to this release.
People with a "+" by their names contributed a patch for the first time.

View file

@ -1,8 +0,0 @@
==========================
SciPy 0.12.1 Release Notes
==========================
SciPy 0.12.1 is a bug-fix release with no new features compared to 0.12.0.
The single issue fixed by this release is a security issue in ``scipy.weave``,
which was previously using temporary directories in an insecure manner under
certain circumstances.

View file

@ -1,373 +0,0 @@
==========================
SciPy 0.13.0 Release Notes
==========================
.. contents::
SciPy 0.13.0 is the culmination of 7 months of hard work. It contains
many new features, numerous bug-fixes, improved test coverage and
better documentation. There have been a number of deprecations and
API changes in this release, which are documented below. All users
are encouraged to upgrade to this release, as there are a large number
of bug-fixes and optimizations. Moreover, our development attention
will now shift to bug-fix releases on the 0.13.x branch, and on adding
new features on the master branch.
This release requires Python 2.6, 2.7 or 3.1-3.3 and NumPy 1.5.1 or greater.
Highlights of this release are:
- support for fancy indexing and boolean comparisons with sparse matrices
- interpolative decompositions and matrix functions in the linalg module
- two new trust-region solvers for unconstrained minimization
New features
============
``scipy.integrate`` improvements
--------------------------------
N-dimensional numerical integration
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
A new function `scipy.integrate.nquad`, which provides N-dimensional
integration functionality with a more flexible interface than ``dblquad`` and
``tplquad``, has been added.
``dopri*`` improvements
^^^^^^^^^^^^^^^^^^^^^^^
The intermediate results from the ``dopri`` family of ODE solvers can now be
accessed by a *solout* callback function.
``scipy.linalg`` improvements
-----------------------------
Interpolative decompositions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Scipy now includes a new module `scipy.linalg.interpolative`
containing routines for computing interpolative matrix decompositions
(ID). This feature is based on the ID software package by
P.G. Martinsson, V. Rokhlin, Y. Shkolnisky, and M. Tygert, previously
adapted for Python in the PymatrixId package by K.L. Ho.
Polar decomposition
^^^^^^^^^^^^^^^^^^^
A new function `scipy.linalg.polar`, to compute the polar decomposition
of a matrix, was added.
BLAS level 3 functions
^^^^^^^^^^^^^^^^^^^^^^
The BLAS functions ``symm``, ``syrk``, ``syr2k``, ``hemm``, ``herk`` and
``her2k`` are now wrapped in `scipy.linalg`.
Matrix functions
^^^^^^^^^^^^^^^^
Several matrix function algorithms have been implemented or updated following
detailed descriptions in recent papers of Nick Higham and his co-authors.
These include the matrix square root (``sqrtm``), the matrix logarithm
(``logm``), the matrix exponential (``expm``) and its Frechet derivative
(``expm_frechet``), and fractional matrix powers (``fractional_matrix_power``).
``scipy.optimize`` improvements
-------------------------------
Trust-region unconstrained minimization algorithms
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The ``minimize`` function gained two trust-region solvers for unconstrained
minimization: ``dogleg`` and ``trust-ncg``.
``scipy.sparse`` improvements
-----------------------------
Boolean comparisons and sparse matrices
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
All sparse matrix types now support boolean data, and boolean operations. Two
sparse matrices `A` and `B` can be compared in all the expected ways `A < B`,
`A >= B`, `A != B`, producing similar results as dense Numpy arrays.
Comparisons with dense matrices and scalars are also supported.
CSR and CSC fancy indexing
^^^^^^^^^^^^^^^^^^^^^^^^^^
Compressed sparse row and column sparse matrix types now support fancy indexing
with boolean matrices, slices, and lists. So where A is a (CSC or CSR) sparse
matrix, you can do things like::
>>> A[A > 0.5] = 1 # since Boolean sparse matrices work
>>> A[:2, :3] = 2
>>> A[[1,2], 2] = 3
``scipy.sparse.linalg`` improvements
------------------------------------
The new function ``onenormest`` provides a lower bound of the 1-norm of a
linear operator and has been implemented according to Higham and Tisseur
(2000). This function is not only useful for sparse matrices, but can also be
used to estimate the norm of products or powers of dense matrices without
explicitly building the intermediate matrix.
The multiplicative action of the matrix exponential of a linear operator
(``expm_multiply``) has been implemented following the description in Al-Mohy
and Higham (2011).
Abstract linear operators (`scipy.sparse.linalg.LinearOperator`) can now be
multiplied, added to each other, and exponentiated, producing new linear
operators. This enables easier construction of composite linear operations.
``scipy.spatial`` improvements
------------------------------
The vertices of a `ConvexHull` can now be accessed via the `vertices` attribute,
which gives proper orientation in 2-D.
``scipy.signal`` improvements
-----------------------------
The cosine window function `scipy.signal.cosine` was added.
``scipy.special`` improvements
------------------------------
New functions `scipy.special.xlogy` and `scipy.special.xlog1py` were added.
These functions can simplify and speed up code that has to calculate
``x * log(y)`` and give 0 when ``x == 0``.
``scipy.io`` improvements
-------------------------
Unformatted Fortran file reader
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The new class `scipy.io.FortranFile` facilitates reading unformatted
sequential files written by Fortran code.
``scipy.io.wavfile`` enhancements
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
`scipy.io.wavfile.write` now accepts a file buffer. Previously it only
accepted a filename.
`scipy.io.wavfile.read` and `scipy.io.wavfile.write` can now handle floating
point WAV files.
``scipy.interpolate`` improvements
----------------------------------
B-spline derivatives and antiderivatives
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
`scipy.interpolate.splder` and `scipy.interpolate.splantider` functions
for computing B-splines that represent derivatives and antiderivatives
of B-splines were added. These functions are also available in the
class-based FITPACK interface as ``UnivariateSpline.derivative`` and
``UnivariateSpline.antiderivative``.
``scipy.stats`` improvements
----------------------------
Distributions now allow using keyword parameters in addition to
positional parameters in all methods.
The function `scipy.stats.power_divergence` has been added for the
Cressie-Read power divergence statistic and goodness of fit test.
Included in this family of statistics is the "G-test"
(http://en.wikipedia.org/wiki/G-test).
`scipy.stats.mood` now accepts multidimensional input.
An option was added to `scipy.stats.wilcoxon` for continuity correction.
`scipy.stats.chisquare` now has an `axis` argument.
`scipy.stats.mstats.chisquare` now has `axis` and `ddof` arguments.
Deprecated features
===================
``expm2`` and ``expm3``
-----------------------
The matrix exponential functions `scipy.linalg.expm2` and `scipy.linalg.expm3`
are deprecated. All users should use the numerically more robust
`scipy.linalg.expm` function instead.
``scipy.stats`` functions
-------------------------
`scipy.stats.oneway` is deprecated; `scipy.stats.f_oneway` should be used
instead.
`scipy.stats.glm` is deprecated. `scipy.stats.ttest_ind` is an equivalent
function; more full-featured general (and generalized) linear model
implementations can be found in statsmodels.
`scipy.stats.cmedian` is deprecated; ``numpy.median`` should be used instead.
Backwards incompatible changes
==============================
LIL matrix assignment
---------------------
Assigning values to LIL matrices with two index arrays now works similarly as
assigning into ndarrays::
>>> x = lil_matrix((3, 3))
>>> x[[0,1,2],[0,1,2]]=[0,1,2]
>>> x.todense()
matrix([[ 0., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 2.]])
rather than giving the result::
>>> x.todense()
matrix([[ 0., 1., 2.],
[ 0., 1., 2.],
[ 0., 1., 2.]])
Users relying on the previous behavior will need to revisit their code.
The previous behavior is obtained by ``x[numpy.ix_([0,1,2],[0,1,2])] = ...``.
Deprecated ``radon`` function removed
-------------------------------------
The ``misc.radon`` function, which was deprecated in scipy 0.11.0, has been
removed. Users can find a more full-featured ``radon`` function in
scikit-image.
Removed deprecated keywords ``xa`` and ``xb`` from ``stats.distributions``
--------------------------------------------------------------------------
The keywords ``xa`` and ``xb``, which were deprecated since 0.11.0, have
been removed from the distributions in ``scipy.stats``.
Changes to MATLAB file readers / writers
----------------------------------------
The major change is that 1D arrays in numpy now become row vectors (shape 1, N)
when saved to a MATLAB 5 format file. Previously 1D arrays were saved as column
vectors (N, 1). This is to harmonize the behavior of writing MATLAB 4 and 5
formats, and adapt to the defaults of numpy and MATLAB - for example
``np.atleast_2d`` returns 1D arrays as row vectors.
Trying to save arrays of greater than 2 dimensions in MATLAB 4 format now raises
an error instead of silently reshaping the array as 2D.
``scipy.io.loadmat('afile')`` used to look for `afile` on the Python system path
(``sys.path``); now ``loadmat`` only looks in the current directory for a
relative path filename.
Other changes
=============
Security fix: ``scipy.weave`` previously used temporary directories in an
insecure manner under certain circumstances.
Cython is now required to build *unreleased* versions of scipy.
The C files generated from Cython sources are not included in the git repo
anymore. They are however still shipped in source releases.
The code base received a fairly large PEP8 cleanup. A ``tox pep8``
command has been added; new code should pass this test command.
Scipy cannot be compiled with gfortran 4.1 anymore (at least on RH5), likely
due to that compiler version not supporting entry constructs well.
Authors
=======
This release contains work by the following people (contributed at least
one patch to this release, names in alphabetical order):
* Jorge Cañardo Alastuey +
* Tom Aldcroft +
* Max Bolingbroke +
* Joseph Jon Booker +
* François Boulogne
* Matthew Brett
* Christian Brodbeck +
* Per Brodtkorb +
* Christian Brueffer +
* Lars Buitinck
* Evgeni Burovski +
* Tim Cera
* Lawrence Chan +
* David Cournapeau
* Dražen Lučanin +
* Alexander J. Dunlap +
* endolith
* André Gaul +
* Christoph Gohlke
* Ralf Gommers
* Alex Griffing +
* Blake Griffith +
* Charles Harris
* Bob Helmbold +
* Andreas Hilboll
* Kat Huang +
* Oleksandr (Sasha) Huziy +
* Gert-Ludwig Ingold +
* Thouis (Ray) Jones
* Juan Luis Cano Rodríguez +
* Robert Kern
* Andreas Kloeckner +
* Sytse Knypstra +
* Gustav Larsson +
* Denis Laxalde
* Christopher Lee
* Tim Leslie
* Wendy Liu +
* Clemens Novak +
* Takuya Oshima +
* Josef Perktold
* Illia Polosukhin +
* Przemek Porebski +
* Steve Richardson +
* Branden Rolston +
* Skipper Seabold
* Fazlul Shahriar
* Leo Singer +
* Rohit Sivaprasad +
* Daniel B. Smith +
* Julian Taylor
* Louis Thibault +
* Tomas Tomecek +
* John Travers
* Richard Tsai +
* Jacob Vanderplas
* Patrick Varilly
* Pauli Virtanen
* Stefan van der Walt
* Warren Weckesser
* Pedro Werneck +
* Nils Werner +
* Michael Wimmer +
* Nathan Woods +
* Tony S. Yu +
A total of 65 people contributed to this release.
People with a "+" by their names contributed a patch for the first time.

View file

@ -1,15 +0,0 @@
==========================
SciPy 0.13.1 Release Notes
==========================
SciPy 0.13.1 is a bug-fix release with no new features compared to 0.13.0.
The only changes are several fixes in ``ndimage``, one of which was a serious
regression in ``ndimage.label`` (Github issue 3025), which gave
incorrect results in 0.13.0.
Issues fixed
------------
- 3025: ``ndimage.label`` returns incorrect results in scipy 0.13.0
- 1992: ``ndimage.label`` return type changed from int32 to uint32
- 1992: ``ndimage.find_objects`` doesn't work with int32 input in some cases

View file

@ -1,13 +0,0 @@
==========================
SciPy 0.13.2 Release Notes
==========================
SciPy 0.13.2 is a bug-fix release with no new features compared to 0.13.1.
Issues fixed
------------
- 3096: require Cython 0.19, earlier versions have memory leaks in fused types
- 3079: ``ndimage.label`` fix swapped 64-bitness test
- 3108: ``optimize.fmin_slsqp`` constraint violation

View file

@ -1,497 +0,0 @@
==========================
SciPy 0.14.0 Release Notes
==========================
.. contents::
SciPy 0.14.0 is the culmination of 8 months of hard work. It contains
many new features, numerous bug-fixes, improved test coverage and
better documentation. There have been a number of deprecations and
API changes in this release, which are documented below. All users
are encouraged to upgrade to this release, as there are a large number
of bug-fixes and optimizations. Moreover, our development attention
will now shift to bug-fix releases on the 0.14.x branch, and on adding
new features on the master branch.
This release requires Python 2.6, 2.7 or 3.2-3.4 and NumPy 1.5.1 or greater.
New features
============
``scipy.interpolate`` improvements
----------------------------------
A new wrapper function `scipy.interpolate.interpn` for interpolation on regular
grids has been added. `interpn` supports linear and nearest-neighbor
interpolation in arbitrary dimensions and spline interpolation in two
dimensions.
Faster implementations of piecewise polynomials in power and Bernstein
polynomial bases have been added as `scipy.interpolate.PPoly` and
`scipy.interpolate.BPoly`. New users should use these in favor of
`scipy.interpolate.PiecewisePolynomial`.
`scipy.interpolate.interp1d` now accepts non-monotonic inputs and sorts them.
If performance is critical, sorting can be turned off by using the new
``assume_sorted`` keyword.
Functionality for evaluation of bivariate spline derivatives in
``scipy.interpolate`` has been added.
The new class `scipy.interpolate.Akima1DInterpolator` implements the piecewise
cubic polynomial interpolation scheme devised by H. Akima.
Functionality for fast interpolation on regular, unevenly spaced grids
in arbitrary dimensions has been added as
`scipy.interpolate.RegularGridInterpolator` .
``scipy.linalg`` improvements
-----------------------------
The new function `scipy.linalg.dft` computes the matrix of the
discrete Fourier transform.
A condition number estimation function for matrix exponential,
`scipy.linalg.expm_cond`, has been added.
``scipy.optimize`` improvements
-------------------------------
A set of benchmarks for optimize, which can be run with ``optimize.bench()``,
has been added.
`scipy.optimize.curve_fit` now has more controllable error estimation via the
``absolute_sigma`` keyword.
Support for passing custom minimization methods to ``optimize.minimize()``
and ``optimize.minimize_scalar()`` has been added, currently useful especially
for combining ``optimize.basinhopping()`` with custom local optimizer routines.
``scipy.stats`` improvements
----------------------------
A new class `scipy.stats.multivariate_normal` with functionality for
multivariate normal random variables has been added.
A lot of work on the ``scipy.stats`` distribution framework has been done.
Moment calculations (skew and kurtosis mainly) are fixed and verified, all
examples are now runnable, and many small accuracy and performance improvements
for individual distributions were merged.
The new function `scipy.stats.anderson_ksamp` computes the k-sample
Anderson-Darling test for the null hypothesis that k samples come from
the same parent population.
``scipy.signal`` improvements
-----------------------------
``scipy.signal.iirfilter`` and related functions to design Butterworth,
Chebyshev, elliptical and Bessel IIR filters now all use pole-zero ("zpk")
format internally instead of using transformations to numerator/denominator
format. The accuracy of the produced filters, especially high-order ones, is
improved significantly as a result.
The Savitzky-Golay filter was added with the new functions
`scipy.signal.savgol_filter` and `scipy.signal.savgol_coeffs`.
The new function `scipy.signal.vectorstrength` computes the vector strength,
a measure of phase synchrony, of a set of events.
``scipy.special`` improvements
------------------------------
The functions `scipy.special.boxcox` and `scipy.special.boxcox1p`, which
compute the Box-Cox transformation, have been added.
``scipy.sparse`` improvements
-----------------------------
- Significant performance improvement in CSR, CSC, and DOK indexing speed.
- When using Numpy >= 1.9 (to be released in 2014), sparse matrices function
  correctly when given to arguments of ``np.dot``, ``np.multiply`` and other
  ufuncs. With earlier Numpy and Scipy versions, the results of such
  operations are undefined and usually unexpected.
- Sparse matrices are no longer limited to ``2^31`` nonzero elements. They
automatically switch to using 64-bit index data type for matrices containing
more elements. User code written assuming the sparse matrices use int32 as
the index data type will continue to work, except for such large matrices.
Code dealing with larger matrices needs to accept either int32 or int64
indices.
Deprecated features
===================
``anneal``
----------
The global minimization function `scipy.optimize.anneal` is deprecated.
All users should use the `scipy.optimize.basinhopping` function instead.
``scipy.stats``
---------------
``randwcdf`` and ``randwppf`` functions are deprecated. All users should use
distribution-specific ``rvs`` methods instead.
Probability calculation aliases ``zprob``, ``fprob`` and ``ksprob`` are
deprecated. Use instead the ``sf`` methods of the corresponding distributions
or the ``special`` functions directly.
``scipy.interpolate``
---------------------
``PiecewisePolynomial`` class is deprecated.
Backwards incompatible changes
==============================
scipy.special.lpmn
------------------
``lpmn`` no longer accepts complex-valued arguments. A new function
``clpmn`` with uniform complex analytic behavior has been added, and
it should be used instead.
scipy.sparse.linalg
-------------------
Eigenvectors in the case of generalized eigenvalue problem are normalized to
unit vectors in 2-norm, rather than following the LAPACK normalization
convention.
The deprecated UMFPACK wrapper in ``scipy.sparse.linalg`` has been removed due
to license and install issues. If available, ``scikits.umfpack`` is still used
transparently in the ``spsolve`` and ``factorized`` functions. Otherwise,
SuperLU is used instead in these functions.
scipy.stats
-----------
The deprecated functions ``glm``, ``oneway`` and ``cmedian`` have been removed
from ``scipy.stats``.
``stats.scoreatpercentile`` now returns an array instead of a list of
percentiles.
scipy.interpolate
-----------------
The API for computing derivatives of a monotone piecewise interpolation has
changed: if `p` is a ``PchipInterpolator`` object, `p.derivative(der)`
returns a callable object representing the derivative of `p`. For in-place
derivatives use the second argument of the `__call__` method:
`p(0.1, der=2)` evaluates the second derivative of `p` at `x=0.1`.
The method `p.derivatives` has been removed.
Other changes
=============
Authors
=======
* Marc Abramowitz +
* Anders Bech Borchersen +
* Vincent Arel-Bundock +
* Petr Baudis +
* Max Bolingbroke
* François Boulogne
* Matthew Brett
* Lars Buitinck
* Evgeni Burovski
* CJ Carey +
* Thomas A Caswell +
* Pawel Chojnacki +
* Phillip Cloud +
* Stefano Costa +
* David Cournapeau
* David Menendez Hurtado +
* Matthieu Dartiailh +
* Christoph Deil +
* Jörg Dietrich +
* endolith
* Francisco de la Peña +
* Ben FrantzDale +
* Jim Garrison +
* André Gaul
* Christoph Gohlke
* Ralf Gommers
* Robert David Grant
* Alex Griffing
* Blake Griffith
* Yaroslav Halchenko
* Andreas Hilboll
* Kat Huang
* Gert-Ludwig Ingold
* James T. Webber +
* Dorota Jarecka +
* Todd Jennings +
* Thouis (Ray) Jones
* Juan Luis Cano Rodríguez
* ktritz +
* Jacques Kvam +
* Eric Larson +
* Justin Lavoie +
* Denis Laxalde
* Jussi Leinonen +
* lemonlaug +
* Tim Leslie
* Alain Leufroy +
* George Lewis +
* Max Linke +
* Brandon Liu +
* Benny Malengier +
* Matthias Kümmerer +
* Cimarron Mittelsteadt +
* Eric Moore
* Andrew Nelson +
* Niklas Hambüchen +
* Joel Nothman +
* Clemens Novak
* Emanuele Olivetti +
* Stefan Otte +
* peb +
* Josef Perktold
* pjwerneck
* poolio
* Jérôme Roy +
* Carl Sandrock +
* Andrew Sczesnak +
* Shauna +
* Fabrice Silva
* Daniel B. Smith
* Patrick Snape +
* Thomas Spura +
* Jacob Stevenson
* Julian Taylor
* Tomas Tomecek
* Richard Tsai
* Jacob Vanderplas
* Joris Vankerschaver +
* Pauli Virtanen
* Warren Weckesser
A total of 80 people contributed to this release.
People with a "+" by their names contributed a patch for the first time.
This list of names is automatically generated, and may not be fully complete.
Issues closed
-------------
- `#1325 <https://github.com/scipy/scipy/issues/1325>`__: add custom axis keyword to dendrogram function in scipy.cluster.hierarchy...
- `#1437 <https://github.com/scipy/scipy/issues/1437>`__: Wrong pochhammer symbol for negative integers (Trac #910)
- `#1555 <https://github.com/scipy/scipy/issues/1555>`__: scipy.io.netcdf leaks file descriptors (Trac #1028)
- `#1569 <https://github.com/scipy/scipy/issues/1569>`__: sparse matrix failed with element-wise multiplication using numpy.multiply()...
- `#1833 <https://github.com/scipy/scipy/issues/1833>`__: Sparse matrices are limited to 2^32 non-zero elements (Trac #1307)
- `#1834 <https://github.com/scipy/scipy/issues/1834>`__: scipy.linalg.eig does not normalize eigenvector if B is given...
- `#1866 <https://github.com/scipy/scipy/issues/1866>`__: stats for invgamma (Trac #1340)
- `#1886 <https://github.com/scipy/scipy/issues/1886>`__: stats.zipf floating point warnings (Trac #1361)
- `#1887 <https://github.com/scipy/scipy/issues/1887>`__: Stats continuous distributions - floating point warnings (Trac...
- `#1897 <https://github.com/scipy/scipy/issues/1897>`__: scoreatpercentile() does not handle empty list inputs (Trac #1372)
- `#1918 <https://github.com/scipy/scipy/issues/1918>`__: splint returns incorrect results (Trac #1393)
- `#1949 <https://github.com/scipy/scipy/issues/1949>`__: kurtosistest fails in mstats with type error (Trac #1424)
- `#2092 <https://github.com/scipy/scipy/issues/2092>`__: scipy.test leaves darwin27compiled_catalog, cpp and so files...
- `#2106 <https://github.com/scipy/scipy/issues/2106>`__: stats ENH: shape parameters in distribution docstrings (Trac...
- `#2123 <https://github.com/scipy/scipy/issues/2123>`__: Bad behavior of sparse matrices in a binary ufunc (Trac #1598)
- `#2152 <https://github.com/scipy/scipy/issues/2152>`__: Fix mmio/fromfile on gzip on Python 3 (Trac #1627)
- `#2164 <https://github.com/scipy/scipy/issues/2164>`__: stats.rice.pdf(x, 0) returns nan (Trac #1639)
- `#2169 <https://github.com/scipy/scipy/issues/2169>`__: scipy.optimize.fmin_bfgs not handling functions with boundaries...
- `#2177 <https://github.com/scipy/scipy/issues/2177>`__: scipy.cluster.hierarchy.ClusterNode.pre_order returns IndexError...
- `#2179 <https://github.com/scipy/scipy/issues/2179>`__: coo.todense() segfaults (Trac #1654)
- `#2185 <https://github.com/scipy/scipy/issues/2185>`__: Precision of scipy.ndimage.gaussian_filter*() limited (Trac #1660)
- `#2186 <https://github.com/scipy/scipy/issues/2186>`__: scipy.stats.mstats.kurtosistest crashes on 1d input (Trac #1661)
- `#2238 <https://github.com/scipy/scipy/issues/2238>`__: Negative p-value on hypergeom.cdf (Trac #1719)
- `#2283 <https://github.com/scipy/scipy/issues/2283>`__: ascending order in interpolation routines (Trac #1764)
- `#2288 <https://github.com/scipy/scipy/issues/2288>`__: mstats.kurtosistest is incorrectly converting to float, and fails...
- `#2396 <https://github.com/scipy/scipy/issues/2396>`__: lpmn wrong results for ``|z| > 1`` (Trac #1877)
- `#2398 <https://github.com/scipy/scipy/issues/2398>`__: ss2tf returns num as 2D array instead of 1D (Trac #1879)
- `#2406 <https://github.com/scipy/scipy/issues/2406>`__: linkage does not take Unicode strings as method names (Trac #1887)
- `#2443 <https://github.com/scipy/scipy/issues/2443>`__: IIR filter design should not transform to tf representation internally
- `#2572 <https://github.com/scipy/scipy/issues/2572>`__: class method solve of splu return object corrupted or falsely...
- `#2667 <https://github.com/scipy/scipy/issues/2667>`__: stats endless loop ?
- `#2671 <https://github.com/scipy/scipy/issues/2671>`__: .stats.hypergeom documentation error in the note about pmf
- `#2691 <https://github.com/scipy/scipy/issues/2691>`__: BUG scipy.linalg.lapack: potrf/ptroi interpret their 'lower'...
- `#2721 <https://github.com/scipy/scipy/issues/2721>`__: Allow use of ellipsis in scipy.sparse slicing
- `#2741 <https://github.com/scipy/scipy/issues/2741>`__: stats: deprecate and remove alias for special functions
- `#2742 <https://github.com/scipy/scipy/issues/2742>`__: stats add rvs to rice distribution
- `#2765 <https://github.com/scipy/scipy/issues/2765>`__: bugs stats entropy
- `#2832 <https://github.com/scipy/scipy/issues/2832>`__: argrelextrema returns tuple of 2 empty arrays when no peaks found...
- `#2861 <https://github.com/scipy/scipy/issues/2861>`__: scipy.stats.scoreatpercentile broken for vector `per`
- `#2891 <https://github.com/scipy/scipy/issues/2891>`__: COBYLA successful termination when constraints violated
- `#2919 <https://github.com/scipy/scipy/issues/2919>`__: test failure with the current master
- `#2922 <https://github.com/scipy/scipy/issues/2922>`__: ndimage.percentile_filter ignores origin argument for multidimensional...
- `#2938 <https://github.com/scipy/scipy/issues/2938>`__: Sparse/dense matrix inplace operations fail due to __numpy_ufunc__
- `#2944 <https://github.com/scipy/scipy/issues/2944>`__: MacPorts builds yield 40Mb worth of build warnings
- `#2945 <https://github.com/scipy/scipy/issues/2945>`__: FAIL: test_random_complex (test_basic.TestDet)
- `#2947 <https://github.com/scipy/scipy/issues/2947>`__: FAIL: Test some trivial edge cases for savgol_filter()
- `#2953 <https://github.com/scipy/scipy/issues/2953>`__: Scipy Delaunay triangulation is not oriented
- `#2971 <https://github.com/scipy/scipy/issues/2971>`__: scipy.stats.mstats.winsorize documentation error
- `#2980 <https://github.com/scipy/scipy/issues/2980>`__: Problems running what seems a perfectly valid example
- `#2996 <https://github.com/scipy/scipy/issues/2996>`__: entropy for rv_discrete is incorrect?!
- `#2998 <https://github.com/scipy/scipy/issues/2998>`__: Fix numpy version comparisons
- `#3002 <https://github.com/scipy/scipy/issues/3002>`__: python setup.py install fails
- `#3014 <https://github.com/scipy/scipy/issues/3014>`__: Bug in stats.fisher_exact
- `#3030 <https://github.com/scipy/scipy/issues/3030>`__: relative entropy using scipy.stats.distribution.entropy when...
- `#3037 <https://github.com/scipy/scipy/issues/3037>`__: scipy.optimize.curve_fit leads to unexpected behavior when input...
- `#3047 <https://github.com/scipy/scipy/issues/3047>`__: mstats.ttest_rel axis=None, requires masked array
- `#3059 <https://github.com/scipy/scipy/issues/3059>`__: BUG: Slices of sparse matrices return incorrect dtype
- `#3063 <https://github.com/scipy/scipy/issues/3063>`__: range keyword in binned_statistics incorrect
- `#3067 <https://github.com/scipy/scipy/issues/3067>`__: cumtrapz not working as expected
- `#3069 <https://github.com/scipy/scipy/issues/3069>`__: sinc
- `#3086 <https://github.com/scipy/scipy/issues/3086>`__: standard error calculation inconsistent between 'stats' and 'mstats'
- `#3094 <https://github.com/scipy/scipy/issues/3094>`__: Add a `perm` function into `scipy.misc` and an enhancement of...
- `#3111 <https://github.com/scipy/scipy/issues/3111>`__: scipy.sparse.[hv]stack don't respect anymore the dtype parameter
- `#3172 <https://github.com/scipy/scipy/issues/3172>`__: optimize.curve_fit uses different nomenclature from optimize.leastsq
- `#3196 <https://github.com/scipy/scipy/issues/3196>`__: scipy.stats.mstats.gmean does not actually take dtype
- `#3212 <https://github.com/scipy/scipy/issues/3212>`__: Dot product of csr_matrix causes segmentation fault
- `#3227 <https://github.com/scipy/scipy/issues/3227>`__: ZeroDivisionError in broyden1 when initial guess is the right...
- `#3238 <https://github.com/scipy/scipy/issues/3238>`__: lbfgsb output not suppressed by disp=0
- `#3249 <https://github.com/scipy/scipy/issues/3249>`__: Sparse matrix min/max/etc don't support axis=-1
- `#3251 <https://github.com/scipy/scipy/issues/3251>`__: cdist performance issue with 'sqeuclidean' metric
- `#3279 <https://github.com/scipy/scipy/issues/3279>`__: logm fails for singular matrix
- `#3285 <https://github.com/scipy/scipy/issues/3285>`__: signal.chirp(method='hyp') disallows hyperbolic upsweep
- `#3299 <https://github.com/scipy/scipy/issues/3299>`__: MEMORY LEAK: fmin_tnc
- `#3330 <https://github.com/scipy/scipy/issues/3330>`__: test failures with the current master
- `#3345 <https://github.com/scipy/scipy/issues/3345>`__: scipy and/or numpy change is causing tests to fail in another...
- `#3363 <https://github.com/scipy/scipy/issues/3363>`__: splu does not work for non-vector inputs
- `#3385 <https://github.com/scipy/scipy/issues/3385>`__: expit does not handle large arguments well
- `#3395 <https://github.com/scipy/scipy/issues/3395>`__: specfun.f doesn't compile with MinGW
- `#3399 <https://github.com/scipy/scipy/issues/3399>`__: Error message bug in scipy.cluster.hierarchy.linkage
- `#3404 <https://github.com/scipy/scipy/issues/3404>`__: interpolate._ppoly doesn't build with MinGW
- `#3412 <https://github.com/scipy/scipy/issues/3412>`__: Test failures in signal
- `#3466 <https://github.com/scipy/scipy/issues/3466>`__: ```scipy.sparse.csgraph.shortest_path``` does not work on ```scipy.sparse.csr_matrix``` or ```lil_matrix```
Pull requests
-------------
- `#442 <https://github.com/scipy/scipy/pull/442>`__: ENH: sparse: enable 64-bit index arrays & nnz > 2**31
- `#2766 <https://github.com/scipy/scipy/pull/2766>`__: DOC: remove doc/seps/technology-preview.rst
- `#2772 <https://github.com/scipy/scipy/pull/2772>`__: TST: stats: Added a regression test for stats.wilcoxon. Closes...
- `#2778 <https://github.com/scipy/scipy/pull/2778>`__: Clean up stats._support, close statistics review issues
- `#2792 <https://github.com/scipy/scipy/pull/2792>`__: BUG io: fix file descriptor closing for netcdf variables
- `#2847 <https://github.com/scipy/scipy/pull/2847>`__: Rice distribution: extend to b=0, add an explicit rvs method.
- `#2878 <https://github.com/scipy/scipy/pull/2878>`__: [stats] fix formulas for higher moments of dweibull distribution
- `#2904 <https://github.com/scipy/scipy/pull/2904>`__: ENH: moments for the zipf distribution
- `#2907 <https://github.com/scipy/scipy/pull/2907>`__: ENH: add coverage info with coveralls.io for Travis runs.
- `#2932 <https://github.com/scipy/scipy/pull/2932>`__: BUG+TST: setdiag implementation for dia_matrix (Close #2931)...
- `#2942 <https://github.com/scipy/scipy/pull/2942>`__: Misc fixes pointed out by Eclipse PyDev static code analysis
- `#2946 <https://github.com/scipy/scipy/pull/2946>`__: ENH: allow non-monotonic input in interp1d
- `#2986 <https://github.com/scipy/scipy/pull/2986>`__: BUG: runtests: chdir away from root when running tests
- `#2987 <https://github.com/scipy/scipy/pull/2987>`__: DOC: linalg: don't recommend np.linalg.norm
- `#2992 <https://github.com/scipy/scipy/pull/2992>`__: ENH: Add "limit" parameter to dijkstra calculation
- `#2995 <https://github.com/scipy/scipy/pull/2995>`__: ENH: Use int shape
- `#3006 <https://github.com/scipy/scipy/pull/3006>`__: DOC: stats: add a log base note to the docstring
- `#3007 <https://github.com/scipy/scipy/pull/3007>`__: DEP: stats: Deprecate randwppf and randwcdf
- `#3008 <https://github.com/scipy/scipy/pull/3008>`__: Fix mstats.kurtosistest, and test coverage for skewtest/normaltest
- `#3009 <https://github.com/scipy/scipy/pull/3009>`__: Minor reST typo
- `#3010 <https://github.com/scipy/scipy/pull/3010>`__: Add `scipy.optimize.Result` to API docs
- `#3012 <https://github.com/scipy/scipy/pull/3012>`__: Corrects documentation error
- `#3052 <https://github.com/scipy/scipy/pull/3052>`__: PEP-8 conformance improvements
- `#3064 <https://github.com/scipy/scipy/pull/3064>`__: Binned statistic
- `#3068 <https://github.com/scipy/scipy/pull/3068>`__: Fix Issue #3067 fix cumptrapz that was raising an exception when...
- `#3073 <https://github.com/scipy/scipy/pull/3073>`__: Arff reader with nominal value of 1 character
- `#3074 <https://github.com/scipy/scipy/pull/3074>`__: Some maintenance work
- `#3080 <https://github.com/scipy/scipy/pull/3080>`__: Review and clean up all Box-Cox functions
- `#3083 <https://github.com/scipy/scipy/pull/3083>`__: Bug: should return 0 if no regions found
- `#3085 <https://github.com/scipy/scipy/pull/3085>`__: BUG: Use zpk in IIR filter design to improve accuracy
- `#3101 <https://github.com/scipy/scipy/pull/3101>`__: refactor stats tests a bit
- `#3112 <https://github.com/scipy/scipy/pull/3112>`__: ENH: implement Akima interpolation in 1D
- `#3123 <https://github.com/scipy/scipy/pull/3123>`__: MAINT: an easier way to make ranges from slices
- `#3124 <https://github.com/scipy/scipy/pull/3124>`__: File object support for imread and imsave
- `#3126 <https://github.com/scipy/scipy/pull/3126>`__: pep8ify stats/distributions.py
- `#3134 <https://github.com/scipy/scipy/pull/3134>`__: MAINT: split distributions.py into three files
- `#3138 <https://github.com/scipy/scipy/pull/3138>`__: clean up tests for discrete distributions
- `#3155 <https://github.com/scipy/scipy/pull/3155>`__: special: handle the edge case lambda=0 in pdtr, pdtrc and pdtrik
- `#3156 <https://github.com/scipy/scipy/pull/3156>`__: Rename optimize.Result to OptimizeResult
- `#3166 <https://github.com/scipy/scipy/pull/3166>`__: BUG: make curve_fit() work with array_like input. Closes gh-3037.
- `#3170 <https://github.com/scipy/scipy/pull/3170>`__: Fix numpy version checks
- `#3175 <https://github.com/scipy/scipy/pull/3175>`__: use numpy sinc
- `#3177 <https://github.com/scipy/scipy/pull/3177>`__: Update numpy version warning, remove oldnumeric import
- `#3178 <https://github.com/scipy/scipy/pull/3178>`__: DEP: remove deprecated umfpack wrapper. Closes gh-3002.
- `#3179 <https://github.com/scipy/scipy/pull/3179>`__: DOC: add BPoly to the docs
- `#3180 <https://github.com/scipy/scipy/pull/3180>`__: Suppress warnings when running stats.test()
- `#3181 <https://github.com/scipy/scipy/pull/3181>`__: altered sem func in mstats to match stats
- `#3182 <https://github.com/scipy/scipy/pull/3182>`__: Make weave tests behave
- `#3183 <https://github.com/scipy/scipy/pull/3183>`__: ENH: Add k-sample Anderson-Darling test to stats module
- `#3186 <https://github.com/scipy/scipy/pull/3186>`__: Fix stats.scoreatpercentile
- `#3187 <https://github.com/scipy/scipy/pull/3187>`__: DOC: make curve_fit nomenclature same as leastsq
- `#3201 <https://github.com/scipy/scipy/pull/3201>`__: Added axis keyword to dendrogram function
- `#3207 <https://github.com/scipy/scipy/pull/3207>`__: Make docstring examples in stats.distributions docstrings runnable
- `#3218 <https://github.com/scipy/scipy/pull/3218>`__: BUG: integrate: Fix banded jacobian handling in the "vode" and...
- `#3222 <https://github.com/scipy/scipy/pull/3222>`__: BUG: limit input ranges in special.nctdtr
- `#3223 <https://github.com/scipy/scipy/pull/3223>`__: Fix test errors with numpy master
- `#3224 <https://github.com/scipy/scipy/pull/3224>`__: Fix int32 overflows in sparsetools
- `#3228 <https://github.com/scipy/scipy/pull/3228>`__: DOC: tf2ss zpk2ss note controller canonical form
- `#3234 <https://github.com/scipy/scipy/pull/3234>`__: Add See Also links and Example graphs to filter design ``*ord`` functions
- `#3235 <https://github.com/scipy/scipy/pull/3235>`__: Updated the buttord function to be consistent with the other...
- `#3239 <https://github.com/scipy/scipy/pull/3239>`__: correct doc for pchip interpolation
- `#3240 <https://github.com/scipy/scipy/pull/3240>`__: DOC: fix ReST errors in the BPoly docstring
- `#3241 <https://github.com/scipy/scipy/pull/3241>`__: RF: check write attr of fileobject without writing
- `#3243 <https://github.com/scipy/scipy/pull/3243>`__: a bit of maintanence work in stats
- `#3245 <https://github.com/scipy/scipy/pull/3245>`__: BUG/ENH: stats: make frozen distributions hold separate instances
- `#3247 <https://github.com/scipy/scipy/pull/3247>`__: ENH function to return nnz per row/column in some sparse matrices
- `#3248 <https://github.com/scipy/scipy/pull/3248>`__: ENH much more efficient sparse min/max with axis
- `#3252 <https://github.com/scipy/scipy/pull/3252>`__: Fast sqeuclidean
- `#3253 <https://github.com/scipy/scipy/pull/3253>`__: FIX support axis=-1 and -2 for sparse reduce methods
- `#3254 <https://github.com/scipy/scipy/pull/3254>`__: TST tests for non-canonical input to sparse matrix operations
- `#3272 <https://github.com/scipy/scipy/pull/3272>`__: BUG: sparse: fix bugs in dia_matrix.setdiag
- `#3278 <https://github.com/scipy/scipy/pull/3278>`__: Also generate a tar.xz when running paver sdist
- `#3286 <https://github.com/scipy/scipy/pull/3286>`__: DOC: update 0.14.0 release notes.
- `#3289 <https://github.com/scipy/scipy/pull/3289>`__: TST: remove insecure mktemp use in tests
- `#3292 <https://github.com/scipy/scipy/pull/3292>`__: MAINT: fix a backwards incompatible change to stats.distributions.__all__
- `#3293 <https://github.com/scipy/scipy/pull/3293>`__: ENH: signal: Allow upsweeps of frequency in the 'hyperbolic'...
- `#3302 <https://github.com/scipy/scipy/pull/3302>`__: ENH: add dtype arg to stats.mstats.gmean and stats.mstats.hmean
- `#3307 <https://github.com/scipy/scipy/pull/3307>`__: DOC: add note about different ba forms in tf2zpk
- `#3309 <https://github.com/scipy/scipy/pull/3309>`__: doc enhancements to scipy.stats.mstats.winsorize
- `#3310 <https://github.com/scipy/scipy/pull/3310>`__: DOC: clarify matrix vs array in mmio docstrings
- `#3314 <https://github.com/scipy/scipy/pull/3314>`__: BUG: fix scipy.io.mmread() of gzipped files under Python3
- `#3323 <https://github.com/scipy/scipy/pull/3323>`__: ENH: Efficient interpolation on regular grids in arbitrary dimensions
- `#3332 <https://github.com/scipy/scipy/pull/3332>`__: DOC: clean up scipy.special docs
- `#3335 <https://github.com/scipy/scipy/pull/3335>`__: ENH: improve nanmedian performance
- `#3347 <https://github.com/scipy/scipy/pull/3347>`__: BUG: fix use of np.max in stats.fisher_exact
- `#3356 <https://github.com/scipy/scipy/pull/3356>`__: ENH: sparse: speed up LIL indexing + assignment via Cython
- `#3357 <https://github.com/scipy/scipy/pull/3357>`__: Fix "imresize does not work with size = int"
- `#3358 <https://github.com/scipy/scipy/pull/3358>`__: MAINT: rename AkimaInterpolator to Akima1DInterpolator
- `#3366 <https://github.com/scipy/scipy/pull/3366>`__: WHT: sparse: reindent ``dsolve/*.c *.h``
- `#3367 <https://github.com/scipy/scipy/pull/3367>`__: BUG: sparse/dsolve: fix dense matrix fortran order bugs in superlu...
- `#3369 <https://github.com/scipy/scipy/pull/3369>`__: ENH minimize, minimize_scalar: Add support for user-provided...
- `#3371 <https://github.com/scipy/scipy/pull/3371>`__: scipy.stats.sigmaclip doesn't appear in the html docs.
- `#3373 <https://github.com/scipy/scipy/pull/3373>`__: BUG: sparse/dsolve: detect invalid LAPACK parameters in superlu...
- `#3375 <https://github.com/scipy/scipy/pull/3375>`__: ENH: sparse/dsolve: make the L and U factors of splu and spilu...
- `#3377 <https://github.com/scipy/scipy/pull/3377>`__: MAINT: make travis build one target against Numpy 1.5
- `#3378 <https://github.com/scipy/scipy/pull/3378>`__: MAINT: fftpack: Remove the use of ``'import *'`` in a couple test...
- `#3381 <https://github.com/scipy/scipy/pull/3381>`__: MAINT: replace np.isinf(x) & (x>0) -> np.isposinf(x) to avoid...
- `#3383 <https://github.com/scipy/scipy/pull/3383>`__: MAINT: skip float96 tests on platforms without float96
- `#3384 <https://github.com/scipy/scipy/pull/3384>`__: MAINT: add pyflakes to Travis-CI
- `#3386 <https://github.com/scipy/scipy/pull/3386>`__: BUG: stable evaluation of expit
- `#3388 <https://github.com/scipy/scipy/pull/3388>`__: BUG: SuperLU: fix missing declaration of dlamch
- `#3389 <https://github.com/scipy/scipy/pull/3389>`__: BUG: sparse: downcast 64-bit indices safely to intp when required
- `#3390 <https://github.com/scipy/scipy/pull/3390>`__: BUG: nonlinear solvers are not confused by lucky guess
- `#3391 <https://github.com/scipy/scipy/pull/3391>`__: TST: fix sparse test errors due to axis=-1,-2 usage in np.matrix.sum().
- `#3392 <https://github.com/scipy/scipy/pull/3392>`__: BUG: sparse/lil: fix up Cython bugs in fused type lookup
- `#3393 <https://github.com/scipy/scipy/pull/3393>`__: BUG: sparse/compressed: work around bug in np.unique in earlier...
- `#3394 <https://github.com/scipy/scipy/pull/3394>`__: BUG: allow ClusterNode.pre_order() for non-root nodes
- `#3400 <https://github.com/scipy/scipy/pull/3400>`__: BUG: cluster.linkage ValueError typo bug
- `#3402 <https://github.com/scipy/scipy/pull/3402>`__: BUG: special: In specfun.f, replace the use of CMPLX with DCMPLX,...
- `#3408 <https://github.com/scipy/scipy/pull/3408>`__: MAINT: sparse: Numpy 1.5 compatibility fixes
- `#3410 <https://github.com/scipy/scipy/pull/3410>`__: MAINT: interpolate: fix blas defs in _ppoly
- `#3411 <https://github.com/scipy/scipy/pull/3411>`__: MAINT: Numpy 1.5 fixes in interpolate
- `#3413 <https://github.com/scipy/scipy/pull/3413>`__: Fix more test issues with older numpy versions
- `#3414 <https://github.com/scipy/scipy/pull/3414>`__: TST: signal: loosen some error tolerances in the filter tests....
- `#3415 <https://github.com/scipy/scipy/pull/3415>`__: MAINT: tools: automated close issue + pr listings for release...
- `#3440 <https://github.com/scipy/scipy/pull/3440>`__: MAINT: wrap sparsetools manually instead via SWIG
- `#3460 <https://github.com/scipy/scipy/pull/3460>`__: TST: open image file in binary mode
- `#3467 <https://github.com/scipy/scipy/pull/3467>`__: BUG: fix validation in csgraph.shortest_path

View file

@ -1,33 +0,0 @@
==========================
SciPy 0.14.1 Release Notes
==========================
SciPy 0.14.1 is a bug-fix release with no new features compared to 0.14.0.
Issues closed
-------------
- `#3630 <https://github.com/scipy/scipy/issues/3630>`__: NetCDF reading results in a segfault
- `#3631 <https://github.com/scipy/scipy/issues/3631>`__: SuperLU object not working as expected for complex matrices
- `#3733 <https://github.com/scipy/scipy/issues/3733>`__: segfault from map_coordinates
- `#3780 <https://github.com/scipy/scipy/issues/3780>`__: Segfault when using CSR/CSC matrix and uint32/uint64
- `#3781 <https://github.com/scipy/scipy/pull/3781>`__: BUG: sparse: fix omitted types in sparsetools typemaps
- `#3802 <https://github.com/scipy/scipy/issues/3802>`__: 0.14.0 API breakage: _gen generators are missing from scipy.stats.distributions API
- `#3805 <https://github.com/scipy/scipy/issues/3805>`__: ndimage test failures with numpy 1.10
- `#3812 <https://github.com/scipy/scipy/issues/3812>`__: == sometimes wrong on csr_matrix
- `#3853 <https://github.com/scipy/scipy/issues/3853>`__: Many scipy.sparse test errors/failures with numpy 1.9.0b2
- `#4084 <https://github.com/scipy/scipy/pull/4084>`__: fix exception declarations for Cython 0.21.1 compatibility
- `#4093 <https://github.com/scipy/scipy/pull/4093>`__: BUG: fitpack: avoid a memory error in splev(x, tck, der=k)
- `#4104 <https://github.com/scipy/scipy/pull/4104>`__: BUG: Workaround SGEMV segfault in Accelerate (maintenance 0.14.x)
- `#4143 <https://github.com/scipy/scipy/pull/4143>`__: BUG: fix ndimage functions for large data
- `#4149 <https://github.com/scipy/scipy/issues/4149>`__: Bug in expm for integer arrays
- `#4154 <https://github.com/scipy/scipy/issues/4154>`__: Backport gh-4041 for 0.14.1 (Ensure that the 'size' argument of PIL's 'resize' method is a tuple)
- `#4163 <https://github.com/scipy/scipy/issues/4163>`__: Backport #4142 (ZeroDivisionError in scipy.sparse.linalg.lsqr)
- `#4164 <https://github.com/scipy/scipy/issues/4164>`__: Backport gh-4153 (remove use of deprecated numpy API in lib/lapack/ f2py wrapper)
- `#4180 <https://github.com/scipy/scipy/pull/4180>`__: backport pil resize support tuple fix
- `#4168 <https://github.com/scipy/scipy/issues/4168>`__: Lots of arpack test failures on windows 32 bits with numpy 1.9.1
- `#4203 <https://github.com/scipy/scipy/issues/4203>`__: Matrix multiplication in 0.14.x is more than 10x slower compared...
- `#4218 <https://github.com/scipy/scipy/pull/4218>`__: attempt to make ndimage interpolation compatible with numpy relaxed...
- `#4225 <https://github.com/scipy/scipy/pull/4225>`__: BUG: off-by-one error in PPoly shape checks
- `#4248 <https://github.com/scipy/scipy/pull/4248>`__: BUG: optimize: fix issue with incorrect use of closure for slsqp.

View file

@ -1,594 +0,0 @@
==========================
SciPy 0.15.0 Release Notes
==========================
.. contents::
SciPy 0.15.0 is the culmination of 6 months of hard work. It contains
several new features, numerous bug-fixes, improved test coverage and
better documentation. There have been a number of deprecations and
API changes in this release, which are documented below. All users
are encouraged to upgrade to this release, as there are a large number
of bug-fixes and optimizations. Moreover, our development attention
will now shift to bug-fix releases on the 0.16.x branch, and on adding
new features on the master branch.
This release requires Python 2.6, 2.7 or 3.2-3.4 and NumPy 1.5.1 or greater.
New features
============
Linear Programming Interface
----------------------------
The new function `scipy.optimize.linprog` provides a generic
linear programming interface similar to the way `scipy.optimize.minimize`
provides a generic interface to nonlinear programming optimizers.
Currently the only method supported is *simplex* which provides
a two-phase, dense-matrix-based simplex algorithm. Callback
functions are supported, allowing the user to monitor the progress
of the algorithm.
Differential evolution, a global optimizer
------------------------------------------
A new `scipy.optimize.differential_evolution` function has been added to the
``optimize`` module. Differential Evolution is an algorithm used for finding
the global minimum of multivariate functions. It is stochastic in nature (does
not use gradient methods), and can search large areas of candidate space, but
often requires larger numbers of function evaluations than conventional
gradient based techniques.
``scipy.signal`` improvements
-----------------------------
The function `scipy.signal.max_len_seq` was added, which computes a Maximum
Length Sequence (MLS) signal.
``scipy.integrate`` improvements
--------------------------------
It is now possible to use `scipy.integrate` routines to integrate
multivariate ctypes functions, thus avoiding callbacks to Python and
providing better performance.
``scipy.linalg`` improvements
-----------------------------
The function `scipy.linalg.orthogonal_procrustes` for solving the procrustes
linear algebra problem was added.
BLAS level 2 functions ``her``, ``syr``, ``her2`` and ``syr2`` are now wrapped
in ``scipy.linalg``.
``scipy.sparse`` improvements
-----------------------------
`scipy.sparse.linalg.svds` can now take a ``LinearOperator`` as its main input.
``scipy.special`` improvements
------------------------------
Values of ellipsoidal harmonic (i.e. Lame) functions and associated
normalization constants can be now computed using ``ellip_harm``,
``ellip_harm_2``, and ``ellip_normal``.
New convenience functions ``entr``, ``rel_entr``, ``kl_div``,
``huber``, and ``pseudo_huber`` were added.
``scipy.sparse.csgraph`` improvements
-------------------------------------
Routines ``reverse_cuthill_mckee`` and ``maximum_bipartite_matching``
for computing reorderings of sparse graphs were added.
``scipy.stats`` improvements
----------------------------
Added a Dirichlet multivariate distribution, `scipy.stats.dirichlet`.
The new function `scipy.stats.median_test` computes Mood's median test.
The new function `scipy.stats.combine_pvalues` implements Fisher's
and Stouffer's methods for combining p-values.
`scipy.stats.describe` returns a namedtuple rather than a tuple, allowing
users to access results by index or by name.
Deprecated features
===================
The `scipy.weave` module is deprecated. It was the only module never ported
to Python 3.x, and is not recommended to be used for new code - use Cython
instead. In order to support existing code, ``scipy.weave`` has been packaged
separately: https://github.com/scipy/weave. It is a pure Python package, and
can easily be installed with ``pip install weave``.
`scipy.special.bessel_diff_formula` is deprecated. It is a private function,
and therefore will be removed from the public API in a following release.
``scipy.stats.nanmean``, ``nanmedian`` and ``nanstd`` functions are deprecated
in favor of their numpy equivalents.
Backwards incompatible changes
==============================
scipy.ndimage
-------------
The functions `scipy.ndimage.minimum_positions`,
`scipy.ndimage.maximum_positions` and `scipy.ndimage.extrema` return
positions as ints instead of floats.
scipy.integrate
---------------
The format of banded Jacobians in `scipy.integrate.ode` solvers is
changed. Note that the previous documentation of this feature was
erroneous.
Authors
=======
* Abject +
* Ankit Agrawal +
* Sylvain Bellemare +
* Matthew Brett
* Christian Brodbeck
* Christian Brueffer
* Lars Buitinck
* Evgeni Burovski
* Pierre de Buyl +
* Greg Caporaso +
* CJ Carey
* Jacob Carey +
* Thomas A Caswell
* Helder Cesar +
* Björn Dahlgren +
* Kevin Davies +
* Yotam Doron +
* Marcos Duarte +
* endolith
* Jesse Engel +
* Rob Falck +
* Corey Farwell +
* Jaime Fernandez del Rio +
* Clark Fitzgerald +
* Tom Flannaghan +
* Chad Fulton +
* Jochen Garcke +
* François Garillot +
* André Gaul
* Christoph Gohlke
* Ralf Gommers
* Alex Griffing
* Blake Griffith
* Olivier Grisel
* Charles Harris
* Trent Hauck +
* Ian Henriksen +
* Jinhyok Heo +
* Matt Hickford +
* Andreas Hilboll
* Danilo Horta +
* David Menéndez Hurtado +
* Gert-Ludwig Ingold
* Thouis (Ray) Jones
* Chris Kerr +
* Carl Kleffner +
* Andreas Kloeckner
* Thomas Kluyver +
* Adrian Kretz +
* Johannes Kulick +
* Eric Larson
* Brianna Laugher +
* Denis Laxalde
* Antony Lee +
* Gregory R. Lee +
* Brandon Liu
* Alex Loew +
* Loïc Estève +
* Jaakko Luttinen +
* Benny Malengier
* Tobias Megies +
* Sturla Molden
* Eric Moore
* Brett R. Murphy +
* Paul Nation +
* Andrew Nelson
* Brian Newsom +
* Joel Nothman
* Sergio Oller +
* Janani Padmanabhan +
* Tiago M.D. Pereira +
* Nicolas Del Piano +
* Manuel Reinhardt +
* Thomas Robitaille
* Mike Romberg +
* Alex Rothberg +
* Sebastian Pölsterl +
* Maximilian Singh +
* Brigitta Sipocz +
* Alex Stewart +
* Julian Taylor
* Collin Tokheim +
* James Tomlinson +
* Benjamin Trendelkamp-Schroer +
* Richard Tsai
* Alexey Umnov +
* Jacob Vanderplas
* Joris Vankerschaver
* Bastian Venthur +
* Pauli Virtanen
* Stefan van der Walt
* Yuxiang Wang +
* James T. Webber
* Warren Weckesser
* Axl West +
* Nathan Woods
* Benda Xu +
* Víctor Zabalza +
* Tiziano Zito +
A total of 99 people contributed to this release.
People with a "+" by their names contributed a patch for the first time.
This list of names is automatically generated, and may not be fully complete.
Issues closed
-------------
- `#1431 <https://github.com/scipy/scipy/issues/1431>`__: ellipk(x) extending its domain for x<0 (Trac #904)
- `#1727 <https://github.com/scipy/scipy/issues/1727>`__: consistency of std interface (Trac #1200)
- `#1851 <https://github.com/scipy/scipy/issues/1851>`__: Shape parameter negated in genextreme (relative to R, MATLAB,...
- `#1889 <https://github.com/scipy/scipy/issues/1889>`__: interp2d is weird (Trac #1364)
- `#2188 <https://github.com/scipy/scipy/issues/2188>`__: splev gives wrong values or crashes outside of support when der...
- `#2343 <https://github.com/scipy/scipy/issues/2343>`__: scipy.insterpolate's splrep function fails with certain combinations...
- `#2669 <https://github.com/scipy/scipy/issues/2669>`__: .signal.ltisys.ss2tf should only apply to MISO systems in current...
- `#2911 <https://github.com/scipy/scipy/issues/2911>`__: interpolate.splder() failure on Fedora
- `#3171 <https://github.com/scipy/scipy/issues/3171>`__: future of weave in scipy
- `#3176 <https://github.com/scipy/scipy/issues/3176>`__: Suggestion to improve error message in scipy.integrate.odeint
- `#3198 <https://github.com/scipy/scipy/issues/3198>`__: pdf() and logpdf() methods for scipy.stats.gaussian_kde
- `#3318 <https://github.com/scipy/scipy/issues/3318>`__: Travis CI is breaking on test("full")
- `#3329 <https://github.com/scipy/scipy/issues/3329>`__: scipy.stats.scoreatpercentile backward-incompatible change not...
- `#3362 <https://github.com/scipy/scipy/issues/3362>`__: Reference cycle in scipy.sparse.linalg.eigs with shift-invert...
- `#3364 <https://github.com/scipy/scipy/issues/3364>`__: BUG: linalg.hessenberg broken (wrong results)
- `#3376 <https://github.com/scipy/scipy/issues/3376>`__: stats f_oneway needs floats
- `#3379 <https://github.com/scipy/scipy/issues/3379>`__: Installation of scipy 0.13.3 via zc.buildout fails
- `#3403 <https://github.com/scipy/scipy/issues/3403>`__: hierarchy.linkage raises an ugly exception for a compressed 2x2...
- `#3422 <https://github.com/scipy/scipy/issues/3422>`__: optimize.curve_fit() handles NaN by returning all parameters...
- `#3457 <https://github.com/scipy/scipy/issues/3457>`__: linalg.fractional_matrix_power has no docstring
- `#3469 <https://github.com/scipy/scipy/issues/3469>`__: DOC: `ndimage.find_object` ignores zero-values
- `#3491 <https://github.com/scipy/scipy/issues/3491>`__: optimize.leastsq() documentation should mention it does not work...
- `#3499 <https://github.com/scipy/scipy/issues/3499>`__: cluster.vq.whiten return nan for all zeros column in observations
- `#3503 <https://github.com/scipy/scipy/issues/3503>`__: minimize attempts to do vector addition when numpy arrays are...
- `#3508 <https://github.com/scipy/scipy/issues/3508>`__: exponweib.logpdf fails for valid parameters
- `#3509 <https://github.com/scipy/scipy/issues/3509>`__: libatlas3-base-dev does not exist
- `#3550 <https://github.com/scipy/scipy/issues/3550>`__: BUG: anomalous values computed by special.ellipkinc
- `#3555 <https://github.com/scipy/scipy/issues/3555>`__: `scipy.ndimage` positions are float instead of int
- `#3557 <https://github.com/scipy/scipy/issues/3557>`__: UnivariateSpline.__call__ should pass all relevant args through...
- `#3569 <https://github.com/scipy/scipy/issues/3569>`__: No license statement for test data imported from boost?
- `#3576 <https://github.com/scipy/scipy/issues/3576>`__: mstats test failure (too sensitive?)
- `#3579 <https://github.com/scipy/scipy/issues/3579>`__: Errors on scipy 0.14.x branch using MKL, Ubuntu 14.04 x86_64
- `#3580 <https://github.com/scipy/scipy/issues/3580>`__: Operator overloading with sparse matrices
- `#3587 <https://github.com/scipy/scipy/issues/3587>`__: Wrong alphabetical order in continuous statistical distribution...
- `#3596 <https://github.com/scipy/scipy/issues/3596>`__: scipy.signal.fftconvolve no longer threadsafe
- `#3623 <https://github.com/scipy/scipy/issues/3623>`__: BUG: signal.convolve takes longer than it needs to
- `#3655 <https://github.com/scipy/scipy/issues/3655>`__: Integer returned from integer data in scipy.signal.periodogram...
- `#3662 <https://github.com/scipy/scipy/issues/3662>`__: Travis failure on Numpy 1.5.1 (not reproducible?)
- `#3668 <https://github.com/scipy/scipy/issues/3668>`__: dendogram(orientation='foo')
- `#3669 <https://github.com/scipy/scipy/issues/3669>`__: KroghInterpolator doesn't pass through points
- `#3672 <https://github.com/scipy/scipy/issues/3672>`__: Inserting a knot in a spline
- `#3682 <https://github.com/scipy/scipy/issues/3682>`__: misleading documentation of scipy.optimize.curve_fit
- `#3699 <https://github.com/scipy/scipy/issues/3699>`__: BUG?: minor problem with scipy.signal.lfilter w/initial conditions
- `#3700 <https://github.com/scipy/scipy/issues/3700>`__: Inconsistent exceptions raised by scipy.io.loadmat
- `#3703 <https://github.com/scipy/scipy/issues/3703>`__: TypeError for RegularGridInterpolator with big-endian data
- `#3714 <https://github.com/scipy/scipy/issues/3714>`__: Misleading error message in eigsh: k must be between 1 and rank(A)-1
- `#3720 <https://github.com/scipy/scipy/issues/3720>`__: coo_matrix.setdiag() fails
- `#3740 <https://github.com/scipy/scipy/issues/3740>`__: Scipy.Spatial.KdTree (Query) Return Type?
- `#3761 <https://github.com/scipy/scipy/issues/3761>`__: Invalid result from scipy.special.btdtri
- `#3784 <https://github.com/scipy/scipy/issues/3784>`__: DOC - Special Functions - Drum example fix for higher modes
- `#3785 <https://github.com/scipy/scipy/issues/3785>`__: minimize() should have friendlier args=
- `#3787 <https://github.com/scipy/scipy/issues/3787>`__: BUG: signal: Division by zero in lombscargle
- `#3800 <https://github.com/scipy/scipy/issues/3800>`__: BUG: scipy.sparse.csgraph.shortest_path overwrites input matrix
- `#3817 <https://github.com/scipy/scipy/issues/3817>`__: Warning in calculating moments from Binomial distribution for...
- `#3821 <https://github.com/scipy/scipy/issues/3821>`__: review scipy usage of `np.ma.is_masked`
- `#3829 <https://github.com/scipy/scipy/issues/3829>`__: Linear algebra function documentation doesn't mention default...
- `#3830 <https://github.com/scipy/scipy/issues/3830>`__: A bug in Docstring of scipy.linalg.eig
- `#3844 <https://github.com/scipy/scipy/issues/3844>`__: Issue with shape parameter returned by genextreme
- `#3858 <https://github.com/scipy/scipy/issues/3858>`__: "ImportError: No module named Cython.Compiler.Main" on install
- `#3876 <https://github.com/scipy/scipy/issues/3876>`__: savgol_filter not in release notes and has no versionadded
- `#3884 <https://github.com/scipy/scipy/issues/3884>`__: scipy.stats.kendalltau empty array error
- `#3895 <https://github.com/scipy/scipy/issues/3895>`__: ValueError: illegal value in 12-th argument of internal gesdd...
- `#3898 <https://github.com/scipy/scipy/issues/3898>`__: skimage test broken by minmax filter change
- `#3901 <https://github.com/scipy/scipy/issues/3901>`__: scipy sparse errors with numpy master
- `#3905 <https://github.com/scipy/scipy/issues/3905>`__: DOC: optimize: linprog docstring has two "Returns" sections
- `#3915 <https://github.com/scipy/scipy/issues/3915>`__: DOC: sphinx warnings because of `**kwds` in the stats distributions...
- `#3935 <https://github.com/scipy/scipy/issues/3935>`__: Split stats.distributions files in tutorial
- `#3969 <https://github.com/scipy/scipy/issues/3969>`__: gh-3607 breaks backward compatibility in ode solver banded jacobians
- `#4025 <https://github.com/scipy/scipy/issues/4025>`__: DOC: signal: The return value of find_peaks_cwt is not documented.
- `#4029 <https://github.com/scipy/scipy/issues/4029>`__: scipy.stats.nbinom.logpmf(0,1,1) returns nan. Correct value is...
- `#4032 <https://github.com/scipy/scipy/issues/4032>`__: ERROR: test_imresize (test_pilutil.TestPILUtil)
- `#4038 <https://github.com/scipy/scipy/issues/4038>`__: errors do not propagate through scipy.integrate.odeint properly
- `#4171 <https://github.com/scipy/scipy/issues/4171>`__: orthogonal_procrustes always returns scale.
- `#4176 <https://github.com/scipy/scipy/issues/4176>`__: Solving the Discrete Lyapunov Equation does not work with matrix...
Pull requests
-------------
- `#3109 <https://github.com/scipy/scipy/pull/3109>`__: ENH Added Fisher's method and Stouffer's Z-score method
- `#3225 <https://github.com/scipy/scipy/pull/3225>`__: Add the limiting distributions to generalized Pareto distribution...
- `#3262 <https://github.com/scipy/scipy/pull/3262>`__: Implement back end of faster multivariate integration
- `#3266 <https://github.com/scipy/scipy/pull/3266>`__: ENH: signal: add type=False as parameter for periodogram and...
- `#3273 <https://github.com/scipy/scipy/pull/3273>`__: Add PEP8 check to Travis-CI
- `#3342 <https://github.com/scipy/scipy/pull/3342>`__: ENH: linprog function for linear programming
- `#3348 <https://github.com/scipy/scipy/pull/3348>`__: BUG: add proper error handling when using interp2d on regular...
- `#3351 <https://github.com/scipy/scipy/pull/3351>`__: ENH: Add MLS method
- `#3382 <https://github.com/scipy/scipy/pull/3382>`__: ENH: scipy.special information theory functions
- `#3396 <https://github.com/scipy/scipy/pull/3396>`__: ENH: improve stats.nanmedian more by assuming nans are rare
- `#3398 <https://github.com/scipy/scipy/pull/3398>`__: Added two wrappers to the gaussian_kde class.
- `#3405 <https://github.com/scipy/scipy/pull/3405>`__: BUG: cluster.linkage array conversion to double dtype
- `#3407 <https://github.com/scipy/scipy/pull/3407>`__: MAINT: use assert_warns instead of a more complicated mechanism
- `#3409 <https://github.com/scipy/scipy/pull/3409>`__: ENH: change to use array view in signal/_peak_finding.py
- `#3416 <https://github.com/scipy/scipy/pull/3416>`__: Issue 3376 : stats f_oneway needs floats
- `#3419 <https://github.com/scipy/scipy/pull/3419>`__: BUG: tools: Fix list of FMA instructions in detect_cpu_extensions_wine.py
- `#3420 <https://github.com/scipy/scipy/pull/3420>`__: DOC: stats: Add 'entropy' to the stats package-level documentation.
- `#3429 <https://github.com/scipy/scipy/pull/3429>`__: BUG: close intermediate file descriptor right after it is used...
- `#3430 <https://github.com/scipy/scipy/pull/3430>`__: MAINT: Fix some cython variable declarations to avoid warnings...
- `#3433 <https://github.com/scipy/scipy/pull/3433>`__: Correcting the normalization of chebwin window function
- `#3435 <https://github.com/scipy/scipy/pull/3435>`__: Add more precise link to R's quantile documentation
- `#3446 <https://github.com/scipy/scipy/pull/3446>`__: ENH: scipy.optimize - adding differential_evolution
- `#3450 <https://github.com/scipy/scipy/pull/3450>`__: MAINT: remove unused function scipy.stats.mstats_basic._kolmog1
- `#3458 <https://github.com/scipy/scipy/pull/3458>`__: Reworked version of PR-3084 (mstats-stats comparison)
- `#3462 <https://github.com/scipy/scipy/pull/3462>`__: MAINT : Returning a warning for low attenuation values of chebwin...
- `#3463 <https://github.com/scipy/scipy/pull/3463>`__: DOC: linalg: Add examples to functions in matfuncs.py
- `#3477 <https://github.com/scipy/scipy/pull/3477>`__: ENH: sparse: release GIL in sparsetools routines
- `#3480 <https://github.com/scipy/scipy/pull/3480>`__: DOC: Add more details to deconvolve docstring
- `#3484 <https://github.com/scipy/scipy/pull/3484>`__: BLD: fix Qhull build issue with MinGW-w64. Closes gh-3237.
- `#3498 <https://github.com/scipy/scipy/pull/3498>`__: MAINT: io: remove old warnings from idl.py
- `#3504 <https://github.com/scipy/scipy/pull/3504>`__: BUG: cluster.vq.whiten returns nan or inf when std==0
- `#3510 <https://github.com/scipy/scipy/pull/3510>`__: MAINT: stats: Reimplement the pdf and logpdf methods of exponweib.
- `#3512 <https://github.com/scipy/scipy/pull/3512>`__: Fix PEP8 errors showing up on TravisCI after pep8 1.5 release
- `#3514 <https://github.com/scipy/scipy/pull/3514>`__: DOC: libatlas3-base-dev seems to have never been a thing
- `#3516 <https://github.com/scipy/scipy/pull/3516>`__: DOC improve scipy.sparse docstrings
- `#3517 <https://github.com/scipy/scipy/pull/3517>`__: ENH: speed-up ndimage.filters.min(max)imum_filter1d
- `#3518 <https://github.com/scipy/scipy/pull/3518>`__: Issues in scipy.misc.logsumexp
- `#3526 <https://github.com/scipy/scipy/pull/3526>`__: DOC: graphical example for cwt, and use a more interesting signal
- `#3527 <https://github.com/scipy/scipy/pull/3527>`__: ENH: Implement min(max)imum_filter1d using the MINLIST algorithm
- `#3537 <https://github.com/scipy/scipy/pull/3537>`__: STY: reduce number of C compiler warnings
- `#3540 <https://github.com/scipy/scipy/pull/3540>`__: DOC: linalg: add docstring to fractional_matrix_power
- `#3542 <https://github.com/scipy/scipy/pull/3542>`__: kde.py Doc Typo
- `#3545 <https://github.com/scipy/scipy/pull/3545>`__: BUG: stats: stats.levy.cdf with small arguments loses precision.
- `#3547 <https://github.com/scipy/scipy/pull/3547>`__: BUG: special: erfcinv with small arguments loses precision.
- `#3553 <https://github.com/scipy/scipy/pull/3553>`__: DOC: Convolve examples
- `#3561 <https://github.com/scipy/scipy/pull/3561>`__: FIX: in ndimage.measurements return positions as int instead...
- `#3564 <https://github.com/scipy/scipy/pull/3564>`__: Fix test failures with numpy master. Closes gh-3554
- `#3565 <https://github.com/scipy/scipy/pull/3565>`__: ENH: make interp2d accept unsorted arrays for interpolation.
- `#3566 <https://github.com/scipy/scipy/pull/3566>`__: BLD: add numpy requirement to metadata if it can't be imported.
- `#3567 <https://github.com/scipy/scipy/pull/3567>`__: DOC: move matfuncs docstrings to user-visible functions
- `#3574 <https://github.com/scipy/scipy/pull/3574>`__: Fixes multiple bugs in mstats.theilslopes
- `#3577 <https://github.com/scipy/scipy/pull/3577>`__: TST: decrease sensitivity of an mstats test
- `#3585 <https://github.com/scipy/scipy/pull/3585>`__: Cleanup of code in scipy.constants
- `#3589 <https://github.com/scipy/scipy/pull/3589>`__: BUG: sparse: allow operator overloading
- `#3594 <https://github.com/scipy/scipy/pull/3594>`__: BUG: lobpcg returned wrong values for small matrices (n < 10)
- `#3598 <https://github.com/scipy/scipy/pull/3598>`__: MAINT: fix coverage and coveralls
- `#3599 <https://github.com/scipy/scipy/pull/3599>`__: MAINT: symeig -- now that's a name I've not heard in a long time
- `#3602 <https://github.com/scipy/scipy/pull/3602>`__: MAINT: clean up the new optimize.linprog and add a few more tests
- `#3607 <https://github.com/scipy/scipy/pull/3607>`__: BUG: integrate: Fix some bugs and documentation errors in the...
- `#3609 <https://github.com/scipy/scipy/pull/3609>`__: MAINT integrate/odepack: kill dead Fortran code
- `#3616 <https://github.com/scipy/scipy/pull/3616>`__: FIX: Invalid values
- `#3617 <https://github.com/scipy/scipy/pull/3617>`__: Sort netcdf variables in a Python-3 compatible way
- `#3622 <https://github.com/scipy/scipy/pull/3622>`__: DOC: Added 0.15.0 release notes entry for linprog function.
- `#3625 <https://github.com/scipy/scipy/pull/3625>`__: Fix documentation for cKDTree.sparse_distance_matrix
- `#3626 <https://github.com/scipy/scipy/pull/3626>`__: MAINT: linalg.orth memory efficiency
- `#3627 <https://github.com/scipy/scipy/pull/3627>`__: MAINT: stats: A bit of clean up
- `#3628 <https://github.com/scipy/scipy/pull/3628>`__: MAINT: signal: remove a useless function from wavelets.py
- `#3632 <https://github.com/scipy/scipy/pull/3632>`__: ENH: stats: Add Mood's median test.
- `#3636 <https://github.com/scipy/scipy/pull/3636>`__: MAINT: cluster: some clean up
- `#3638 <https://github.com/scipy/scipy/pull/3638>`__: DOC: docstring of optimize.basinhopping confuses singular and...
- `#3639 <https://github.com/scipy/scipy/pull/3639>`__: BUG: change ddof default to 1 in mstats.sem, consistent with...
- `#3640 <https://github.com/scipy/scipy/pull/3640>`__: Weave: deprecate the module and disable slow tests on TravisCI
- `#3641 <https://github.com/scipy/scipy/pull/3641>`__: ENH: Added support for date attributes to io.arff.arffread
- `#3644 <https://github.com/scipy/scipy/pull/3644>`__: MAINT: stats: remove superfluous alias in mstats_basic.py
- `#3646 <https://github.com/scipy/scipy/pull/3646>`__: ENH: adding `sum_duplicates` method to COO sparse matrix
- `#3647 <https://github.com/scipy/scipy/pull/3647>`__: Fix for #3596: Make fftconvolve threadsafe
- `#3650 <https://github.com/scipy/scipy/pull/3650>`__: BUG: sparse: smarter random index selection
- `#3652 <https://github.com/scipy/scipy/pull/3652>`__: fix wrong option name in power_divergence dosctring example
- `#3654 <https://github.com/scipy/scipy/pull/3654>`__: Changing EPD to Canopy
- `#3657 <https://github.com/scipy/scipy/pull/3657>`__: BUG: signal.welch: ensure floating point dtype regardless of...
- `#3660 <https://github.com/scipy/scipy/pull/3660>`__: TST: mark a test as known fail
- `#3661 <https://github.com/scipy/scipy/pull/3661>`__: BLD: ignore pep8 E302 (expected 2 blank lines, found 1)
- `#3663 <https://github.com/scipy/scipy/pull/3663>`__: BUG: fix leaking errstate, and ignore invalid= errors in a test
- `#3664 <https://github.com/scipy/scipy/pull/3664>`__: BUG: correlate was extremely slow when in2.size > in1.size
- `#3667 <https://github.com/scipy/scipy/pull/3667>`__: ENH: Adds default params to pdfs of multivariate_norm
- `#3670 <https://github.com/scipy/scipy/pull/3670>`__: ENH: Small speedup of FFT size check
- `#3671 <https://github.com/scipy/scipy/pull/3671>`__: DOC: adding differential_evolution function to 0.15 release notes
- `#3673 <https://github.com/scipy/scipy/pull/3673>`__: BUG: interpolate/fitpack: arguments to fortran routines may not...
- `#3674 <https://github.com/scipy/scipy/pull/3674>`__: Add support for appending to existing netcdf files
- `#3681 <https://github.com/scipy/scipy/pull/3681>`__: Speed up test('full'), solve Travis CI timeout issues
- `#3683 <https://github.com/scipy/scipy/pull/3683>`__: ENH: cluster: rewrite and optimize `vq` in Cython
- `#3684 <https://github.com/scipy/scipy/pull/3684>`__: Update special docs
- `#3688 <https://github.com/scipy/scipy/pull/3688>`__: Spacing in special docstrings
- `#3692 <https://github.com/scipy/scipy/pull/3692>`__: ENH: scipy.special: Improving sph_harm function
- `#3693 <https://github.com/scipy/scipy/pull/3693>`__: Update refguide entries for signal and fftpack
- `#3695 <https://github.com/scipy/scipy/pull/3695>`__: Update continuous.rst
- `#3696 <https://github.com/scipy/scipy/pull/3696>`__: ENH: check for valid 'orientation' kwarg in dendrogram()
- `#3701 <https://github.com/scipy/scipy/pull/3701>`__: make 'a' and 'b' coefficients atleast_1d array in filtfilt
- `#3702 <https://github.com/scipy/scipy/pull/3702>`__: BUG: cluster: _vq unable to handle large features
- `#3704 <https://github.com/scipy/scipy/pull/3704>`__: BUG: special: ellip(k,e)inc nan and double expected value
- `#3707 <https://github.com/scipy/scipy/pull/3707>`__: BUG: handle fill_value dtype checks correctly in RegularGridInterpolator
- `#3708 <https://github.com/scipy/scipy/pull/3708>`__: Reraise exception on failure to read mat file.
- `#3709 <https://github.com/scipy/scipy/pull/3709>`__: BUG: cast 'x' to correct dtype in KroghInterpolator._evaluate
- `#3712 <https://github.com/scipy/scipy/pull/3712>`__: ENH: cluster: reimplement the update-step of K-means in Cython
- `#3713 <https://github.com/scipy/scipy/pull/3713>`__: FIX: Check type of lfiltic
- `#3718 <https://github.com/scipy/scipy/pull/3718>`__: Changed INSTALL file extension to rst
- `#3719 <https://github.com/scipy/scipy/pull/3719>`__: address svds returning nans for zero input matrix
- `#3722 <https://github.com/scipy/scipy/pull/3722>`__: MAINT: spatial: static, unused code, sqrt(sqeuclidean)
- `#3725 <https://github.com/scipy/scipy/pull/3725>`__: ENH: use numpys nanmedian if available
- `#3727 <https://github.com/scipy/scipy/pull/3727>`__: TST: add a new fixed_point test and change some test function...
- `#3731 <https://github.com/scipy/scipy/pull/3731>`__: BUG: fix romb in scipy.integrate.quadrature
- `#3734 <https://github.com/scipy/scipy/pull/3734>`__: DOC: simplify examples with semilogx
- `#3735 <https://github.com/scipy/scipy/pull/3735>`__: DOC: Add minimal docstrings to lti.impulse/step
- `#3736 <https://github.com/scipy/scipy/pull/3736>`__: BUG: cast pchip arguments to floats
- `#3744 <https://github.com/scipy/scipy/pull/3744>`__: stub out inherited methods of Akima1DInterpolator
- `#3746 <https://github.com/scipy/scipy/pull/3746>`__: DOC: Fix formatting for Raises section
- `#3748 <https://github.com/scipy/scipy/pull/3748>`__: ENH: Added discrete Lyapunov transformation solve
- `#3750 <https://github.com/scipy/scipy/pull/3750>`__: Enable automated testing with Python 3.4
- `#3751 <https://github.com/scipy/scipy/pull/3751>`__: Reverse Cuthill-McKee and Maximum Bipartite Matching reorderings...
- `#3759 <https://github.com/scipy/scipy/pull/3759>`__: MAINT: avoid indexing with a float array
- `#3762 <https://github.com/scipy/scipy/pull/3762>`__: TST: filter out RuntimeWarning in vq tests
- `#3766 <https://github.com/scipy/scipy/pull/3766>`__: TST: cluster: some cleanups in test_hierarchy.py
- `#3767 <https://github.com/scipy/scipy/pull/3767>`__: ENH/BUG: support negative m in elliptic integrals
- `#3769 <https://github.com/scipy/scipy/pull/3769>`__: ENH: avoid repeated matrix inverse
- `#3770 <https://github.com/scipy/scipy/pull/3770>`__: BUG: signal: In lfilter_zi, b was not rescaled correctly when...
- `#3772 <https://github.com/scipy/scipy/pull/3772>`__: STY avoid unnecessary transposes in csr_matrix.getcol/row
- `#3773 <https://github.com/scipy/scipy/pull/3773>`__: ENH: Add ext parameter to UnivariateSpline call
- `#3774 <https://github.com/scipy/scipy/pull/3774>`__: BUG: in integrate/quadpack.h, put all declarations before statements.
- `#3779 <https://github.com/scipy/scipy/pull/3779>`__: Incbet fix
- `#3788 <https://github.com/scipy/scipy/pull/3788>`__: BUG: Fix lombscargle ZeroDivisionError
- `#3791 <https://github.com/scipy/scipy/pull/3791>`__: Some maintenance for doc builds
- `#3795 <https://github.com/scipy/scipy/pull/3795>`__: scipy.special.legendre docstring
- `#3796 <https://github.com/scipy/scipy/pull/3796>`__: TYPO: sheroidal -> spheroidal
- `#3801 <https://github.com/scipy/scipy/pull/3801>`__: BUG: shortest_path overwrite
- `#3803 <https://github.com/scipy/scipy/pull/3803>`__: TST: lombscargle regression test related to atan vs atan2
- `#3809 <https://github.com/scipy/scipy/pull/3809>`__: ENH: orthogonal procrustes solver
- `#3811 <https://github.com/scipy/scipy/pull/3811>`__: ENH: scipy.special, Implemented Ellipsoidal harmonic function:...
- `#3819 <https://github.com/scipy/scipy/pull/3819>`__: BUG: make a fully connected csgraph from an ndarray with no zeros
- `#3820 <https://github.com/scipy/scipy/pull/3820>`__: MAINT: avoid spurious warnings in binom(n, p=0).mean() etc
- `#3825 <https://github.com/scipy/scipy/pull/3825>`__: Don't claim scipy.cluster does distance matrix calculations.
- `#3827 <https://github.com/scipy/scipy/pull/3827>`__: get and set diagonal of coo_matrix, and related csgraph laplacian...
- `#3832 <https://github.com/scipy/scipy/pull/3832>`__: DOC: Minor additions to integrate/nquad docstring.
- `#3845 <https://github.com/scipy/scipy/pull/3845>`__: Bug fix for #3842: Bug in scipy.optimize.line_search
- `#3848 <https://github.com/scipy/scipy/pull/3848>`__: BUG: edge case where the covariance matrix is exactly zero
- `#3850 <https://github.com/scipy/scipy/pull/3850>`__: DOC: typo
- `#3851 <https://github.com/scipy/scipy/pull/3851>`__: DOC: document default argument values for some arpack functions
- `#3860 <https://github.com/scipy/scipy/pull/3860>`__: DOC: sparse: add the function 'find' to the module-level docstring
- `#3861 <https://github.com/scipy/scipy/pull/3861>`__: BUG: Removed unnecessary storage of args as instance variables...
- `#3862 <https://github.com/scipy/scipy/pull/3862>`__: BUG: signal: fix handling of multi-output systems in ss2tf.
- `#3865 <https://github.com/scipy/scipy/pull/3865>`__: Feature request: ability to read heterogeneous types in FortranFile
- `#3866 <https://github.com/scipy/scipy/pull/3866>`__: MAINT: update pip wheelhouse for installs
- `#3871 <https://github.com/scipy/scipy/pull/3871>`__: MAINT: linalg: get rid of calc_lwork.f
- `#3872 <https://github.com/scipy/scipy/pull/3872>`__: MAINT: use scipy.linalg instead of np.dual
- `#3873 <https://github.com/scipy/scipy/pull/3873>`__: BLD: show a more informative message if Cython wasn't installed.
- `#3874 <https://github.com/scipy/scipy/pull/3874>`__: TST: cluster: cleanup the hierarchy test data
- `#3877 <https://github.com/scipy/scipy/pull/3877>`__: DOC: Savitzky-Golay filter version added
- `#3878 <https://github.com/scipy/scipy/pull/3878>`__: DOC: move versionadded to notes
- `#3879 <https://github.com/scipy/scipy/pull/3879>`__: small tweaks to the docs
- `#3881 <https://github.com/scipy/scipy/pull/3881>`__: FIX incorrect sorting during fancy assignment
- `#3885 <https://github.com/scipy/scipy/pull/3885>`__: kendalltau function now returns a nan tuple if empty arrays used...
- `#3886 <https://github.com/scipy/scipy/pull/3886>`__: BUG: fixing linprog's kwarg order to match docs
- `#3888 <https://github.com/scipy/scipy/pull/3888>`__: BUG: optimize: In _linprog_simplex, handle the case where the...
- `#3891 <https://github.com/scipy/scipy/pull/3891>`__: BUG: stats: Fix ValueError message in chi2_contingency.
- `#3892 <https://github.com/scipy/scipy/pull/3892>`__: DOC: sparse.linalg: Fix lobpcg docstring.
- `#3894 <https://github.com/scipy/scipy/pull/3894>`__: DOC: stats: Assorted docstring edits.
- `#3896 <https://github.com/scipy/scipy/pull/3896>`__: Fix 2 mistakes in MatrixMarket format parsing
- `#3897 <https://github.com/scipy/scipy/pull/3897>`__: BUG: associated Legendre function of second kind for 1<x<1.0001
- `#3899 <https://github.com/scipy/scipy/pull/3899>`__: BUG: fix undefined behavior in alngam
- `#3906 <https://github.com/scipy/scipy/pull/3906>`__: MAINT/DOC: Whitespace tweaks in several docstrings.
- `#3907 <https://github.com/scipy/scipy/pull/3907>`__: TST: relax bounds of interpolate test to accommodate rounding...
- `#3909 <https://github.com/scipy/scipy/pull/3909>`__: MAINT: Create a common version of `count_nonzero` for compatibility...
- `#3910 <https://github.com/scipy/scipy/pull/3910>`__: Fix a couple of test errors in master
- `#3911 <https://github.com/scipy/scipy/pull/3911>`__: Use MathJax for the html docs
- `#3914 <https://github.com/scipy/scipy/pull/3914>`__: Rework the _roots functions and document them.
- `#3916 <https://github.com/scipy/scipy/pull/3916>`__: Remove all linpack_lite code and replace with LAPACK routines
- `#3917 <https://github.com/scipy/scipy/pull/3917>`__: splines, constant extrapolation
- `#3918 <https://github.com/scipy/scipy/pull/3918>`__: DOC: tweak the rv_discrete docstring example
- `#3919 <https://github.com/scipy/scipy/pull/3919>`__: Quadrature speed-up: scipy.special.orthogonal.p_roots with cache
- `#3920 <https://github.com/scipy/scipy/pull/3920>`__: DOC: Clarify docstring for `sigma` parameter for `curve_fit`
- `#3922 <https://github.com/scipy/scipy/pull/3922>`__: Fixed Docstring issues in linprog (Fixes #3905).
- `#3924 <https://github.com/scipy/scipy/pull/3924>`__: Coerce args into tuple if necessary.
- `#3926 <https://github.com/scipy/scipy/pull/3926>`__: DOC: Surround stats class methods in docstrings with backticks.
- `#3927 <https://github.com/scipy/scipy/pull/3927>`__: Changed doc for romb's dx parameter to int.
- `#3928 <https://github.com/scipy/scipy/pull/3928>`__: check FITPACK conditions in LSQUnivariateSpline
- `#3929 <https://github.com/scipy/scipy/pull/3929>`__: Added a warning about leastsq using with NaNs.
- `#3929 <https://github.com/scipy/scipy/pull/3929>`__: Added a warning about using leastsq with NaNs.
- `#3932 <https://github.com/scipy/scipy/pull/3932>`__: Clarified the k > n case.
- `#3933 <https://github.com/scipy/scipy/pull/3933>`__: DOC: remove `import scipy as sp` abbreviation here and there
- `#3936 <https://github.com/scipy/scipy/pull/3936>`__: Add license and copyright holders to test data imported from...
- `#3938 <https://github.com/scipy/scipy/pull/3938>`__: DOC: Corrected documentation for return types.
- `#3939 <https://github.com/scipy/scipy/pull/3939>`__: DOC: fitpack: add a note about Sch-W conditions to splrep docstring
- `#3940 <https://github.com/scipy/scipy/pull/3940>`__: TST: integrate: Remove an invalid test of odeint.
- `#3942 <https://github.com/scipy/scipy/pull/3942>`__: FIX: Corrected error message of eigsh.
- `#3943 <https://github.com/scipy/scipy/pull/3943>`__: ENH: release GIL for filter and interpolation of ndimage
- `#3944 <https://github.com/scipy/scipy/pull/3944>`__: FIX: Raise value error if window data-type is unsupported
- `#3946 <https://github.com/scipy/scipy/pull/3946>`__: Fixed signal.get_window with unicode window name
- `#3947 <https://github.com/scipy/scipy/pull/3947>`__: MAINT: some docstring fixes and style cleanups in stats.mstats
- `#3949 <https://github.com/scipy/scipy/pull/3949>`__: DOC: fix a couple of issues in stats docstrings.
- `#3950 <https://github.com/scipy/scipy/pull/3950>`__: TST: sparse: remove known failure that doesn't fail
- `#3951 <https://github.com/scipy/scipy/pull/3951>`__: TST: switch from Rackspace wheelhouse to numpy/cython source...
- `#3952 <https://github.com/scipy/scipy/pull/3952>`__: DOC: stats: Small formatting correction to the 'chi' distribution...
- `#3953 <https://github.com/scipy/scipy/pull/3953>`__: DOC: stats: Several corrections and small additions to docstrings.
- `#3955 <https://github.com/scipy/scipy/pull/3955>`__: signal.__init__.py: remove duplicated `get_window` entry
- `#3959 <https://github.com/scipy/scipy/pull/3959>`__: TST: sparse: more "known failures" for DOK that don't fail
- `#3960 <https://github.com/scipy/scipy/pull/3960>`__: BUG: io.netcdf: do not close mmap if there are references left...
- `#3965 <https://github.com/scipy/scipy/pull/3965>`__: DOC: Fix a few more sphinx warnings that occur when building...
- `#3966 <https://github.com/scipy/scipy/pull/3966>`__: DOC: add guidelines for using test generators in HACKING
- `#3968 <https://github.com/scipy/scipy/pull/3968>`__: BUG: sparse.linalg: make Inv objects in arpack garbage-collectable...
- `#3971 <https://github.com/scipy/scipy/pull/3971>`__: Remove all linpack_lite code and replace with LAPACK routines
- `#3972 <https://github.com/scipy/scipy/pull/3972>`__: fix typo in error message
- `#3973 <https://github.com/scipy/scipy/pull/3973>`__: MAINT: better error message for multivariate normal.
- `#3981 <https://github.com/scipy/scipy/pull/3981>`__: turn the cryptically named scipy.special information theory functions...
- `#3984 <https://github.com/scipy/scipy/pull/3984>`__: Wrap her, syr, her2, syr2 blas routines
- `#3990 <https://github.com/scipy/scipy/pull/3990>`__: improve UnivariateSpline docs
- `#3991 <https://github.com/scipy/scipy/pull/3991>`__: ENH: stats: return namedtuple for describe output
- `#3993 <https://github.com/scipy/scipy/pull/3993>`__: DOC: stats: percentileofscore references np.percentile
- `#3997 <https://github.com/scipy/scipy/pull/3997>`__: BUG: linalg: pascal(35) was incorrect: last element overflowed...
- `#3998 <https://github.com/scipy/scipy/pull/3998>`__: MAINT: use isMaskedArray instead of is_masked to check type
- `#3999 <https://github.com/scipy/scipy/pull/3999>`__: TST: test against all of boost data files.
- `#4000 <https://github.com/scipy/scipy/pull/4000>`__: BUG: stats: Fix edge-case handling in a few distributions.
- `#4003 <https://github.com/scipy/scipy/pull/4003>`__: ENH: using python's warnings instead of prints in fitpack.
- `#4004 <https://github.com/scipy/scipy/pull/4004>`__: MAINT: optimize: remove a couple unused variables in zeros.c
- `#4006 <https://github.com/scipy/scipy/pull/4006>`__: BUG: Fix C90 compiler warnings in `NI_MinOrMaxFilter1D`
- `#4007 <https://github.com/scipy/scipy/pull/4007>`__: MAINT/DOC: Fix spelling of 'decomposition' in several files.
- `#4008 <https://github.com/scipy/scipy/pull/4008>`__: DOC: stats: Split the descriptions of the distributions in the...
- `#4015 <https://github.com/scipy/scipy/pull/4015>`__: TST: logsumexp regression test
- `#4016 <https://github.com/scipy/scipy/pull/4016>`__: MAINT: remove some inf-related warnings from logsumexp
- `#4020 <https://github.com/scipy/scipy/pull/4020>`__: DOC: stats: fix whitespace in docstrings of several distributions
- `#4023 <https://github.com/scipy/scipy/pull/4023>`__: Exactly one space required before assignments
- `#4024 <https://github.com/scipy/scipy/pull/4024>`__: In dendrogram(): Correct an argument name and a grammar issue...
- `#4041 <https://github.com/scipy/scipy/pull/4041>`__: BUG: misc: Ensure that the 'size' argument of PIL's 'resize'...
- `#4049 <https://github.com/scipy/scipy/pull/4049>`__: BUG: Return of _logpmf
- `#4051 <https://github.com/scipy/scipy/pull/4051>`__: BUG: expm of integer matrices
- `#4052 <https://github.com/scipy/scipy/pull/4052>`__: ENH: integrate: odeint: Handle exceptions in the callback functions.
- `#4053 <https://github.com/scipy/scipy/pull/4053>`__: BUG: stats: Refactor argument validation to avoid a unicode issue.
- `#4057 <https://github.com/scipy/scipy/pull/4057>`__: Added newline to scipy.sparse.linalg.svds documentation for correct...
- `#4058 <https://github.com/scipy/scipy/pull/4058>`__: MAINT: stats: Add note about change to scoreatpercentile in release...
- `#4059 <https://github.com/scipy/scipy/pull/4059>`__: ENH: interpolate: Allow splev to accept an n-dimensional array.
- `#4064 <https://github.com/scipy/scipy/pull/4064>`__: Documented the return value for scipy.signal.find_peaks_cwt
- `#4074 <https://github.com/scipy/scipy/pull/4074>`__: ENH: Support LinearOperator as input to svds
- `#4084 <https://github.com/scipy/scipy/pull/4084>`__: BUG: Match exception declarations in scipy/io/matlab/streams.pyx...
- `#4091 <https://github.com/scipy/scipy/pull/4091>`__: DOC: special: more clear instructions on how to evaluate polynomials
- `#4105 <https://github.com/scipy/scipy/pull/4105>`__: BUG: Workaround for SGEMV segfault in Accelerate
- `#4107 <https://github.com/scipy/scipy/pull/4107>`__: DOC: get rid of 'import \*' in examples
- `#4113 <https://github.com/scipy/scipy/pull/4113>`__: DOC: fix typos in distance.yule
- `#4114 <https://github.com/scipy/scipy/pull/4114>`__: MAINT C fixes
- `#4117 <https://github.com/scipy/scipy/pull/4117>`__: deprecate nanmean, nanmedian and nanstd in favor of their numpy...
- `#4126 <https://github.com/scipy/scipy/pull/4126>`__: scipy.io.idl: support description records and fix bug with null...
- `#4131 <https://github.com/scipy/scipy/pull/4131>`__: ENH: release GIL in more ndimage functions
- `#4132 <https://github.com/scipy/scipy/pull/4132>`__: MAINT: stats: fix a typo [skip ci]
- `#4145 <https://github.com/scipy/scipy/pull/4145>`__: DOC: Fix documentation error for nc chi-squared dist
- `#4150 <https://github.com/scipy/scipy/pull/4150>`__: Fix _nd_image.geometric_transform endianness bug
- `#4153 <https://github.com/scipy/scipy/pull/4153>`__: MAINT: remove use of deprecated numpy API in lib/lapack/ f2py...
- `#4156 <https://github.com/scipy/scipy/pull/4156>`__: MAINT: optimize: remove dead code
- `#4159 <https://github.com/scipy/scipy/pull/4159>`__: MAINT: optimize: clean up Zeros code
- `#4165 <https://github.com/scipy/scipy/pull/4165>`__: DOC: add missing special functions to __doc__
- `#4172 <https://github.com/scipy/scipy/pull/4172>`__: DOC: remove misleading procrustes docstring line
- `#4175 <https://github.com/scipy/scipy/pull/4175>`__: DOC: sparse: clarify CSC and CSR constructor usage
- `#4177 <https://github.com/scipy/scipy/pull/4177>`__: MAINT: enable np.matrix inputs to solve_discrete_lyapunov
- `#4179 <https://github.com/scipy/scipy/pull/4179>`__: TST: fix an intermittently failing test case for special.legendre
- `#4181 <https://github.com/scipy/scipy/pull/4181>`__: MAINT: remove unnecessary null checks before free
- `#4182 <https://github.com/scipy/scipy/pull/4182>`__: Ellipsoidal harmonics
- `#4183 <https://github.com/scipy/scipy/pull/4183>`__: Skip Cython build in Travis-CI
- `#4184 <https://github.com/scipy/scipy/pull/4184>`__: Pr 4074
- `#4187 <https://github.com/scipy/scipy/pull/4187>`__: Pr/3923
- `#4190 <https://github.com/scipy/scipy/pull/4190>`__: BUG: special: fix up ellip_harm build
- `#4193 <https://github.com/scipy/scipy/pull/4193>`__: BLD: fix msvc compiler errors
- `#4194 <https://github.com/scipy/scipy/pull/4194>`__: BUG: fix buffer dtype mismatch on win-amd64
- `#4199 <https://github.com/scipy/scipy/pull/4199>`__: ENH: Changed scipy.stats.describe output from datalen to nobs
- `#4201 <https://github.com/scipy/scipy/pull/4201>`__: DOC: add blas2 and nan* deprecations to the release notes
- `#4243 <https://github.com/scipy/scipy/pull/4243>`__: TST: bump test tolerances

View file

@ -1,12 +0,0 @@
==========================
SciPy 0.15.1 Release Notes
==========================
SciPy 0.15.1 is a bug-fix release with no new features compared to 0.15.0.
Issues fixed
------------
* `#4413 <https://github.com/scipy/scipy/pull/4413>`__: BUG: Tests too strict, f2py doesn't have to overwrite this array
* `#4417 <https://github.com/scipy/scipy/pull/4417>`__: BLD: avoid using NPY_API_VERSION to check not using deprecated...
* `#4418 <https://github.com/scipy/scipy/pull/4418>`__: Restore and deprecate scipy.linalg.calc_lwork

View file

@ -1,189 +0,0 @@
==========================
SciPy 0.16.0 Release Notes
==========================
.. note:: Scipy 0.16.0 is not released yet!
.. contents::
SciPy 0.16.0 is the culmination of X months of hard work. It contains
many new features, numerous bug-fixes, improved test coverage and
better documentation. There have been a number of deprecations and
API changes in this release, which are documented below. All users
are encouraged to upgrade to this release, as there are a large number
of bug-fixes and optimizations. Moreover, our development attention
will now shift to bug-fix releases on the 0.15.x branch, and on adding
new features on the master branch.
This release requires Python 2.6, 2.7 or 3.2-3.4 and NumPy 1.6.2 or greater.
Highlights of this release include:
- A Cython API for BLAS/LAPACK in `scipy.linalg`
- A new benchmark suite. It's now straightforward to add new benchmarks, and
they're routinely included with performance enhancement PRs.
- Support for the second order sections (SOS) format in `scipy.signal`.
New features
============
Benchmark suite
---------------
The benchmark suite has switched to using `Airspeed Velocity
<http://spacetelescope.github.io/asv/>`__ for benchmarking. You can
run the suite locally via ``python runtests.py --bench``. For more
details, see ``benchmarks/README.rst``.
`scipy.linalg` improvements
---------------------------
A full set of Cython wrappers for BLAS and LAPACK has been added in the
modules `scipy.linalg.cython_blas` and `scipy.linalg.cython_lapack`.
In Cython, these wrappers can now be cimported from their corresponding
modules and used without linking directly against BLAS or LAPACK.
The functions `scipy.linalg.qr_delete`, `scipy.linalg.qr_insert` and
`scipy.linalg.qr_update` for updating QR decompositions were added.
The function `scipy.linalg.solve_circulant` solves a linear system with
a circulant coefficient matrix.
The function `scipy.linalg.invpascal` computes the inverse of a Pascal matrix.
The function `scipy.linalg.solve_toeplitz`, a Levinson-Durbin Toeplitz solver,
was added.
Added wrapper for potentially useful LAPACK function ``*lasd4``. It computes
the square root of the i-th updated eigenvalue of a positive symmetric rank-one
modification to a positive diagonal matrix. See its LAPACK documentation and
unit tests for it to get more info.
Added two extra wrappers for LAPACK least-square solvers. Namely, they are
``*gelsd`` and ``*gelsy``.
Wrappers for the LAPACK ``*lange`` functions, which calculate various matrix
norms, were added.
Wrappers for ``*gtsv`` and ``*ptsv``, which solve ``A*X = B`` for tri-diagonal
matrix ``A``, were added.
`scipy.signal` improvements
---------------------------
Support for second order sections (SOS) as a format for IIR filters
was added. The new functions are:
* `scipy.signal.sosfilt`
* `scipy.signal.sosfilt_zi`,
* `scipy.signal.sos2tf`
* `scipy.signal.sos2zpk`
* `scipy.signal.tf2sos`
* `scipy.signal.zpk2sos`.
Additionally, the filter design functions `iirdesign`, `iirfilter`, `butter`,
`cheby1`, `cheby2`, `ellip`, and `bessel` can return the filter in the SOS
format.
The function `scipy.signal.place_poles`, which provides two methods to place
poles for linear systems, was added.
The option to use Gustafsson's method for choosing the initial conditions
of the forward and backward passes was added to `scipy.signal.filtfilt`.
An exponential (Poisson) window was added as `scipy.signal.exponential`, and a
Tukey window was added as `scipy.signal.tukey`.
The function for computing digital filter group delay was added as
`scipy.signal.group_delay`.
`scipy.sparse` improvements
---------------------------
The function `scipy.sparse.norm`, which computes sparse matrix norms, was
added.
The function `scipy.sparse.random`, which allows one to draw random variates from
an arbitrary distribution, was added.
`scipy.spatial` improvements
----------------------------
The function `scipy.spatial.procrustes` for Procrustes analysis (statistical
shape analysis) was added.
`scipy.stats` improvements
--------------------------
The Wishart distribution and its inverse have been added, as
`scipy.stats.wishart` and `scipy.stats.invwishart`.
The Exponentially Modified Normal distribution has been
added as `scipy.stats.exponnorm`.
The Generalized Normal distribution has been added as `scipy.stats.gennorm`.
All distributions now contain a ``random_state`` property and allow specifying a
specific ``numpy.random.RandomState`` random number generator when generating
random variates.
`scipy.optimize` improvements
-----------------------------
A new derivative-free method DF-SANE has been added to the nonlinear equation
system solving function `scipy.optimize.root`.
Deprecated features
===================
``scipy.stats.pdf_fromgamma`` is deprecated. This function was undocumented,
untested and rarely used. Statsmodels provides equivalent functionality
with ``statsmodels.distributions.ExpandedNormal``.
``scipy.stats.fastsort`` is deprecated. This function is unnecessary,
``numpy.argsort`` can be used instead.
``scipy.stats.signaltonoise`` and ``scipy.stats.mstats.signaltonoise`` are
deprecated. These functions did not belong in ``scipy.stats`` and are rarely
used. See issue #609 for details.
``scipy.stats.histogram2`` is deprecated. This function is unnecessary,
``numpy.histogram2d`` can be used instead.
Backwards incompatible changes
==============================
The deprecated global optimizer ``scipy.optimize.anneal`` was removed.
The following deprecated modules have been removed: ``scipy.lib.blas``,
``scipy.lib.lapack``, ``scipy.linalg.cblas``, ``scipy.linalg.fblas``,
``scipy.linalg.clapack``, ``scipy.linalg.flapack``. They had been deprecated
since Scipy 0.12.0, the functionality should be accessed as `scipy.linalg.blas`
and `scipy.linalg.lapack`.
The deprecated function ``scipy.special.all_mat`` has been removed.
The deprecated functions ``fprob``, ``ksprob``, ``zprob``, ``randwcdf``
and ``randwppf`` have been removed from `scipy.stats`.
Other changes
=============
The version numbering for development builds has been updated to comply with PEP 440.
Building with ``python setup.py develop`` is now supported.
Authors
=======
Issues closed
-------------
Pull requests
-------------

View file

@ -1,73 +0,0 @@
==========================
SciPy 0.17.0 Release Notes
==========================
.. note:: Scipy 0.17.0 is not released yet!
.. contents::
SciPy 0.17.0 is the culmination of X months of hard work. It contains
many new features, numerous bug-fixes, improved test coverage and
better documentation. There have been a number of deprecations and
API changes in this release, which are documented below. All users
are encouraged to upgrade to this release, as there are a large number
of bug-fixes and optimizations. Moreover, our development attention
will now shift to bug-fix releases on the 0.16.x branch, and on adding
new features on the master branch.
This release requires Python 2.6, 2.7 or 3.2-3.4 and NumPy 1.6.2 or greater.
New features
============
Deprecated features
===================
``scipy.stats.threshold`` and ``scipy.mstats.threshold`` are being deprecated
in favor of ``np.clip``. See issue #617 for details.
``scipy.stats.ss`` is deprecated. This is a support function, not meant to
be exposed to the user. Also, the name is unclear. See issue #663 for details.
``scipy.stats.square_of_sums`` is deprecated. This too is a support function
not meant to be exposed to the user. See issues #665 and #663 for details.
``scipy.stats.f_value``, ``scipy.stats.f_value_multivariate``,
``scipy.stats.f_value_wilks_lambda``, and ``scipy.mstats.f_value_wilks_lambda``
are deprecated. These are related to ANOVA, for which ``scipy.stats`` provides
quite limited functionality and these functions are not very useful standalone.
See issues #660 and #650 for details.
``scipy.stats.chisqprob`` is deprecated. This is an alias. ``stats.chi2.sf``
should be used instead.
``scipy.stats.betai`` is deprecated. This is an alias for ``special.betainc``
which should be used instead.
Backwards incompatible changes
==============================
The functions ``stats.trim1`` and ``stats.trimboth`` now make sure the
elements trimmed are the lowest and/or highest, depending on the case.
Slicing without at least partial sorting was previously done, but didn't
make sense for unsorted input.
When ``variable_names`` is set to an empty list, ``scipy.io.loadmat`` now
correctly returns no values instead of all the contents of the MAT file.
Other changes
=============
Authors
=======
Issues closed
-------------
Pull requests
-------------

View file

@ -1,420 +0,0 @@
=========================
SciPy 0.7.0 Release Notes
=========================
.. contents::
SciPy 0.7.0 is the culmination of 16 months of hard work. It contains
many new features, numerous bug-fixes, improved test coverage and
better documentation. There have been a number of deprecations and
API changes in this release, which are documented below. All users
are encouraged to upgrade to this release, as there are a large number
of bug-fixes and optimizations. Moreover, our development attention
will now shift to bug-fix releases on the 0.7.x branch, and on adding
new features on the development trunk. This release requires Python
2.4 or 2.5 and NumPy 1.2 or greater.
Please note that SciPy is still considered to have "Beta" status, as
we work toward a SciPy 1.0.0 release. The 1.0.0 release will mark a
major milestone in the development of SciPy, after which changing the
package structure or API will be much more difficult. Whilst these
pre-1.0 releases are considered to have "Beta" status, we are
committed to making them as bug-free as possible. For example, in
addition to fixing numerous bugs in this release, we have also doubled
the number of unit tests since the last release.
However, until the 1.0 release, we are aggressively reviewing and
refining the functionality, organization, and interface. This is being
done in an effort to make the package as coherent, intuitive, and
useful as possible. To achieve this, we need help from the community
of users. Specifically, we need feedback regarding all aspects of the
project - everything - from which algorithms we implement, to details
about our function's call signatures.
Over the last year, we have seen a rapid increase in community
involvement, and numerous infrastructure improvements to lower the
barrier to contributions (e.g., more explicit coding standards,
improved testing infrastructure, better documentation tools). Over
the next year, we hope to see this trend continue and invite everyone
to become more involved.
Python 2.6 and 3.0
==================
A significant amount of work has gone into making SciPy compatible
with Python 2.6; however, there are still some issues in this regard.
The main issue with 2.6 support is NumPy. On UNIX (including Mac OS
X), NumPy 1.2.1 mostly works, with a few caveats. On Windows, there
are problems related to the compilation process. The upcoming NumPy
1.3 release will fix these problems. Any remaining issues with 2.6
support for SciPy 0.7 will be addressed in a bug-fix release.
Python 3.0 is not supported at all; it requires NumPy to be ported to
Python 3.0. This requires immense effort, since a lot of C code has
to be ported. The transition to 3.0 is still under consideration;
currently, we don't have any timeline or roadmap for this transition.
Major documentation improvements
================================
SciPy documentation is greatly improved; you can view a HTML reference
manual `online <http://docs.scipy.org/>`__ or download it as a PDF
file. The new reference guide was built using the popular `Sphinx tool
<http://sphinx.pocoo.org/>`__.
This release also includes an updated tutorial, which hadn't been
available since SciPy was ported to NumPy in 2005. Though not
comprehensive, the tutorial shows how to use several essential parts
of Scipy. It also includes the ``ndimage`` documentation from the
``numarray`` manual.
Nevertheless, more effort is needed on the documentation front.
Luckily, contributing to Scipy documentation is now easier than
before: if you find that a part of it requires improvements, and want
to help us out, please register a user name in our web-based
documentation editor at http://docs.scipy.org/ and correct the issues.
Running Tests
=============
NumPy 1.2 introduced a new testing framework based on `nose
<http://somethingaboutorange.com/mrl/projects/nose/>`__. Starting with
this release, SciPy now uses the new NumPy test framework as well.
Taking advantage of the new testing framework requires ``nose``
version 0.10, or later. One major advantage of the new framework is
that it greatly simplifies writing unit tests - which has already
paid off, given the rapid increase in tests. To run the full test
suite::
>>> import scipy
>>> scipy.test('full')
For more information, please see `The NumPy/SciPy Testing Guide
<http://projects.scipy.org/scipy/numpy/wiki/TestingGuidelines>`__.
We have also greatly improved our test coverage. There were just over
2,000 unit tests in the 0.6.0 release; this release nearly doubles
that number, with just over 4,000 unit tests.
Building SciPy
==============
Support for NumScons has been added. NumScons is a tentative new build
system for NumPy/SciPy, using `SCons <http://www.scons.org/>`__ at its
core.
SCons is a next-generation build system, intended to replace the
venerable ``Make`` with the integrated functionality of
``autoconf``/``automake`` and ``ccache``. Scons is written in Python
and its configuration files are Python scripts. NumScons is meant to
replace NumPy's custom version of ``distutils`` providing more
advanced functionality, such as ``autoconf``, improved fortran
support, more tools, and support for ``numpy.distutils``/``scons``
cooperation.
Sandbox Removed
===============
While porting SciPy to NumPy in 2005, several packages and modules
were moved into ``scipy.sandbox``. The sandbox was a staging ground
for packages that were undergoing rapid development and whose APIs
were in flux. It was also a place where broken code could live. The
sandbox has served its purpose well, but was starting to create
confusion. Thus ``scipy.sandbox`` was removed. Most of the code was
moved into ``scipy``, some code was made into a ``scikit``, and the
remaining code was just deleted, as the functionality had been
replaced by other code.
Sparse Matrices
===============
Sparse matrices have seen extensive improvements. There is now
support for integer dtypes such as ``int8``, ``uint32``, etc. Two new
sparse formats were added:
* new class ``dia_matrix`` : the sparse DIAgonal format
* new class ``bsr_matrix`` : the Block CSR format
Several new sparse matrix construction functions were added:
* ``sparse.kron`` : sparse Kronecker product
* ``sparse.bmat`` : sparse version of ``numpy.bmat``
* ``sparse.vstack`` : sparse version of ``numpy.vstack``
* ``sparse.hstack`` : sparse version of ``numpy.hstack``
Extraction of submatrices and nonzero values have been added:
* ``sparse.tril`` : extract lower triangle
* ``sparse.triu`` : extract upper triangle
* ``sparse.find`` : nonzero values and their indices
``csr_matrix`` and ``csc_matrix`` now support slicing and fancy
indexing (e.g., ``A[1:3, 4:7]`` and ``A[[3,2,6,8],:]``). Conversions
among all sparse formats are now possible:
* using member functions such as ``.tocsr()`` and ``.tolil()``
* using the ``.asformat()`` member function, e.g. ``A.asformat('csr')``
* using constructors ``A = lil_matrix([[1,2]]); B = csr_matrix(A)``
All sparse constructors now accept dense matrices and lists of lists.
For example:
* ``A = csr_matrix( rand(3,3) )`` and ``B = lil_matrix( [[1,2],[3,4]] )``
The handling of diagonals in the ``spdiags`` function has been changed.
It now agrees with the MATLAB(TM) function of the same name.
Numerous efficiency improvements to format conversions and sparse
matrix arithmetic have been made. Finally, this release contains
numerous bugfixes.
Statistics package
==================
Statistical functions for masked arrays have been added, and are
accessible through ``scipy.stats.mstats``. The functions are similar
to their counterparts in ``scipy.stats`` but they have not yet been
verified for identical interfaces and algorithms.
Several bugs were fixed for statistical functions, of those,
``kstest`` and ``percentileofscore`` gained new keyword arguments.
Added deprecation warning for ``mean``, ``median``, ``var``, ``std``,
``cov``, and ``corrcoef``. These functions should be replaced by their
numpy counterparts. Note, however, that some of the default options
differ between the ``scipy.stats`` and numpy versions of these
functions.
Numerous bug fixes to ``stats.distributions``: all generic methods now
work correctly, several methods in individual distributions were
corrected. However, a few issues remain with higher moments (``skew``,
``kurtosis``) and entropy. The maximum likelihood estimator, ``fit``,
does not work out-of-the-box for some distributions - in some cases,
starting values have to be carefully chosen, in other cases, the
generic implementation of the maximum likelihood method might not be
the numerically appropriate estimation method.
We expect more bugfixes, increases in numerical precision and
enhancements in the next release of scipy.
Reworking of IO package
=======================
The IO code in both NumPy and SciPy is being extensively
reworked. NumPy will be where basic code for reading and writing NumPy
arrays is located, while SciPy will house file readers and writers for
various data formats (data, audio, video, images, matlab, etc.).
Several functions in ``scipy.io`` have been deprecated and will be
removed in the 0.8.0 release including ``npfile``, ``save``, ``load``,
``create_module``, ``create_shelf``, ``objload``, ``objsave``,
``fopen``, ``read_array``, ``write_array``, ``fread``, ``fwrite``,
``bswap``, ``packbits``, ``unpackbits``, and ``convert_objectarray``.
Some of these functions have been replaced by NumPy's raw reading and
writing capabilities, memory-mapping capabilities, or array methods.
Others have been moved from SciPy to NumPy, since basic array reading
and writing capability is now handled by NumPy.
The Matlab (TM) file readers/writers have a number of improvements:
* default version 5
* v5 writers for structures, cell arrays, and objects
* v5 readers/writers for function handles and 64-bit integers
* new struct_as_record keyword argument to ``loadmat``, which loads
struct arrays in matlab as record arrays in numpy
* string arrays have ``dtype='U...'`` instead of ``dtype=object``
* ``loadmat`` no longer squeezes singleton dimensions, i.e.
``squeeze_me=False`` by default
New Hierarchical Clustering module
==================================
This module adds new hierarchical clustering functionality to the
``scipy.cluster`` package. The function interfaces are similar to the
functions provided by MATLAB(TM)'s Statistics Toolbox to help facilitate
easier migration to the NumPy/SciPy framework. Linkage methods
implemented include single, complete, average, weighted, centroid,
median, and ward.
In addition, several functions are provided for computing
inconsistency statistics, cophenetic distance, and maximum distance
between descendants. The ``fcluster`` and ``fclusterdata`` functions
transform a hierarchical clustering into a set of flat clusters. Since
these flat clusters are generated by cutting the tree into a forest of
trees, the ``leaders`` function takes a linkage and a flat clustering,
and finds the root of each tree in the forest. The ``ClusterNode``
class represents a hierarchical clustering as a field-navigable tree
object. ``to_tree`` converts a matrix-encoded hierarchical clustering
to a ``ClusterNode`` object. Routines for converting between MATLAB
and SciPy linkage encodings are provided. Finally, a ``dendrogram``
function plots hierarchical clusterings as a dendrogram, using
matplotlib.
New Spatial package
-------------------
The new spatial package contains a collection of spatial algorithms
and data structures, useful for spatial statistics and clustering
applications. It includes rapidly compiled code for computing exact
and approximate nearest neighbors, as well as a pure-python kd-tree
with the same interface, but that supports annotation and a variety of
other algorithms. The API for both modules may change somewhat, as
user requirements become clearer.
It also includes a ``distance`` module, containing a collection of
distance and dissimilarity functions for computing distances between
vectors, which is useful for spatial statistics, clustering, and
kd-trees. Distance and dissimilarity functions provided include
Bray-Curtis, Canberra, Chebyshev, City Block, Cosine, Dice, Euclidean,
Hamming, Jaccard, Kulsinski, Mahalanobis, Matching, Minkowski,
Rogers-Tanimoto, Russell-Rao, Squared Euclidean, Standardized
Euclidean, Sokal-Michener, Sokal-Sneath, and Yule.
The ``pdist`` function computes pairwise distance between all
unordered pairs of vectors in a set of vectors. The ``cdist`` computes
the distance on all pairs of vectors in the Cartesian product of two
sets of vectors. Pairwise distance matrices are stored in condensed
form; only the upper triangular is stored. ``squareform`` converts
distance matrices between square and condensed forms.
Reworked fftpack package
------------------------
FFTW2, FFTW3, MKL and DJBFFT wrappers have been removed. Only (NETLIB)
fftpack remains. By focusing on one backend, we hope to add new
features - like float32 support - more easily.
New Constants package
---------------------
``scipy.constants`` provides a collection of physical constants and
conversion factors. These constants are taken from CODATA Recommended
Values of the Fundamental Physical Constants: 2002. They may be found
at physics.nist.gov/constants. The values are stored in the dictionary
physical_constants as a tuple containing the value, the units, and the
relative precision - in that order. All constants are in SI units,
unless otherwise stated. Several helper functions are provided.
New Radial Basis Function module
--------------------------------
``scipy.interpolate`` now contains a Radial Basis Function module.
Radial basis functions can be used for smoothing/interpolating
scattered data in n-dimensions, but should be used with caution for
extrapolation outside of the observed data range.
New complex ODE integrator
--------------------------
``scipy.integrate.ode`` now contains a wrapper for the ZVODE
complex-valued ordinary differential equation solver (by Peter
N. Brown, Alan C. Hindmarsh, and George D. Byrne).
New generalized symmetric and hermitian eigenvalue problem solver
-----------------------------------------------------------------
``scipy.linalg.eigh`` now contains wrappers for more LAPACK symmetric
and hermitian eigenvalue problem solvers. Users can now solve
generalized problems, select a range of eigenvalues only, and choose
to use a faster algorithm at the expense of increased memory
usage. The signature of the ``scipy.linalg.eigh`` changed accordingly.
Bug fixes in the interpolation package
--------------------------------------
The shape of return values from ``scipy.interpolate.interp1d`` used to
be incorrect, if interpolated data had more than 2 dimensions and the
axis keyword was set to a non-default value. This has been fixed.
Moreover, ``interp1d`` returns now a scalar (0D-array) if the input
is a scalar. Users of ``scipy.interpolate.interp1d`` may need to
revise their code if it relies on the previous behavior.
Weave clean up
--------------
There were numerous improvements to ``scipy.weave``. ``blitz++`` was
relicensed by the author to be compatible with the SciPy license.
``wx_spec.py`` was removed.
Known problems
--------------
Here are known problems with scipy 0.7.0:
* weave test failures on windows: those are known, and are being revised.
* weave test failure with gcc 4.3 (std::labs): this is a gcc 4.3 bug. A
workaround is to add #include <cstdlib> in
scipy/weave/blitz/blitz/funcs.h (line 27). You can make the change in
the installed scipy (in site-packages).

View file

@ -1,88 +0,0 @@
=========================
SciPy 0.7.1 Release Notes
=========================
.. contents::
SciPy 0.7.1 is a bug-fix release with no new features compared to 0.7.0.
scipy.io
========
Bugs fixed:
- Several fixes in Matlab file IO
scipy.odr
=========
Bugs fixed:
- Work around a failure with Python 2.6
scipy.signal
============
A memory leak in lfilter has been fixed, as well as support for array objects
Bugs fixed:
- #880, #925: lfilter fixes
- #871: bicgstab fails on Win32
scipy.sparse
============
Bugs fixed:
- #883: scipy.io.mmread with scipy.sparse.lil_matrix broken
- lil_matrix and csc_matrix reject now unexpected sequences,
cf. http://thread.gmane.org/gmane.comp.python.scientific.user/19996
scipy.special
=============
Several bugs of varying severity were fixed in the special functions:
- #503, #640: iv: problems at large arguments fixed by new implementation
- #623: jv: fix errors at large arguments
- #679: struve: fix wrong output for v < 0
- #803: pbdv produces invalid output
- #804: lqmn: fix crashes on some input
- #823: betainc: fix documentation
- #834: exp1 strange behavior near negative integer values
- #852: jn_zeros: more accurate results for large s, also in jnp/yn/ynp_zeros
- #853: jv, yv, iv: invalid results for non-integer v < 0, complex x
- #854: jv, yv, iv, kv: return nan more consistently when out-of-domain
- #927: ellipj: fix segfault on Windows
- #946: ellpj: fix segfault on Mac OS X/python 2.6 combination.
- ive, jve, yve, kv, kve: with real-valued input, return nan for out-of-domain
instead of returning only the real part of the result.
Also, when ``scipy.special.errprint(1)`` has been enabled, warning
messages are now issued as Python warnings instead of printing them to
stderr.
scipy.stats
===========
- linregress, mannwhitneyu, describe: errors fixed
- kstwobign, norm, expon, exponweib, exponpow, frechet, genexpon, rdist,
truncexpon, planck: improvements to numerical accuracy in distributions
Windows binaries for python 2.6
===============================
python 2.6 binaries for windows are now included. The binary for python 2.5
requires numpy 1.2.0 or above, and the one for python 2.6 requires numpy
1.3.0 or above.
Universal build for scipy
=========================
Mac OS X binary installer is now a proper universal build, and does not depend
on gfortran anymore (libgfortran is statically linked). The python 2.5 version
of scipy requires numpy 1.2.0 or above, the python 2.6 version requires numpy
1.3.0 or above.

View file

@ -1,10 +0,0 @@
=========================
SciPy 0.7.2 Release Notes
=========================
.. contents::
SciPy 0.7.2 is a bug-fix release with no new features compared to 0.7.1. The
only change is that all C sources from Cython code have been regenerated with
Cython 0.12.1. This fixes the incompatibility between binaries of SciPy 0.7.1
and NumPy 1.4.

View file

@ -1,263 +0,0 @@
=========================
SciPy 0.8.0 Release Notes
=========================
.. contents::
SciPy 0.8.0 is the culmination of 17 months of hard work. It contains
many new features, numerous bug-fixes, improved test coverage and
better documentation. There have been a number of deprecations and
API changes in this release, which are documented below. All users
are encouraged to upgrade to this release, as there are a large number
of bug-fixes and optimizations. Moreover, our development attention
will now shift to bug-fix releases on the 0.8.x branch, and on adding
new features on the development trunk. This release requires Python
2.4 - 2.6 and NumPy 1.4.1 or greater.
Please note that SciPy is still considered to have "Beta" status, as
we work toward a SciPy 1.0.0 release. The 1.0.0 release will mark a
major milestone in the development of SciPy, after which changing the
package structure or API will be much more difficult. Whilst these
pre-1.0 releases are considered to have "Beta" status, we are
committed to making them as bug-free as possible.
However, until the 1.0 release, we are aggressively reviewing and
refining the functionality, organization, and interface. This is being
done in an effort to make the package as coherent, intuitive, and
useful as possible. To achieve this, we need help from the community
of users. Specifically, we need feedback regarding all aspects of the
project - everything - from which algorithms we implement, to details
about our function's call signatures.
Python 3
========
Python 3 compatibility is planned and is currently technically
feasible, since Numpy has been ported. However, since the Python 3
compatible Numpy 1.5 has not been released yet, support for Python 3
in Scipy is not yet included in Scipy 0.8. SciPy 0.9, planned for fall
2010, will very likely include experimental support for Python 3.
Major documentation improvements
================================
SciPy documentation is greatly improved.
Deprecated features
===================
Swapping inputs for correlation functions (scipy.signal)
--------------------------------------------------------
Concern correlate, correlate2d, convolve and convolve2d. If the second input is
larger than the first input, the inputs are swapped before calling the
underlying computation routine. This behavior is deprecated, and will be
removed in scipy 0.9.0.
Obsolete code deprecated (scipy.misc)
-------------------------------------
The modules `helpmod`, `ppimport` and `pexec` from `scipy.misc` are deprecated.
They will be removed from SciPy in version 0.9.
Additional deprecations
-----------------------
* linalg: The function `solveh_banded` currently returns a tuple containing
the Cholesky factorization and the solution to the linear system. In
SciPy 0.9, the return value will be just the solution.
* The function `constants.codata.find` will generate a DeprecationWarning.
In Scipy version 0.8.0, the keyword argument 'disp' was added to the
function, with the default value 'True'. In 0.9.0, the default will be
'False'.
* The `qshape` keyword argument of `signal.chirp` is deprecated. Use
the argument `vertex_zero` instead.
* Passing the coefficients of a polynomial as the argument `f0` to
`signal.chirp` is deprecated. Use the function `signal.sweep_poly`
instead.
* The `io.recaster` module has been deprecated and will be removed in 0.9.0.
New features
============
DCT support (scipy.fftpack)
---------------------------
New realtransforms have been added, namely dct and idct for Discrete Cosine
Transform; type I, II and III are available.
Single precision support for fft functions (scipy.fftpack)
----------------------------------------------------------
fft functions can now handle single precision inputs as well: fft(x) will
return a single precision array if x is single precision.
At the moment, for FFT sizes that are not composites of 2, 3, and 5, the
transform is computed internally in double precision to avoid rounding error in
FFTPACK.
Correlation functions now implement the usual definition (scipy.signal)
-----------------------------------------------------------------------
The outputs should now correspond to their matlab and R counterparts, and do
what most people expect if the old_behavior=False argument is passed:
* correlate, convolve and their 2d counterparts do not swap their inputs
depending on their relative shape anymore;
* correlation functions now conjugate their second argument while computing
the slided sum-products, which correspond to the usual definition of
correlation.
Additions and modification to LTI functions (scipy.signal)
----------------------------------------------------------
* The functions `impulse2` and `step2` were added to `scipy.signal`.
They use the function `scipy.signal.lsim2` to compute the impulse and
step response of a system, respectively.
* The function `scipy.signal.lsim2` was changed to pass any additional
keyword arguments to the ODE solver.
Improved waveform generators (scipy.signal)
-------------------------------------------
Several improvements to the `chirp` function in `scipy.signal` were made:
* The waveform generated when `method="logarithmic"` was corrected; it
now generates a waveform that is also known as an "exponential" or
"geometric" chirp. (See http://en.wikipedia.org/wiki/Chirp.)
* A new `chirp` method, "hyperbolic", was added.
* Instead of the keyword `qshape`, `chirp` now uses the keyword
`vertex_zero`, a boolean.
* `chirp` no longer handles an arbitrary polynomial. This functionality
has been moved to a new function, `sweep_poly`.
A new function, `sweep_poly`, was added.
New functions and other changes in scipy.linalg
-----------------------------------------------
The functions `cho_solve_banded`, `circulant`, `companion`, `hadamard` and
`leslie` were added to `scipy.linalg`.
The function `block_diag` was enhanced to accept scalar and 1D arguments,
along with the usual 2D arguments.
New function and changes in scipy.optimize
------------------------------------------
The `curve_fit` function has been added; it takes a function and uses
non-linear least squares to fit that to the provided data.
The `leastsq` and `fsolve` functions now return an array of size one instead of
a scalar when solving for a single parameter.
New sparse least squares solver
-------------------------------
The `lsqr` function was added to `scipy.sparse`. `This routine
<http://www.stanford.edu/group/SOL/software/lsqr.html>`_ finds a
least-squares solution to a large, sparse, linear system of equations.
ARPACK-based sparse SVD
-----------------------
A naive implementation of SVD for sparse matrices is available in
scipy.sparse.linalg.eigen.arpack. It is based on using a symmetric solver on
<A, A>, and as such may not be very precise.
Alternative behavior available for `scipy.constants.find`
---------------------------------------------------------
The keyword argument `disp` was added to the function `scipy.constants.find`,
with the default value `True`. When `disp` is `True`, the behavior is the
same as in Scipy version 0.7. When `False`, the function returns the list of
keys instead of printing them. (In SciPy version 0.9, the default will be
reversed.)
Incomplete sparse LU decompositions
-----------------------------------
Scipy now wraps SuperLU version 4.0, which supports incomplete sparse LU
decompositions. These can be accessed via `scipy.sparse.linalg.spilu`.
Upgrade to SuperLU 4.0 also fixes some known bugs.
Faster matlab file reader and default behavior change
------------------------------------------------------
We've rewritten the matlab file reader in Cython and it should now read
matlab files at around the same speed that Matlab does.
The reader reads matlab named and anonymous functions, but it can't
write them.
Until scipy 0.8.0 we have returned arrays of matlab structs as numpy
object arrays, where the objects have attributes named for the struct
fields. As of 0.8.0, we return matlab structs as numpy structured
arrays. You can get the older behavior by using the optional
``struct_as_record=False`` keyword argument to `scipy.io.loadmat` and
friends.
There is an inconsistency in the matlab file writer, in that it writes
numpy 1D arrays as column vectors in matlab 5 files, and row vectors in
matlab 4 files. We will change this in the next version, so both write
row vectors. There is a `FutureWarning` when calling the writer to warn
of this change; for now we suggest using the ``oned_as='row'`` keyword
argument to `scipy.io.savemat` and friends.
Faster evaluation of orthogonal polynomials
-------------------------------------------
Values of orthogonal polynomials can be evaluated with new vectorized functions
in `scipy.special`: `eval_legendre`, `eval_chebyt`, `eval_chebyu`,
`eval_chebyc`, `eval_chebys`, `eval_jacobi`, `eval_laguerre`,
`eval_genlaguerre`, `eval_hermite`, `eval_hermitenorm`,
`eval_gegenbauer`, `eval_sh_legendre`, `eval_sh_chebyt`,
`eval_sh_chebyu`, `eval_sh_jacobi`. This is faster than constructing the
full coefficient representation of the polynomials, which was previously the
only available way.
Note that the previous orthogonal polynomial routines will now also invoke this
feature, when possible.
Lambert W function
------------------
`scipy.special.lambertw` can now be used for evaluating the Lambert W
function.
Improved hypergeometric 2F1 function
------------------------------------
Implementation of `scipy.special.hyp2f1` for real parameters was revised.
The new version should produce accurate values for all real parameters.
More flexible interface for Radial basis function interpolation
---------------------------------------------------------------
The `scipy.interpolate.Rbf` class now accepts a callable as input for the
"function" argument, in addition to the built-in radial basis functions which
can be selected with a string argument.
Removed features
================
scipy.stsci: the package was removed
The module `scipy.misc.limits` was removed.
scipy.io
--------
The IO code in both NumPy and SciPy is being extensively
reworked. NumPy will be where basic code for reading and writing NumPy
arrays is located, while SciPy will house file readers and writers for
various data formats (data, audio, video, images, matlab, etc.).
Several functions in `scipy.io` are removed in the 0.8.0 release including:
`npfile`, `save`, `load`, `create_module`, `create_shelf`,
`objload`, `objsave`, `fopen`, `read_array`, `write_array`,
`fread`, `fwrite`, `bswap`, `packbits`, `unpackbits`, and
`convert_objectarray`. Some of these functions have been replaced by NumPy's
raw reading and writing capabilities, memory-mapping capabilities, or array
methods. Others have been moved from SciPy to NumPy, since basic array reading
and writing capability is now handled by NumPy.

View file

@ -1,230 +0,0 @@
=========================
SciPy 0.9.0 Release Notes
=========================
.. contents::
SciPy 0.9.0 is the culmination of 6 months of hard work. It contains
many new features, numerous bug-fixes, improved test coverage and
better documentation. There have been a number of deprecations and
API changes in this release, which are documented below. All users
are encouraged to upgrade to this release, as there are a large number
of bug-fixes and optimizations. Moreover, our development attention
will now shift to bug-fix releases on the 0.9.x branch, and on adding
new features on the development trunk.
This release requires Python 2.4 - 2.7 or 3.1 - and NumPy 1.5 or greater.
Please note that SciPy is still considered to have "Beta" status, as
we work toward a SciPy 1.0.0 release. The 1.0.0 release will mark a
major milestone in the development of SciPy, after which changing the
package structure or API will be much more difficult. Whilst these
pre-1.0 releases are considered to have "Beta" status, we are
committed to making them as bug-free as possible.
However, until the 1.0 release, we are aggressively reviewing and
refining the functionality, organization, and interface. This is being
done in an effort to make the package as coherent, intuitive, and
useful as possible. To achieve this, we need help from the community
of users. Specifically, we need feedback regarding all aspects of the
project - everything - from which algorithms we implement, to details
about our function's call signatures.
Python 3
========
Scipy 0.9.0 is the first SciPy release to support Python 3. The only module
that is not yet ported is ``scipy.weave``.
Scipy source code location to be changed
========================================
Soon after this release, Scipy will stop using SVN as the version control
system, and move to Git. The development source code for Scipy can from then on
be found at
http://github.com/scipy/scipy
New features
============
Delaunay tessellations (``scipy.spatial``)
------------------------------------------
Scipy now includes routines for computing Delaunay tessellations in N
dimensions, powered by the Qhull_ computational geometry library. Such
calculations can now make use of the new ``scipy.spatial.Delaunay``
interface.
.. _Qhull: http://www.qhull.org/
N-dimensional interpolation (``scipy.interpolate``)
---------------------------------------------------
Support for scattered data interpolation is now significantly
improved. This version includes a ``scipy.interpolate.griddata``
function that can perform linear and nearest-neighbour interpolation
for N-dimensional scattered data, in addition to cubic spline
(C1-smooth) interpolation in 2D and 1D. An object-oriented interface
to each interpolator type is also available.
Nonlinear equation solvers (``scipy.optimize``)
-----------------------------------------------
Scipy includes new routines for large-scale nonlinear equation solving
in ``scipy.optimize``. The following methods are implemented:
* Newton-Krylov (``scipy.optimize.newton_krylov``)
* (Generalized) secant methods:
- Limited-memory Broyden methods (``scipy.optimize.broyden1``,
``scipy.optimize.broyden2``)
- Anderson method (``scipy.optimize.anderson``)
* Simple iterations (``scipy.optimize.diagbroyden``,
``scipy.optimize.excitingmixing``, ``scipy.optimize.linearmixing``)
The ``scipy.optimize.nonlin`` module was completely rewritten, and
some of the functions were deprecated (see above).
New linear algebra routines (``scipy.linalg``)
----------------------------------------------
Scipy now contains routines for effectively solving triangular
equation systems (``scipy.linalg.solve_triangular``).
Improved FIR filter design functions (``scipy.signal``)
-------------------------------------------------------
The function ``scipy.signal.firwin`` was enhanced to allow the
design of highpass, bandpass, bandstop and multi-band FIR filters.
The function ``scipy.signal.firwin2`` was added. This function
uses the window method to create a linear phase FIR filter with
an arbitrary frequency response.
The functions ``scipy.signal.kaiser_atten`` and ``scipy.signal.kaiser_beta``
were added.
Improved statistical tests (``scipy.stats``)
--------------------------------------------
A new function ``scipy.stats.fisher_exact`` was added, that provides Fisher's
exact test for 2x2 contingency tables.
The function ``scipy.stats.kendalltau`` was rewritten to make it much faster
(O(n log(n)) vs O(n^2)).
Deprecated features
===================
Obsolete nonlinear solvers (in ``scipy.optimize``)
--------------------------------------------------
The following nonlinear solvers from ``scipy.optimize`` are
deprecated:
- ``broyden_modified`` (bad performance)
- ``broyden1_modified`` (bad performance)
- ``broyden_generalized`` (equivalent to ``anderson``)
- ``anderson2`` (equivalent to ``anderson``)
- ``broyden3`` (obsoleted by new limited-memory broyden methods)
- ``vackar`` (renamed to ``diagbroyden``)
Removed features
================
The deprecated modules ``helpmod``, ``pexec`` and ``ppimport`` were removed
from ``scipy.misc``.
The ``output_type`` keyword in many ``scipy.ndimage`` interpolation functions
has been removed.
The ``econ`` keyword in ``scipy.linalg.qr`` has been removed. The same
functionality is still available by specifying ``mode='economic'``.
Old correlate/convolve behavior (in ``scipy.signal``)
-----------------------------------------------------
The old behavior for ``scipy.signal.convolve``, ``scipy.signal.convolve2d``,
``scipy.signal.correlate`` and ``scipy.signal.correlate2d`` was deprecated in
0.8.0 and has now been removed. Convolve and correlate used to swap their
arguments if the second argument has dimensions larger than the first one, and
the mode was relative to the input with the largest dimension. The current
behavior is to never swap the inputs, which is what most people expect, and is
how correlation is usually defined.
``scipy.stats``
---------------
Many functions in ``scipy.stats`` that are either available from numpy or have
been superseded, and have been deprecated since version 0.7, have been removed:
`std`, `var`, `mean`, `median`, `cov`, `corrcoef`, `z`, `zs`, `stderr`,
`samplestd`, `samplevar`, `pdfapprox`, `pdf_moments` and `erfc`. These changes
are mirrored in ``scipy.stats.mstats``.
``scipy.sparse``
----------------
Several methods of the sparse matrix classes in ``scipy.sparse`` which had
been deprecated since version 0.7 were removed: `save`, `rowcol`, `getdata`,
`listprint`, `ensure_sorted_indices`, `matvec`, `matmat` and `rmatvec`.
The functions ``spkron``, ``speye``, ``spidentity``, ``lil_eye`` and
``lil_diags`` were removed from ``scipy.sparse``. The first three functions
are still available as ``scipy.sparse.kron``, ``scipy.sparse.eye`` and
``scipy.sparse.identity``.
The `dims` and `nzmax` keywords were removed from the sparse matrix
constructor. The `colind` and `rowind` attributes were removed from CSR and CSC
matrices respectively.
``scipy.sparse.linalg.arpack.speigs``
-------------------------------------
A duplicated interface to the ARPACK library was removed.
Other changes
=============
ARPACK interface changes
------------------------
The interface to the ARPACK eigenvalue routines in
``scipy.sparse.linalg`` was changed for more robustness.
The eigenvalue and SVD routines now raise ``ArpackNoConvergence`` if
the eigenvalue iteration fails to converge. If partially converged results
are desired, they can be accessed as follows::
import numpy as np
from scipy.sparse.linalg import eigs, ArpackNoConvergence
m = np.random.randn(30, 30)
try:
w, v = eigs(m, 6)
except ArpackNoConvergence, err:
partially_converged_w = err.eigenvalues
partially_converged_v = err.eigenvectors
Several bugs were also fixed.
The routines were moreover renamed as follows:
- eigen --> eigs
- eigen_symmetric --> eigsh
- svd --> svds

View file

@ -1,180 +0,0 @@
@import "default.css";
/**
* Spacing fixes
*/
div.body p, div.body dd, div.body li {
line-height: 125%;
}
ul.simple {
margin-top: 0;
margin-bottom: 0;
padding-top: 0;
padding-bottom: 0;
}
/* spacing around blockquoted fields in parameters/attributes/returns */
td.field-body > blockquote {
margin-top: 0.1em;
margin-bottom: 0.5em;
}
/* spacing around example code */
div.highlight > pre {
padding: 2px 5px 2px 5px;
}
/* spacing in see also definition lists */
dl.last > dd {
margin-top: 1px;
margin-bottom: 5px;
margin-left: 30px;
}
/**
* Hide dummy toctrees
*/
ul {
padding-top: 0;
padding-bottom: 0;
margin-top: 0;
margin-bottom: 0;
}
ul li {
padding-top: 0;
padding-bottom: 0;
margin-top: 0;
margin-bottom: 0;
}
ul li a.reference {
padding-top: 0;
padding-bottom: 0;
margin-top: 0;
margin-bottom: 0;
}
/**
* Make high-level subsections easier to distinguish from top-level ones
*/
div.body h3 {
background-color: transparent;
}
div.body h4 {
border: none;
background-color: transparent;
}
/**
* Scipy colors
*/
body {
background-color: rgb(100,135,220);
}
div.document {
background-color: rgb(230,230,230);
}
div.sphinxsidebar {
background-color: rgb(230,230,230);
overflow: hidden;
}
div.related {
background-color: rgb(100,135,220);
}
div.sphinxsidebar h3 {
color: rgb(0,102,204);
}
div.sphinxsidebar h3 a {
color: rgb(0,102,204);
}
div.sphinxsidebar h4 {
color: rgb(0,82,194);
}
div.sphinxsidebar p {
color: black;
}
div.sphinxsidebar a {
color: #355f7c;
}
div.sphinxsidebar ul.want-points {
list-style: disc;
}
.field-list th {
color: rgb(0,102,204);
}
/**
* Extra admonitions
*/
div.tip {
background-color: #ffffe4;
border: 1px solid #ee6;
}
div.plot-output {
clear-after: both;
}
div.plot-output .figure {
float: left;
text-align: center;
margin-bottom: 0;
padding-bottom: 0;
}
div.plot-output .caption {
margin-top: 2;
padding-top: 0;
}
div.plot-output:after {
content: "";
display: block;
height: 0;
clear: both;
}
/*
div.admonition-example {
background-color: #e4ffe4;
border: 1px solid #ccc;
}*/
/**
* Styling for field lists
*/
table.field-list th {
border-left: 1px solid #aaa !important;
padding-left: 5px;
}
table.field-list {
border-collapse: separate;
border-spacing: 10px;
}
/**
* Styling for footnotes
*/
table.footnote td, table.footnote th {
border: none;
}

View file

@ -1,186 +0,0 @@
@import "default.css";
/**
* Spacing fixes
*/
div.body p, div.body dd, div.body li {
line-height: 125%;
}
ul.simple {
margin-top: 0;
margin-bottom: 0;
padding-top: 0;
padding-bottom: 0;
}
/* spacing around blockquoted fields in parameters/attributes/returns */
td.field-body > blockquote {
margin-top: 0.1em;
margin-bottom: 0.5em;
}
/* spacing around example code */
div.highlight > pre {
padding: 2px 5px 2px 5px;
}
/* spacing in see also definition lists */
dl.last > dd {
margin-top: 1px;
margin-bottom: 5px;
margin-left: 30px;
}
/* hide overflowing content in the sidebar */
div.sphinxsidebarwrapper p.topless {
overflow: hidden;
}
/**
* Hide dummy toctrees
*/
ul {
padding-top: 0;
padding-bottom: 0;
margin-top: 0;
margin-bottom: 0;
}
ul li {
padding-top: 0;
padding-bottom: 0;
margin-top: 0;
margin-bottom: 0;
}
ul li a.reference {
padding-top: 0;
padding-bottom: 0;
margin-top: 0;
margin-bottom: 0;
}
/**
* Make high-level subsections easier to distinguish from top-level ones
*/
div.body h3 {
background-color: transparent;
}
div.body h4 {
border: none;
background-color: transparent;
}
/**
* Scipy colors
*/
body {
background-color: rgb(100,135,220);
}
div.document {
background-color: rgb(230,230,230);
}
div.sphinxsidebar {
background-color: rgb(230,230,230);
overflow: hidden;
}
div.related {
background-color: rgb(100,135,220);
}
div.sphinxsidebar h3 {
color: rgb(0,102,204);
}
div.sphinxsidebar h3 a {
color: rgb(0,102,204);
}
div.sphinxsidebar h4 {
color: rgb(0,82,194);
}
div.sphinxsidebar p {
color: black;
}
div.sphinxsidebar a {
color: #355f7c;
}
div.sphinxsidebar ul.want-points {
list-style: disc;
}
.field-list th {
color: rgb(0,102,204);
white-space: nowrap;
}
/**
* Extra admonitions
*/
div.tip {
background-color: #ffffe4;
border: 1px solid #ee6;
}
div.plot-output {
clear-after: both;
}
div.plot-output .figure {
float: left;
text-align: center;
margin-bottom: 0;
padding-bottom: 0;
}
div.plot-output .caption {
margin-top: 2;
padding-top: 0;
}
div.plot-output:after {
content: "";
display: block;
height: 0;
clear: both;
}
/*
div.admonition-example {
background-color: #e4ffe4;
border: 1px solid #ccc;
}*/
/**
* Styling for field lists
*/
table.field-list th {
border-left: 1px solid #aaa !important;
padding-left: 5px;
}
table.field-list {
border-collapse: separate;
border-spacing: 10px;
}
/**
* Styling for footnotes
*/
table.footnote td, table.footnote th {
border: none;
}

Binary file not shown.

Before

Width:  |  Height:  |  Size: 18 KiB

View file

@ -1,43 +0,0 @@
{% extends "!autosummary/class.rst" %}
{% block methods %}
{% if methods %}
<<<<<<< HEAD
.. HACK
.. autosummary::
:toctree:
{% for item in methods %}
{{ name }}.{{ item }}
=======
.. HACK -- the point here is that we don't want this to appear in the output, but the autosummary should still generate the pages.
.. autosummary::
:toctree:
{% for item in all_methods %}
{%- if not item.startswith('_') or item in ['__call__'] %}
{{ name }}.{{ item }}
{%- endif -%}
>>>>>>> 85b42d3bbdcef5cbe0fe2390bba8b3ff1608040b
{%- endfor %}
{% endif %}
{% endblock %}
{% block attributes %}
{% if attributes %}
<<<<<<< HEAD
.. HACK
.. autosummary::
:toctree:
{% for item in attributes %}
{{ name }}.{{ item }}
=======
.. HACK -- the point here is that we don't want this to appear in the output, but the autosummary should still generate the pages.
.. autosummary::
:toctree:
{% for item in all_attributes %}
{%- if not item.startswith('_') %}
{{ name }}.{{ item }}
{%- endif -%}
>>>>>>> 85b42d3bbdcef5cbe0fe2390bba8b3ff1608040b
{%- endfor %}
{% endif %}
{% endblock %}

View file

@ -1,5 +0,0 @@
<h3>Resources</h3>
<ul>
<li><a href="http://scipy.org/">Scipy.org website</a></li>
<li>&nbsp;</li>
</ul>

View file

@ -1,14 +0,0 @@
{% extends "!layout.html" %}
{% block sidebarsearch %}
{%- if sourcename %}
<ul class="this-page-menu">
{%- if 'generated/' in sourcename %}
<li><a href="/scipy/docs/{{ sourcename.replace('generated/', '').replace('.txt', '') |e }}">{{_('Edit page')}}</a></li>
{%- else %}
<li><a href="/scipy/docs/scipy-docs/{{ sourcename.replace('.txt', '.rst') |e }}">{{_('Edit page')}}</a></li>
{%- endif %}
</ul>
{%- endif %}
{{ super() }}
{% endblock %}

View file

@ -1 +0,0 @@
.. include:: ../API.rst.txt

View file

@ -1,14 +0,0 @@
<<<<<<< HEAD
========================================================
Hierarchical clustering (:mod:`scipy.cluster.hierarchy`)
========================================================
.. warning::
This documentation is work-in-progress and unorganized.
.. automodule:: scipy.cluster.hierarchy
:members:
=======
.. automodule:: scipy.cluster.hierarchy
>>>>>>> 85b42d3bbdcef5cbe0fe2390bba8b3ff1608040b

View file

@ -1,20 +0,0 @@
<<<<<<< HEAD
=========================================
Clustering package (:mod:`scipy.cluster`)
=========================================
.. toctree::
cluster.hierarchy
cluster.vq
.. automodule:: scipy.cluster
=======
.. automodule:: scipy.cluster
.. toctree::
:hidden:
cluster.vq
cluster.hierarchy
>>>>>>> 85b42d3bbdcef5cbe0fe2390bba8b3ff1608040b

View file

@ -1,10 +0,0 @@
<<<<<<< HEAD
====================================================================
K-means clustering and vector quantization (:mod:`scipy.cluster.vq`)
====================================================================
.. automodule:: scipy.cluster.vq
:members:
=======
.. automodule:: scipy.cluster.vq
>>>>>>> 85b42d3bbdcef5cbe0fe2390bba8b3ff1608040b

View file

@ -1,520 +0,0 @@
# -*- coding: utf-8 -*-
import sys, os, re
<<<<<<< HEAD
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.insert(0, os.path.abspath('../sphinxext'))
# Check Sphinx version
import sphinx
if sphinx.__version__ < "0.5":
raise RuntimeError("Sphinx 0.5.dev or newer required")
=======
# Check Sphinx version
import sphinx
if sphinx.__version__ < "1.1":
raise RuntimeError("Sphinx 1.1 or newer required")
needs_sphinx = '1.1'
>>>>>>> 85b42d3bbdcef5cbe0fe2390bba8b3ff1608040b
# -----------------------------------------------------------------------------
# General configuration
# -----------------------------------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
<<<<<<< HEAD
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.pngmath', 'numpydoc',
'sphinx.ext.intersphinx', 'sphinx.ext.coverage', 'plot_directive']
if sphinx.__version__ >= "0.7":
extensions.append('sphinx.ext.autosummary')
else:
extensions.append('autosummary')
extensions.append('only_directives')
=======
sys.path.insert(0, os.path.abspath('../sphinxext'))
sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.mathjax', 'numpydoc',
'sphinx.ext.intersphinx', 'sphinx.ext.coverage',
'sphinx.ext.autosummary', 'scipyoptdoc']
# Determine if the matplotlib has a recent enough version of the
# plot_directive.
try:
from matplotlib.sphinxext import plot_directive
except ImportError:
use_matplotlib_plot_directive = False
else:
try:
use_matplotlib_plot_directive = (plot_directive.__version__ >= 2)
except AttributeError:
use_matplotlib_plot_directive = False
if use_matplotlib_plot_directive:
extensions.append('matplotlib.sphinxext.plot_directive')
else:
raise RuntimeError("You need a recent enough version of matplotlib")
>>>>>>> 85b42d3bbdcef5cbe0fe2390bba8b3ff1608040b
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General substitutions.
project = 'SciPy'
<<<<<<< HEAD
copyright = '2008-2009, The Scipy community'
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
import scipy
# The short X.Y version (including the .devXXXX suffix if present)
version = re.sub(r'^(\d+\.\d+)\.\d+(.*)', r'\1\2', scipy.__version__)
if 'dev' in version:
# retain the .dev suffix, but clean it up
version = re.sub(r'(\.dev\d*).*?$', r'\1', version)
else:
# strip all other suffixes
version = re.sub(r'^(\d+\.\d+).*?$', r'\1', version)
# The full version, including alpha/beta/rc tags.
release = scipy.__version__
print "Scipy (VERSION %s) (RELEASE %s)" % (version, release)
=======
copyright = '2008-2014, The Scipy community'
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
import scipy
version = re.sub(r'\.dev-.*$', r'.dev', scipy.__version__)
release = scipy.__version__
print "Scipy (VERSION %s)" % (version,)
>>>>>>> 85b42d3bbdcef5cbe0fe2390bba8b3ff1608040b
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# The reST default role (used for this markup: `text`) to use for all documents.
default_role = "autolink"
# List of directories, relative to source directories, that shouldn't be searched
# for source files.
exclude_dirs = []
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -----------------------------------------------------------------------------
# HTML output
# -----------------------------------------------------------------------------
<<<<<<< HEAD
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'scipy.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "%s v%s Reference Guide (DRAFT)" % (project, version)
# The name of an image file (within the static path) to place at the top of
# the sidebar.
html_logo = '_static/scipyshiny_small.png'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# Correct index page
#html_index = "index"
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'index': 'indexsidebar.html'
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = {}
# If false, no module index is generated.
html_use_modindex = True
# If true, the reST sources are included in the HTML build as _sources/<name>.
#html_copy_source = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".html").
html_file_suffix = '.html'
# Output file base name for HTML help builder.
htmlhelp_basename = 'scipy'
# Pngmath should try to align formulas properly
pngmath_use_preview = True
=======
themedir = os.path.join(os.pardir, 'scipy-sphinx-theme', '_theme')
if os.path.isdir(themedir):
html_theme = 'scipy'
html_theme_path = [themedir]
if 'scipyorg' in tags:
# Build for the scipy.org website
html_theme_options = {
"edit_link": True,
"sidebar": "right",
"scipy_org_logo": True,
"rootlinks": [("http://scipy.org/", "Scipy.org"),
("http://docs.scipy.org/", "Docs")]
}
else:
# Default build
html_theme_options = {
"edit_link": False,
"sidebar": "left",
"scipy_org_logo": False,
"rootlinks": []
}
html_logo = '_static/scipyshiny_small.png'
html_sidebars = {'index': 'indexsidebar.html'}
else:
# Build without scipy.org sphinx theme present
if 'scipyorg' in tags:
raise RuntimeError("Get the scipy-sphinx-theme first, "
"via git submodule init & update")
else:
html_style = 'scipy_fallback.css'
html_logo = '_static/scipyshiny_small.png'
html_sidebars = {'index': 'indexsidebar.html'}
html_title = "%s v%s Reference Guide" % (project, version)
html_static_path = ['_static']
html_last_updated_fmt = '%b %d, %Y'
html_additional_pages = {}
html_use_modindex = True
html_copy_source = False
html_file_suffix = '.html'
htmlhelp_basename = 'scipy'
pngmath_use_preview = True
pngmath_dvipng_args = ['-gamma', '1.5', '-D', '96', '-bg', 'Transparent']
>>>>>>> 85b42d3bbdcef5cbe0fe2390bba8b3ff1608040b
# -----------------------------------------------------------------------------
# LaTeX output
# -----------------------------------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
_stdauthor = 'Written by the SciPy community'
latex_documents = [
('index', 'scipy-ref.tex', 'SciPy Reference Guide', _stdauthor, 'manual'),
# ('user/index', 'scipy-user.tex', 'SciPy User Guide',
# _stdauthor, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
latex_preamble = r'''
\usepackage{amsmath}
<<<<<<< HEAD
\DeclareUnicodeCharacter{00A0}{\nobreakspace}
% In the parameters section, place a newline after the Parameters
% header
\usepackage{expdlist}
\let\latexdescription=\description
\def\description{\latexdescription{}{} \breaklabel}
% Make Examples/etc section headers smaller and more compact
\makeatletter
\titleformat{\paragraph}{\normalsize\py@HeaderFamily}%
{\py@TitleColor}{0em}{\py@TitleColor}{\py@NormalColor}
\titlespacing*{\paragraph}{0pt}{1ex}{0pt}
\makeatother
=======
\DeclareUnicodeCharacter{00A0}{\nobreakspace}
% In the parameters etc. sections, align uniformly, and adjust label emphasis
\usepackage{expdlist}
\let\latexdescription=\description
\let\endlatexdescription=\enddescription
\renewenvironment{description}%
{\begin{latexdescription}[\setleftmargin{60pt}\breaklabel\setlabelstyle{\bfseries\itshape}]}%
{\end{latexdescription}}
% Make Examples/etc section headers smaller and more compact
\makeatletter
\titleformat{\paragraph}{\normalsize\normalfont\bfseries\itshape}%
{\py@NormalColor}{0em}{\py@NormalColor}{\py@NormalColor}
\titlespacing*{\paragraph}{0pt}{1ex}{0pt}
\makeatother
% Save vertical space in parameter lists and elsewhere
\makeatletter
\renewenvironment{quote}%
{\list{}{\topsep=0pt%
\parsep \z@ \@plus\p@}%
\item\relax}%
{\endlist}
\makeatother
>>>>>>> 85b42d3bbdcef5cbe0fe2390bba8b3ff1608040b
% Fix footer/header
\renewcommand{\chaptermark}[1]{\markboth{\MakeUppercase{\thechapter.\ #1}}{}}
\renewcommand{\sectionmark}[1]{\markright{\MakeUppercase{\thesection.\ #1}}}
'''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_use_modindex = False
# -----------------------------------------------------------------------------
# Intersphinx configuration
# -----------------------------------------------------------------------------
intersphinx_mapping = {
'http://docs.python.org/dev': None,
'http://docs.scipy.org/doc/numpy': None,
}
# -----------------------------------------------------------------------------
# Numpy extensions
# -----------------------------------------------------------------------------
# If we want to do a phantom import from an XML file for all autodocs
phantom_import_file = 'dump.xml'
<<<<<<< HEAD
# Edit links
#numpydoc_edit_link = '`Edit </pydocweb/doc/%(full_name)s/>`__'
=======
# Generate plots for example sections
numpydoc_use_plots = True
>>>>>>> 85b42d3bbdcef5cbe0fe2390bba8b3ff1608040b
# -----------------------------------------------------------------------------
# Autosummary
# -----------------------------------------------------------------------------
<<<<<<< HEAD
if sphinx.__version__ >= "0.7":
=======
if sphinx.__version__ >= "0.7":
>>>>>>> 85b42d3bbdcef5cbe0fe2390bba8b3ff1608040b
import glob
autosummary_generate = glob.glob("*.rst")
# -----------------------------------------------------------------------------
# Coverage checker
# -----------------------------------------------------------------------------
coverage_ignore_modules = r"""
""".split()
coverage_ignore_functions = r"""
test($|_) (some|all)true bitwise_not cumproduct pkgload
generic\.
""".split()
coverage_ignore_classes = r"""
""".split()
coverage_c_path = []
coverage_c_regexes = {}
coverage_ignore_c_items = {}
#------------------------------------------------------------------------------
# Plot
#------------------------------------------------------------------------------
plot_pre_code = """
import numpy as np
<<<<<<< HEAD
import scipy as sp
np.random.seed(123)
"""
plot_include_source = True
plot_formats = [('png', 100), 'pdf']
=======
np.random.seed(123)
"""
plot_include_source = True
plot_formats = [('png', 96), 'pdf']
plot_html_show_formats = False
>>>>>>> 85b42d3bbdcef5cbe0fe2390bba8b3ff1608040b
import math
phi = (math.sqrt(5) + 1)/2
<<<<<<< HEAD
import matplotlib
matplotlib.rcParams.update({
'font.size': 8,
'axes.titlesize': 8,
'axes.labelsize': 8,
'xtick.labelsize': 8,
'ytick.labelsize': 8,
'legend.fontsize': 8,
=======
font_size = 13*72/96.0 # 13 px
plot_rcparams = {
'font.size': font_size,
'axes.titlesize': font_size,
'axes.labelsize': font_size,
'xtick.labelsize': font_size,
'ytick.labelsize': font_size,
'legend.fontsize': font_size,
>>>>>>> 85b42d3bbdcef5cbe0fe2390bba8b3ff1608040b
'figure.figsize': (3*phi, 3),
'figure.subplot.bottom': 0.2,
'figure.subplot.left': 0.2,
'figure.subplot.right': 0.9,
'figure.subplot.top': 0.85,
'figure.subplot.wspace': 0.4,
'text.usetex': False,
<<<<<<< HEAD
})
=======
}
if not use_matplotlib_plot_directive:
import matplotlib
matplotlib.rcParams.update(plot_rcparams)
# -----------------------------------------------------------------------------
# Source code links
# -----------------------------------------------------------------------------
import inspect
from os.path import relpath, dirname
for name in ['sphinx.ext.linkcode', 'linkcode', 'numpydoc.linkcode']:
try:
__import__(name)
extensions.append(name)
break
except ImportError:
pass
else:
print "NOTE: linkcode extension not found -- no links to source generated"
def linkcode_resolve(domain, info):
"""
Determine the URL corresponding to Python object
"""
if domain != 'py':
return None
modname = info['module']
fullname = info['fullname']
submod = sys.modules.get(modname)
if submod is None:
return None
obj = submod
for part in fullname.split('.'):
try:
obj = getattr(obj, part)
except:
return None
try:
fn = inspect.getsourcefile(obj)
except:
fn = None
if not fn:
try:
fn = inspect.getsourcefile(sys.modules[obj.__module__])
except:
fn = None
if not fn:
return None
try:
source, lineno = inspect.getsourcelines(obj)
except:
lineno = None
if lineno:
linespec = "#L%d-L%d" % (lineno, lineno + len(source) - 1)
else:
linespec = ""
fn = relpath(fn, start=dirname(scipy.__file__))
if 'dev' in scipy.__version__:
return "http://github.com/scipy/scipy/blob/master/scipy/%s%s" % (
fn, linespec)
else:
return "http://github.com/scipy/scipy/blob/v%s/scipy/%s%s" % (
scipy.__version__, fn, linespec)
>>>>>>> 85b42d3bbdcef5cbe0fe2390bba8b3ff1608040b

View file

@ -1,586 +0,0 @@
<<<<<<< HEAD
==================================
Constants (:mod:`scipy.constants`)
==================================
.. module:: scipy.constants
Physical and mathematical constants and units.
Mathematical constants
======================
============ =================================================================
``pi`` Pi
``golden`` Golden ratio
============ =================================================================
Physical constants
==================
============= =================================================================
``c`` speed of light in vacuum
``mu_0`` the magnetic constant :math:`\mu_0`
``epsilon_0`` the electric constant (vacuum permittivity), :math:`\epsilon_0`
``h`` the Planck constant :math:`h`
``hbar`` :math:`\hbar = h/(2\pi)`
``G`` Newtonian constant of gravitation
``g`` standard acceleration of gravity
``e`` elementary charge
``R`` molar gas constant
``alpha`` fine-structure constant
``N_A`` Avogadro constant
``k`` Boltzmann constant
``sigma`` Stefan-Boltzmann constant :math:`\sigma`
``Wien`` Wien displacement law constant
``Rydberg`` Rydberg constant
``m_e`` electron mass
``m_p`` proton mass
``m_n`` neutron mass
============= =================================================================
Constants database
==================
In addition to the above variables containing physical constants,
:mod:`scipy.constants` also contains a database of additional physical
constants.
.. autosummary::
:toctree: generated/
value
unit
precision
find
.. data:: physical_constants
Dictionary of physical constants, of the format
``physical_constants[name] = (value, unit, uncertainty)``.
Available constants:
====================================================================== ====
``alpha particle mass``
``alpha particle mass energy equivalent``
``alpha particle mass energy equivalent in MeV``
``alpha particle mass in u``
``alpha particle molar mass``
``alpha particle-electron mass ratio``
``alpha particle-proton mass ratio``
``Angstrom star``
``atomic mass constant``
``atomic mass constant energy equivalent``
``atomic mass constant energy equivalent in MeV``
``atomic mass unit-electron volt relationship``
``atomic mass unit-hartree relationship``
``atomic mass unit-hertz relationship``
``atomic mass unit-inverse meter relationship``
``atomic mass unit-joule relationship``
``atomic mass unit-kelvin relationship``
``atomic mass unit-kilogram relationship``
``atomic unit of 1st hyperpolarizablity``
``atomic unit of 2nd hyperpolarizablity``
``atomic unit of action``
``atomic unit of charge``
``atomic unit of charge density``
``atomic unit of current``
``atomic unit of electric dipole moment``
``atomic unit of electric field``
``atomic unit of electric field gradient``
``atomic unit of electric polarizablity``
``atomic unit of electric potential``
``atomic unit of electric quadrupole moment``
``atomic unit of energy``
``atomic unit of force``
``atomic unit of length``
``atomic unit of magnetic dipole moment``
``atomic unit of magnetic flux density``
``atomic unit of magnetizability``
``atomic unit of mass``
``atomic unit of momentum``
``atomic unit of permittivity``
``atomic unit of time``
``atomic unit of velocity``
``Avogadro constant``
``Bohr magneton``
``Bohr magneton in eV/T``
``Bohr magneton in Hz/T``
``Bohr magneton in inverse meters per tesla``
``Bohr magneton in K/T``
``Bohr radius``
``Boltzmann constant``
``Boltzmann constant in eV/K``
``Boltzmann constant in Hz/K``
``Boltzmann constant in inverse meters per kelvin``
``characteristic impedance of vacuum``
``classical electron radius``
``Compton wavelength``
``Compton wavelength over 2 pi``
``conductance quantum``
``conventional value of Josephson constant``
``conventional value of von Klitzing constant``
``Cu x unit``
``deuteron magnetic moment``
``deuteron magnetic moment to Bohr magneton ratio``
``deuteron magnetic moment to nuclear magneton ratio``
``deuteron mass``
``deuteron mass energy equivalent``
``deuteron mass energy equivalent in MeV``
``deuteron mass in u``
``deuteron molar mass``
``deuteron rms charge radius``
``deuteron-electron magnetic moment ratio``
``deuteron-electron mass ratio``
``deuteron-neutron magnetic moment ratio``
``deuteron-proton magnetic moment ratio``
``deuteron-proton mass ratio``
``electric constant``
``electron charge to mass quotient``
``electron g factor``
``electron gyromagnetic ratio``
``electron gyromagnetic ratio over 2 pi``
``electron magnetic moment``
``electron magnetic moment anomaly``
``electron magnetic moment to Bohr magneton ratio``
``electron magnetic moment to nuclear magneton ratio``
``electron mass``
``electron mass energy equivalent``
``electron mass energy equivalent in MeV``
``electron mass in u``
``electron molar mass``
``electron to alpha particle mass ratio``
``electron to shielded helion magnetic moment ratio``
``electron to shielded proton magnetic moment ratio``
``electron volt``
``electron volt-atomic mass unit relationship``
``electron volt-hartree relationship``
``electron volt-hertz relationship``
``electron volt-inverse meter relationship``
``electron volt-joule relationship``
``electron volt-kelvin relationship``
``electron volt-kilogram relationship``
``electron-deuteron magnetic moment ratio``
``electron-deuteron mass ratio``
``electron-muon magnetic moment ratio``
``electron-muon mass ratio``
``electron-neutron magnetic moment ratio``
``electron-neutron mass ratio``
``electron-proton magnetic moment ratio``
``electron-proton mass ratio``
``electron-tau mass ratio``
``elementary charge``
``elementary charge over h``
``Faraday constant``
``Faraday constant for conventional electric current``
``Fermi coupling constant``
``fine-structure constant``
``first radiation constant``
``first radiation constant for spectral radiance``
``Hartree energy``
``Hartree energy in eV``
``hartree-atomic mass unit relationship``
``hartree-electron volt relationship``
``hartree-hertz relationship``
``hartree-inverse meter relationship``
``hartree-joule relationship``
``hartree-kelvin relationship``
``hartree-kilogram relationship``
``helion mass``
``helion mass energy equivalent``
``helion mass energy equivalent in MeV``
``helion mass in u``
``helion molar mass``
``helion-electron mass ratio``
``helion-proton mass ratio``
``hertz-atomic mass unit relationship``
``hertz-electron volt relationship``
``hertz-hartree relationship``
``hertz-inverse meter relationship``
``hertz-joule relationship``
``hertz-kelvin relationship``
``hertz-kilogram relationship``
``inverse fine-structure constant``
``inverse meter-atomic mass unit relationship``
``inverse meter-electron volt relationship``
``inverse meter-hartree relationship``
``inverse meter-hertz relationship``
``inverse meter-joule relationship``
``inverse meter-kelvin relationship``
``inverse meter-kilogram relationship``
``inverse of conductance quantum``
``Josephson constant``
``joule-atomic mass unit relationship``
``joule-electron volt relationship``
``joule-hartree relationship``
``joule-hertz relationship``
``joule-inverse meter relationship``
``joule-kelvin relationship``
``joule-kilogram relationship``
``kelvin-atomic mass unit relationship``
``kelvin-electron volt relationship``
``kelvin-hartree relationship``
``kelvin-hertz relationship``
``kelvin-inverse meter relationship``
``kelvin-joule relationship``
``kelvin-kilogram relationship``
``kilogram-atomic mass unit relationship``
``kilogram-electron volt relationship``
``kilogram-hartree relationship``
``kilogram-hertz relationship``
``kilogram-inverse meter relationship``
``kilogram-joule relationship``
``kilogram-kelvin relationship``
``lattice parameter of silicon``
``Loschmidt constant (273.15 K, 101.325 kPa)``
``magnetic constant``
``magnetic flux quantum``
``Mo x unit``
``molar gas constant``
``molar mass constant``
``molar mass of carbon-12``
``molar Planck constant``
``molar Planck constant times c``
``molar volume of ideal gas (273.15 K, 100 kPa)``
``molar volume of ideal gas (273.15 K, 101.325 kPa)``
``molar volume of silicon``
``muon Compton wavelength``
``muon Compton wavelength over 2 pi``
``muon g factor``
``muon magnetic moment``
``muon magnetic moment anomaly``
``muon magnetic moment to Bohr magneton ratio``
``muon magnetic moment to nuclear magneton ratio``
``muon mass``
``muon mass energy equivalent``
``muon mass energy equivalent in MeV``
``muon mass in u``
``muon molar mass``
``muon-electron mass ratio``
``muon-neutron mass ratio``
``muon-proton magnetic moment ratio``
``muon-proton mass ratio``
``muon-tau mass ratio``
``natural unit of action``
``natural unit of action in eV s``
``natural unit of energy``
``natural unit of energy in MeV``
``natural unit of length``
``natural unit of mass``
``natural unit of momentum``
``natural unit of momentum in MeV/c``
``natural unit of time``
``natural unit of velocity``
``neutron Compton wavelength``
``neutron Compton wavelength over 2 pi``
``neutron g factor``
``neutron gyromagnetic ratio``
``neutron gyromagnetic ratio over 2 pi``
``neutron magnetic moment``
``neutron magnetic moment to Bohr magneton ratio``
``neutron magnetic moment to nuclear magneton ratio``
``neutron mass``
``neutron mass energy equivalent``
``neutron mass energy equivalent in MeV``
``neutron mass in u``
``neutron molar mass``
``neutron to shielded proton magnetic moment ratio``
``neutron-electron magnetic moment ratio``
``neutron-electron mass ratio``
``neutron-muon mass ratio``
``neutron-proton magnetic moment ratio``
``neutron-proton mass ratio``
``neutron-tau mass ratio``
``Newtonian constant of gravitation``
``Newtonian constant of gravitation over h-bar c``
``nuclear magneton``
``nuclear magneton in eV/T``
``nuclear magneton in inverse meters per tesla``
``nuclear magneton in K/T``
``nuclear magneton in MHz/T``
``Planck constant``
``Planck constant in eV s``
``Planck constant over 2 pi``
``Planck constant over 2 pi in eV s``
``Planck constant over 2 pi times c in MeV fm``
``Planck length``
``Planck mass``
``Planck temperature``
``Planck time``
``proton charge to mass quotient``
``proton Compton wavelength``
``proton Compton wavelength over 2 pi``
``proton g factor``
``proton gyromagnetic ratio``
``proton gyromagnetic ratio over 2 pi``
``proton magnetic moment``
``proton magnetic moment to Bohr magneton ratio``
``proton magnetic moment to nuclear magneton ratio``
``proton magnetic shielding correction``
``proton mass``
``proton mass energy equivalent``
``proton mass energy equivalent in MeV``
``proton mass in u``
``proton molar mass``
``proton rms charge radius``
``proton-electron mass ratio``
``proton-muon mass ratio``
``proton-neutron magnetic moment ratio``
``proton-neutron mass ratio``
``proton-tau mass ratio``
``quantum of circulation``
``quantum of circulation times 2``
``Rydberg constant``
``Rydberg constant times c in Hz``
``Rydberg constant times hc in eV``
``Rydberg constant times hc in J``
``Sackur-Tetrode constant (1 K, 100 kPa)``
``Sackur-Tetrode constant (1 K, 101.325 kPa)``
``second radiation constant``
``shielded helion gyromagnetic ratio``
``shielded helion gyromagnetic ratio over 2 pi``
``shielded helion magnetic moment``
``shielded helion magnetic moment to Bohr magneton ratio``
``shielded helion magnetic moment to nuclear magneton ratio``
``shielded helion to proton magnetic moment ratio``
``shielded helion to shielded proton magnetic moment ratio``
``shielded proton gyromagnetic ratio``
``shielded proton gyromagnetic ratio over 2 pi``
``shielded proton magnetic moment``
``shielded proton magnetic moment to Bohr magneton ratio``
``shielded proton magnetic moment to nuclear magneton ratio``
``speed of light in vacuum``
``standard acceleration of gravity``
``standard atmosphere``
``Stefan-Boltzmann constant``
``tau Compton wavelength``
``tau Compton wavelength over 2 pi``
``tau mass``
``tau mass energy equivalent``
``tau mass energy equivalent in MeV``
``tau mass in u``
``tau molar mass``
``tau-electron mass ratio``
``tau-muon mass ratio``
``tau-neutron mass ratio``
``tau-proton mass ratio``
``Thomson cross section``
``unified atomic mass unit``
``von Klitzing constant``
``weak mixing angle``
``Wien displacement law constant``
``{220} lattice spacing of silicon``
====================================================================== ====
Unit prefixes
=============
SI
--
============ =================================================================
``yotta`` :math:`10^{24}`
``zetta`` :math:`10^{21}`
``exa`` :math:`10^{18}`
``peta`` :math:`10^{15}`
``tera`` :math:`10^{12}`
``giga`` :math:`10^{9}`
``mega`` :math:`10^{6}`
``kilo`` :math:`10^{3}`
``hecto`` :math:`10^{2}`
``deka`` :math:`10^{1}`
``deci`` :math:`10^{-1}`
``centi`` :math:`10^{-2}`
``milli`` :math:`10^{-3}`
``micro`` :math:`10^{-6}`
``nano`` :math:`10^{-9}`
``pico`` :math:`10^{-12}`
``femto`` :math:`10^{-15}`
``atto`` :math:`10^{-18}`
``zepto`` :math:`10^{-21}`
============ =================================================================
Binary
------
============ =================================================================
``kibi`` :math:`2^{10}`
``mebi`` :math:`2^{20}`
``gibi`` :math:`2^{30}`
``tebi`` :math:`2^{40}`
``pebi`` :math:`2^{50}`
``exbi`` :math:`2^{60}`
``zebi`` :math:`2^{70}`
``yobi`` :math:`2^{80}`
============ =================================================================
Units
=====
Weight
------
================= ============================================================
``gram`` :math:`10^{-3}` kg
``metric_ton`` :math:`10^{3}` kg
``grain`` one grain in kg
``lb`` one pound (avoirdupous) in kg
``oz`` one ounce in kg
``stone`` one stone in kg
``grain`` one grain in kg
``long_ton`` one long ton in kg
``short_ton`` one short ton in kg
``troy_ounce`` one Troy ounce in kg
``troy_pound`` one Troy pound in kg
``carat`` one carat in kg
``m_u`` atomic mass constant (in kg)
================= ============================================================
Angle
-----
================= ============================================================
``degree`` degree in radians
``arcmin`` arc minute in radians
``arcsec`` arc second in radians
================= ============================================================
Time
----
================= ============================================================
``minute`` one minute in seconds
``hour`` one hour in seconds
``day`` one day in seconds
``week`` one week in seconds
``year`` one year (365 days) in seconds
``Julian_year`` one Julian year (365.25 days) in seconds
================= ============================================================
Length
------
================= ============================================================
``inch`` one inch in meters
``foot`` one foot in meters
``yard`` one yard in meters
``mile`` one mile in meters
``mil`` one mil in meters
``pt`` one point in meters
``survey_foot`` one survey foot in meters
``survey_mile`` one survey mile in meters
``nautical_mile`` one nautical mile in meters
``fermi`` one Fermi in meters
``angstrom`` one Ångström in meters
``micron`` one micron in meters
``au`` one astronomical unit in meters
``light_year`` one light year in meters
``parsec`` one parsec in meters
================= ============================================================
Pressure
--------
================= ============================================================
``atm`` standard atmosphere in pascals
``bar`` one bar in pascals
``torr`` one torr (mmHg) in pascals
``psi`` one psi in pascals
================= ============================================================
Area
----
================= ============================================================
``hectare`` one hectare in square meters
``acre`` one acre in square meters
================= ============================================================
Volume
------
=================== ========================================================
``liter`` one liter in cubic meters
``gallon`` one gallon (US) in cubic meters
``gallon_imp`` one gallon (UK) in cubic meters
``fluid_ounce`` one fluid ounce (US) in cubic meters
``fluid_ounce_imp`` one fluid ounce (UK) in cubic meters
``bbl`` one barrel in cubic meters
=================== ========================================================
Speed
-----
================= ==========================================================
``kmh`` kilometers per hour in meters per second
``mph`` miles per hour in meters per second
``mach`` one Mach (approx., at 15 °C, 1 atm) in meters per second
``knot`` one knot in meters per second
================= ==========================================================
Temperature
-----------
===================== =======================================================
``zero_Celsius`` zero of Celsius scale in Kelvin
``degree_Fahrenheit`` one Fahrenheit (only differences) in Kelvins
===================== =======================================================
.. autosummary::
:toctree: generated/
C2K
K2C
F2C
C2F
F2K
K2F
Energy
------
==================== =======================================================
``eV`` one electron volt in Joules
``calorie`` one calorie (thermochemical) in Joules
``calorie_IT`` one calorie (International Steam Table calorie, 1956) in Joules
``erg`` one erg in Joules
``Btu`` one British thermal unit (International Steam Table) in Joules
``Btu_th`` one British thermal unit (thermochemical) in Joules
``ton_TNT`` one ton of TNT in Joules
==================== =======================================================
Power
-----
==================== =======================================================
``hp`` one horsepower in watts
==================== =======================================================
Force
-----
==================== =======================================================
``dyn`` one dyne in newtons
``lbf`` one pound force in newtons
``kgf`` one kilogram force in newtons
==================== =======================================================
Optics
------
.. autosummary::
:toctree: generated/
lambda2nu
nu2lambda
=======
.. automodule:: scipy.constants
>>>>>>> 85b42d3bbdcef5cbe0fe2390bba8b3ff1608040b

View file

@ -1,81 +0,0 @@
<<<<<<< HEAD
Fourier transforms (:mod:`scipy.fftpack`)
=========================================
.. module:: scipy.fftpack
Fast Fourier transforms
-----------------------
.. autosummary::
:toctree: generated/
fft
ifft
fftn
ifftn
fft2
ifft2
rfft
irfft
Differential and pseudo-differential operators
----------------------------------------------
.. autosummary::
:toctree: generated/
diff
tilbert
itilbert
hilbert
ihilbert
cs_diff
sc_diff
ss_diff
cc_diff
shift
Helper functions
----------------
.. autosummary::
:toctree: generated/
fftshift
ifftshift
dftfreq
rfftfreq
Convolutions (:mod:`scipy.fftpack.convolve`)
--------------------------------------------
.. module:: scipy.fftpack.convolve
.. autosummary::
:toctree: generated/
convolve
convolve_z
init_convolution_kernel
destroy_convolve_cache
Other (:mod:`scipy.fftpack._fftpack`)
-------------------------------------
.. module:: scipy.fftpack._fftpack
.. autosummary::
:toctree: generated/
drfft
zfft
zrfft
zfftnd
destroy_drfft_cache
destroy_zfft_cache
destroy_zfftnd_cache
=======
.. automodule:: scipy.fftpack
>>>>>>> 85b42d3bbdcef5cbe0fe2390bba8b3ff1608040b

View file

@ -1 +0,0 @@
.. include:: ../../HACKING.rst.txt

View file

@ -1,64 +0,0 @@
SciPy
=====
<<<<<<< HEAD
:Release: |version|
=======
:Release: |release|
>>>>>>> 85b42d3bbdcef5cbe0fe2390bba8b3ff1608040b
:Date: |today|
SciPy (pronounced "Sigh Pie") is open-source software for mathematics,
science, and engineering.
.. toctree::
:maxdepth: 2
tutorial/index
.. toctree::
:maxdepth: 1
<<<<<<< HEAD
=======
hacking
api
>>>>>>> 85b42d3bbdcef5cbe0fe2390bba8b3ff1608040b
release
Reference
---------
.. toctree::
:maxdepth: 1
cluster
constants
fftpack
integrate
interpolate
io
linalg
<<<<<<< HEAD
maxentropy
=======
>>>>>>> 85b42d3bbdcef5cbe0fe2390bba8b3ff1608040b
misc
ndimage
odr
optimize
signal
sparse
sparse.linalg
<<<<<<< HEAD
spatial
special
stats
=======
sparse.csgraph
spatial
special
stats
stats.mstats
>>>>>>> 85b42d3bbdcef5cbe0fe2390bba8b3ff1608040b
weave

View file

@ -1,48 +0,0 @@
<<<<<<< HEAD
=============================================
Integration and ODEs (:mod:`scipy.integrate`)
=============================================
.. module:: scipy.integrate
Integrating functions, given function object
============================================
.. autosummary::
:toctree: generated/
quad
dblquad
tplquad
fixed_quad
quadrature
romberg
Integrating functions, given fixed samples
==========================================
.. autosummary::
:toctree: generated/
trapz
cumtrapz
simps
romb
.. seealso::
:mod:`scipy.special` for orthogonal polynomials (special) for Gaussian
quadrature roots and weights for other weighting factors and regions.
Integrators of ODE systems
==========================
.. autosummary::
:toctree: generated/
odeint
ode
=======
.. automodule:: scipy.integrate
>>>>>>> 85b42d3bbdcef5cbe0fe2390bba8b3ff1608040b

View file

@ -1,104 +0,0 @@
<<<<<<< HEAD
========================================
Interpolation (:mod:`scipy.interpolate`)
========================================
.. module:: scipy.interpolate
Univariate interpolation
========================
.. autosummary::
:toctree: generated/
interp1d
BarycentricInterpolator
KroghInterpolator
PiecewisePolynomial
barycentric_interpolate
krogh_interpolate
piecewise_polynomial_interpolate
Multivariate interpolation
==========================
.. autosummary::
:toctree: generated/
interp2d
Rbf
1-D Splines
===========
.. autosummary::
:toctree: generated/
UnivariateSpline
InterpolatedUnivariateSpline
LSQUnivariateSpline
The above univariate spline classes have the following methods:
.. autosummary::
:toctree: generated/
UnivariateSpline.__call__
UnivariateSpline.derivatives
UnivariateSpline.integral
UnivariateSpline.roots
UnivariateSpline.get_coeffs
UnivariateSpline.get_knots
UnivariateSpline.get_residual
UnivariateSpline.set_smoothing_factor
Low-level interface to FITPACK functions:
.. autosummary::
:toctree: generated/
splrep
splprep
splev
splint
sproot
spalde
bisplrep
bisplev
2-D Splines
===========
.. seealso:: scipy.ndimage.map_coordinates
.. autosummary::
:toctree: generated/
BivariateSpline
SmoothBivariateSpline
LSQBivariateSpline
Low-level interface to FITPACK functions:
.. autosummary::
:toctree: generated/
bisplrep
bisplev
Additional tools
================
.. autosummary::
:toctree: generated/
lagrange
approximate_taylor_polynomial
=======
.. automodule:: scipy.interpolate
>>>>>>> 85b42d3bbdcef5cbe0fe2390bba8b3ff1608040b

View file

@ -1 +0,0 @@
.. automodule:: scipy.io.arff

View file

@ -1,76 +0,0 @@
<<<<<<< HEAD
==================================
Input and output (:mod:`scipy.io`)
==================================
.. seealso:: :ref:`numpy-reference.routines.io` (in Numpy)
.. module:: scipy.io
MATLAB® files
=============
.. autosummary::
:toctree: generated/
loadmat
savemat
Matrix Market files
===================
.. autosummary::
:toctree: generated/
mminfo
mmread
mmwrite
Other
=====
.. autosummary::
:toctree: generated/
save_as_module
npfile
Wav sound files (:mod:`scipy.io.wavfile`)
=========================================
.. module:: scipy.io.wavfile
.. autosummary::
:toctree: generated/
read
write
Arff files (:mod:`scipy.io.arff`)
=================================
.. automodule:: scipy.io.arff
.. autosummary::
:toctree: generated/
loadarff
Netcdf (:mod:`scipy.io.netcdf`)
===============================
.. module:: scipy.io.netcdf
.. autosummary::
:toctree: generated/
netcdf_file
netcdf_variable
=======
.. automodule:: scipy.io
.. toctree::
:hidden:
scipy.io.arff
>>>>>>> 85b42d3bbdcef5cbe0fe2390bba8b3ff1608040b

View file

@ -1 +0,0 @@
.. automodule:: scipy.linalg.blas

View file

@ -1 +0,0 @@
.. automodule:: scipy.linalg.cython_blas

View file

@ -1 +0,0 @@
.. automodule:: scipy.linalg.cython_lapack

View file

@ -1 +0,0 @@
.. automodule:: scipy.linalg.interpolative

View file

@ -1 +0,0 @@
.. automodule:: scipy.linalg.lapack

View file

@ -1,108 +0,0 @@
<<<<<<< HEAD
====================================
Linear algebra (:mod:`scipy.linalg`)
====================================
.. module:: scipy.linalg
Basics
======
.. autosummary::
:toctree: generated/
inv
solve
solve_banded
solveh_banded
det
norm
lstsq
pinv
pinv2
Eigenvalue Problem
==================
.. autosummary::
:toctree: generated/
eig
eigvals
eigh
eigvalsh
eig_banded
eigvals_banded
Decompositions
==============
.. autosummary::
:toctree: generated/
lu
lu_factor
lu_solve
svd
svdvals
diagsvd
orth
cholesky
cholesky_banded
cho_factor
cho_solve
cho_solve_banded
qr
schur
rsf2csf
hessenberg
Matrix Functions
================
.. autosummary::
:toctree: generated/
expm
expm2
expm3
logm
cosm
sinm
tanm
coshm
sinhm
tanhm
signm
sqrtm
funm
Special Matrices
================
.. autosummary::
:toctree: generated/
block_diag
circulant
companion
hadamard
hankel
kron
leslie
toeplitz
tri
tril
triu
=======
.. automodule:: scipy.linalg
.. toctree::
:hidden:
linalg.blas
linalg.lapack
linalg.cython_blas
linalg.cython_lapack
linalg.interpolative
>>>>>>> 85b42d3bbdcef5cbe0fe2390bba8b3ff1608040b

View file

@ -1,88 +0,0 @@
================================================
Maximum entropy models (:mod:`scipy.maxentropy`)
================================================
.. automodule:: scipy.maxentropy
Models
======
.. autoclass:: scipy.maxentropy.basemodel
.. autosummary::
:toctree: generated/
basemodel.beginlogging
basemodel.endlogging
basemodel.clearcache
basemodel.crossentropy
basemodel.dual
basemodel.fit
basemodel.grad
basemodel.log
basemodel.logparams
basemodel.normconst
basemodel.reset
basemodel.setcallback
basemodel.setparams
basemodel.setsmooth
.. autoclass:: scipy.maxentropy.model
.. autosummary::
:toctree: generated/
model.expectations
model.lognormconst
model.logpmf
model.pmf_function
model.setfeaturesandsamplespace
.. autoclass:: scipy.maxentropy.bigmodel
.. autosummary::
:toctree: generated/
bigmodel.estimate
bigmodel.logpdf
bigmodel.pdf
bigmodel.pdf_function
bigmodel.resample
bigmodel.setsampleFgen
bigmodel.settestsamples
bigmodel.stochapprox
bigmodel.test
.. autoclass:: scipy.maxentropy.conditionalmodel
.. autosummary::
:toctree: generated/
conditionalmodel.dual
conditionalmodel.expectations
conditionalmodel.fit
conditionalmodel.lognormconst
conditionalmodel.logpmf
Utilities
=========
.. autosummary::
:toctree: generated/
arrayexp
arrayexpcomplex
columnmeans
columnvariances
densefeaturematrix
densefeatures
dotprod
flatten
innerprod
innerprodtranspose
logsumexp
logsumexp_naive
robustlog
rowmeans
sample_wr
sparsefeaturematrix
sparsefeatures

View file

@ -1,14 +0,0 @@
<<<<<<< HEAD
==========================================
Miscellaneous routines (:mod:`scipy.misc`)
==========================================
.. warning::
This documentation is work-in-progress and unorganized.
.. automodule:: scipy.misc
:members:
=======
.. automodule:: scipy.misc
>>>>>>> 85b42d3bbdcef5cbe0fe2390bba8b3ff1608040b

View file

@ -1,126 +0,0 @@
<<<<<<< HEAD
=========================================================
Multi-dimensional image processing (:mod:`scipy.ndimage`)
=========================================================
.. module:: scipy.ndimage
Functions for multi-dimensional image processing.
Filters :mod:`scipy.ndimage.filters`
====================================
.. module:: scipy.ndimage.filters
.. autosummary::
:toctree: generated/
convolve
convolve1d
correlate
correlate1d
gaussian_filter
gaussian_filter1d
gaussian_gradient_magnitude
gaussian_laplace
generic_filter
generic_filter1d
generic_gradient_magnitude
generic_laplace
laplace
maximum_filter
maximum_filter1d
median_filter
minimum_filter
minimum_filter1d
percentile_filter
prewitt
rank_filter
sobel
uniform_filter
uniform_filter1d
Fourier filters :mod:`scipy.ndimage.fourier`
============================================
.. module:: scipy.ndimage.fourier
.. autosummary::
:toctree: generated/
fourier_ellipsoid
fourier_gaussian
fourier_shift
fourier_uniform
Interpolation :mod:`scipy.ndimage.interpolation`
================================================
.. module:: scipy.ndimage.interpolation
.. autosummary::
:toctree: generated/
affine_transform
geometric_transform
map_coordinates
rotate
shift
spline_filter
spline_filter1d
zoom
Measurements :mod:`scipy.ndimage.measurements`
==============================================
.. module:: scipy.ndimage.measurements
.. autosummary::
:toctree: generated/
center_of_mass
extrema
find_objects
histogram
label
maximum
maximum_position
mean
minimum
minimum_position
standard_deviation
sum
variance
watershed_ift
Morphology :mod:`scipy.ndimage.morphology`
==========================================
.. module:: scipy.ndimage.morphology
.. autosummary::
:toctree: generated/
binary_closing
binary_dilation
binary_erosion
binary_fill_holes
binary_hit_or_miss
binary_opening
binary_propagation
black_tophat
distance_transform_bf
distance_transform_cdt
distance_transform_edt
generate_binary_structure
grey_closing
grey_dilation
grey_erosion
grey_opening
iterate_structure
morphological_gradient
morphological_laplace
white_tophat
=======
.. automodule:: scipy.ndimage
>>>>>>> 85b42d3bbdcef5cbe0fe2390bba8b3ff1608040b

View file

@ -1,37 +0,0 @@
<<<<<<< HEAD
=================================================
Orthogonal distance regression (:mod:`scipy.odr`)
=================================================
.. automodule:: scipy.odr
.. autoclass:: Data
.. automethod:: set_meta
.. autoclass:: Model
.. automethod:: set_meta
.. autoclass:: ODR
.. automethod:: restart
.. automethod:: run
.. automethod:: set_iprint
.. automethod:: set_job
.. autoclass:: Output
.. automethod:: pprint
.. autoexception:: odr_error
.. autoexception:: odr_stop
.. autofunction:: odr
=======
.. automodule:: scipy.odr
>>>>>>> 85b42d3bbdcef5cbe0fe2390bba8b3ff1608040b

View file

@ -1,8 +0,0 @@
.. _optimize.linprog-simplex:
linprog(method='Simplex')
----------------------------------------
.. scipy-optimize:function:: scipy.optimize.linprog
:impl: scipy.optimize._linprog._linprog_simplex
:method: simplex

View file

@ -1,8 +0,0 @@
.. _optimize.minimize-bfgs:
minimize(method='BFGS')
----------------------------------------
.. scipy-optimize:function:: scipy.optimize.minimize
:impl: scipy.optimize.optimize._minimize_bfgs
:method: BFGS

View file

@ -1,8 +0,0 @@
.. _optimize.minimize-cg:
minimize(method='CG')
----------------------------------------
.. scipy-optimize:function:: scipy.optimize.minimize
:impl: scipy.optimize.optimize._minimize_cg
:method: CG

Some files were not shown because too many files have changed in this diff Show more